Diffstat (limited to 'erts')
-rw-r--r--  erts/Makefile.in | 67
-rw-r--r--  erts/aclocal.m4 | 540
-rwxr-xr-x  erts/autoconf/win32.config.cache.static | 2
-rwxr-xr-x  erts/autoconf/win64.config.cache.static | 271
-rw-r--r--  erts/configure.in | 400
-rw-r--r--  erts/doc/specs/.gitignore | 1
-rw-r--r--  erts/doc/src/Makefile | 38
-rw-r--r--  erts/doc/src/absform.xml | 5
-rw-r--r--  erts/doc/src/alt_dist.xml | 4
-rw-r--r--  erts/doc/src/driver.xml | 10
-rw-r--r--  erts/doc/src/driver_entry.xml | 48
-rw-r--r--  erts/doc/src/epmd.xml | 4
-rw-r--r--  erts/doc/src/erl.xml | 179
-rw-r--r--  erts/doc/src/erl_driver.xml | 326
-rw-r--r-- [-rwxr-xr-x]  erts/doc/src/erl_ext_fig.gif | bin 3834 -> 3834 bytes
-rw-r--r--  erts/doc/src/erl_nif.xml | 52
-rw-r--r--  erts/doc/src/erl_prim_loader.xml | 88
-rw-r--r--  erts/doc/src/erlang.xml | 1081
-rw-r--r--  erts/doc/src/erlsrv.xml | 19
-rw-r--r--  erts/doc/src/erts_alloc.xml | 132
-rw-r--r--  erts/doc/src/init.xml | 67
-rw-r--r--  erts/doc/src/make.dep | 32
-rw-r--r--  erts/doc/src/match_spec.xml | 8
-rw-r--r--  erts/doc/src/notes.xml | 1047
-rw-r--r--  erts/doc/src/specs.xml | 7
-rw-r--r--  erts/doc/src/start_erl.xml | 31
-rw-r--r--  erts/doc/src/zlib.xml | 363
-rw-r--r--  erts/emulator/Makefile.in | 394
-rw-r--r--  erts/emulator/beam/atom.c | 8
-rw-r--r--  erts/emulator/beam/atom.h | 4
-rw-r--r--  erts/emulator/beam/atom.names | 11
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 151
-rw-r--r--  erts/emulator/beam/beam_bp.c | 68
-rw-r--r--  erts/emulator/beam/beam_bp.h | 23
-rw-r--r--  erts/emulator/beam/beam_catches.c | 48
-rw-r--r--  erts/emulator/beam/beam_debug.c | 32
-rw-r--r--  erts/emulator/beam/beam_emu.c | 913
-rw-r--r--  erts/emulator/beam/beam_load.c | 1643
-rw-r--r--  erts/emulator/beam/beam_load.h | 17
-rw-r--r--  erts/emulator/beam/bif.c | 632
-rw-r--r--  erts/emulator/beam/bif.h | 155
-rw-r--r--  erts/emulator/beam/bif.tab | 39
-rw-r--r--  erts/emulator/beam/big.c | 102
-rw-r--r--  erts/emulator/beam/big.h | 2
-rw-r--r--  erts/emulator/beam/binary.c | 12
-rw-r--r--  erts/emulator/beam/break.c | 30
-rw-r--r--  erts/emulator/beam/copy.c | 15
-rw-r--r--  erts/emulator/beam/dist.c | 283
-rw-r--r--  erts/emulator/beam/dist.h | 4
-rw-r--r--  erts/emulator/beam/dtrace-wrapper.h | 109
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 33
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 1543
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 227
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 81
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 1731
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 154
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 976
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.h | 60
-rw-r--r--  erts/emulator/beam/erl_arith.c | 8
-rw-r--r--  erts/emulator/beam/erl_async.c | 768
-rw-r--r--  erts/emulator/beam/erl_async.h | 66
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 186
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.h | 5
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 80
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 169
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 518
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c | 104
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 25
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 24
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 121
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 135
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 11
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 176
-rw-r--r--  erts/emulator/beam/erl_bits.c | 22
-rw-r--r--  erts/emulator/beam/erl_bits.h | 4
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 23
-rw-r--r--  erts/emulator/beam/erl_db.c | 290
-rw-r--r--  erts/emulator/beam/erl_db.h | 8
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 140
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 5
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 34
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 80
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 6
-rw-r--r--  erts/emulator/beam/erl_debug.h | 4
-rw-r--r--  erts/emulator/beam/erl_driver.h | 125
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 8
-rw-r--r--  erts/emulator/beam/erl_gc.c | 196
-rw-r--r--  erts/emulator/beam/erl_gc.h | 4
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 40
-rw-r--r--  erts/emulator/beam/erl_init.c | 279
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 8
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 23
-rw-r--r--  erts/emulator/beam/erl_message.c | 260
-rw-r--r--  erts/emulator/beam/erl_message.h | 15
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 20
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 8
-rw-r--r--  erts/emulator/beam/erl_nif.c | 159
-rw-r--r--  erts/emulator/beam/erl_nif.h | 7
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 202
-rw-r--r--  erts/emulator/beam/erl_nmgc.c | 2
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 2
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 20
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 3
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 102
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 8
-rw-r--r--  erts/emulator/beam/erl_process.c | 2523
-rw-r--r--  erts/emulator/beam/erl_process.h | 194
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 2
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 39
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 53
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 304
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h | 239
-rw-r--r--  erts/emulator/beam/erl_smp.h | 1054
-rw-r--r--  erts/emulator/beam/erl_term.h | 14
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 1377
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 303
-rw-r--r--  erts/emulator/beam/erl_thr_queue.c | 762
-rw-r--r--  erts/emulator/beam/erl_thr_queue.h | 209
-rw-r--r--  erts/emulator/beam/erl_threads.h | 1257
-rw-r--r--  erts/emulator/beam/erl_time.h | 30
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 178
-rw-r--r--  erts/emulator/beam/erl_trace.c | 196
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 52
-rw-r--r--  erts/emulator/beam/erl_vm.h | 8
-rw-r--r--  erts/emulator/beam/erlang_dtrace.d | 726
-rw-r--r--  erts/emulator/beam/export.c | 8
-rw-r--r--  erts/emulator/beam/external.c | 99
-rw-r--r--  erts/emulator/beam/external.h | 7
-rw-r--r--  erts/emulator/beam/fix_alloc.c | 287
-rw-r--r--  erts/emulator/beam/global.h | 158
-rw-r--r--  erts/emulator/beam/io.c | 466
-rw-r--r--  erts/emulator/beam/module.c | 5
-rw-r--r--  erts/emulator/beam/ops.tab | 265
-rw-r--r--  erts/emulator/beam/packet_parser.c | 45
-rw-r--r--  erts/emulator/beam/register.h | 4
-rw-r--r--  erts/emulator/beam/safe_hash.c | 10
-rw-r--r--  erts/emulator/beam/sys.h | 411
-rw-r--r--  erts/emulator/beam/time.c | 49
-rw-r--r--  erts/emulator/beam/utils.c | 828
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 1336
-rw-r--r--  erts/emulator/drivers/common/erl_efile.h | 49
-rw-r--r--  erts/emulator/drivers/common/gzio.c | 2
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 1244
-rw-r--r--  erts/emulator/drivers/common/ram_file_drv.c | 82
-rw-r--r--  erts/emulator/drivers/common/zlib_drv.c | 29
-rw-r--r--  erts/emulator/drivers/unix/ttsl_drv.c | 24
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c | 292
-rw-r--r--  erts/emulator/drivers/win32/registry_drv.c | 21
-rw-r--r--  erts/emulator/drivers/win32/ttsl_drv.c | 38
-rw-r--r--  erts/emulator/drivers/win32/win_con.c | 101
-rw-r--r-- [-rwxr-xr-x]  erts/emulator/drivers/win32/win_efile.c | 128
-rw-r--r--  erts/emulator/hipe/hipe_abi.txt | 2
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 53
-rw-r--r--  erts/emulator/hipe/hipe_amd64_primops.h | 2
-rw-r--r--  erts/emulator/hipe/hipe_arm.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_arm_bifs.m4 | 39
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 53
-rw-r--r--  erts/emulator/hipe/hipe_bif0.h | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif0.tab | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif2.c | 23
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 30
-rw-r--r--  erts/emulator/hipe/hipe_gc.c | 10
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 90
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 66
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.h | 2
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c | 51
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.h | 22
-rw-r--r--  erts/emulator/hipe/hipe_ppc.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_ppc_asm.m4 | 11
-rw-r--r--  erts/emulator/hipe/hipe_ppc_bifs.m4 | 59
-rw-r--r--  erts/emulator/hipe/hipe_primops.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_process.h | 6
-rw-r--r--  erts/emulator/hipe/hipe_risc_glue.h | 16
-rw-r--r--  erts/emulator/hipe/hipe_sparc_bifs.m4 | 65
-rw-r--r--  erts/emulator/hipe/hipe_x86.h | 2
-rw-r--r--  erts/emulator/hipe/hipe_x86_bifs.m4 | 57
-rw-r--r--  erts/emulator/hipe/hipe_x86_glue.h | 19
-rw-r--r--  erts/emulator/hipe/hipe_x86_primops.h | 3
-rw-r--r--  erts/emulator/pcre/Makefile.in | 165
-rw-r--r--  erts/emulator/pcre/pcre.mk | 111
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 74
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h | 15
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 682
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h | 11
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 287
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 7
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 7
-rw-r--r--  erts/emulator/sys/unix/sys.c | 320
-rw-r--r--  erts/emulator/sys/vxworks/erl_vxworks_sys.h | 3
-rw-r--r--  erts/emulator/sys/vxworks/sys.c | 24
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 74
-rw-r--r--  erts/emulator/sys/win32/erl_win32_sys_ddll.c | 5
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h | 44
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 41
-rwxr-xr-x [-rw-r--r--]  erts/emulator/sys/win32/sys.c | 219
-rw-r--r--  erts/emulator/sys/win32/sys_env.c | 16
-rw-r--r--  erts/emulator/sys/win32/sys_float.c | 5
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c | 12
-rw-r--r--  erts/emulator/sys/win32/sys_time.c | 327
-rw-r--r--  erts/emulator/test/Makefile | 3
-rw-r--r--  erts/emulator/test/a_SUITE_data/timer_driver.c | 20
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/allocator_test.h | 22
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/coalesce.c | 2
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/rbtree.c | 86
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/testcase_driver.c | 25
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 54
-rw-r--r--  erts/emulator/test/big_SUITE.erl | 15
-rw-r--r--  erts/emulator/test/binary_SUITE.erl | 57
-rw-r--r--  erts/emulator/test/bs_construct_SUITE.erl | 17
-rw-r--r--  erts/emulator/test/bs_match_misc_SUITE.erl | 14
-rw-r--r--  erts/emulator/test/bs_utf_SUITE.erl | 12
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 20
-rw-r--r--  erts/emulator/test/busy_port_SUITE_data/busy_drv.c | 19
-rw-r--r--  erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c | 14
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 85
-rw-r--r--  erts/emulator/test/code_SUITE.erl | 252
-rw-r--r--  erts/emulator/test/code_SUITE_data/fun_confusion.erl | 31
-rw-r--r--  erts/emulator/test/code_SUITE_data/literals.erl | 23
-rw-r--r--  erts/emulator/test/code_SUITE_data/versions.erl | 33
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/dummy_drv.c | 19
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/echo_drv.c | 26
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/echo_drv_fail_init.c | 26
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/initfail_drv.c | 19
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/lock_drv.c | 26
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/noinit_drv.c | 21
-rw-r--r--  erts/emulator/test/ddll_SUITE_data/wrongname_drv.c | 19
-rw-r--r--  erts/emulator/test/decode_packet_SUITE.erl | 60
-rw-r--r--  erts/emulator/test/distribution_SUITE.erl | 45
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 188
-rw-r--r--  erts/emulator/test/driver_SUITE_data/Makefile.src | 8
-rw-r--r--  erts/emulator/test/driver_SUITE_data/async_blast_drv.c | 124
-rw-r--r--  erts/emulator/test/driver_SUITE_data/caller_drv.c | 30
-rw-r--r--  erts/emulator/test/driver_SUITE_data/chkio_drv.c | 90
-rw-r--r--  erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c | 31
-rw-r--r--  erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c | 15
-rw-r--r--  erts/emulator/test/driver_SUITE_data/many_events_drv.c | 28
-rw-r--r--  erts/emulator/test/driver_SUITE_data/monitor_drv.c | 15
-rw-r--r--  erts/emulator/test/driver_SUITE_data/otp_6879_drv.c | 20
-rw-r--r--  erts/emulator/test/driver_SUITE_data/otp_9302_drv.c | 6
-rw-r--r--  erts/emulator/test/driver_SUITE_data/outputv_drv.c | 16
-rw-r--r--  erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c | 13
-rw-r--r--  erts/emulator/test/driver_SUITE_data/queue_drv.c | 23
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c (renamed from erts/emulator/test/driver_SUITE_data/sys_info_1_0_drv.c) | 26
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_drv_impl.c | 11
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c (renamed from erts/emulator/test/driver_SUITE_data/sys_info_1_1_drv.c) | 20
-rw-r--r--  erts/emulator/test/driver_SUITE_data/thr_alloc_drv.c | 16
-rw-r--r--  erts/emulator/test/driver_SUITE_data/thr_free_drv.c | 241
-rw-r--r--  erts/emulator/test/driver_SUITE_data/timer_drv.c | 17
-rw-r--r--  erts/emulator/test/emulator.spec | 1
-rw-r--r--  erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c | 26
-rw-r--r--  erts/emulator/test/erts_debug_SUITE.erl | 67
-rw-r--r--  erts/emulator/test/exception_SUITE.erl | 246
-rw-r--r--  erts/emulator/test/float_SUITE.erl | 126
-rw-r--r--  erts/emulator/test/float_SUITE_data/fp_drv.c | 12
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 10
-rw-r--r--  erts/emulator/test/fun_r13_SUITE.erl (renamed from erts/emulator/test/fun_r12_SUITE.erl) | 10
-rw-r--r--  erts/emulator/test/guard_SUITE.erl | 8
-rw-r--r--  erts/emulator/test/hibernate_SUITE.erl | 31
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl | 61
-rw-r--r--  erts/emulator/test/mtx_SUITE.erl | 17
-rw-r--r--  erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c | 48
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 115
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 123
-rw-r--r--  erts/emulator/test/nif_SUITE_data/tester.c | 1
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/port_SUITE_data/echo_drv.c | 30
-rw-r--r--  erts/emulator/test/port_SUITE_data/exit_drv.c | 33
-rw-r--r--  erts/emulator/test/port_SUITE_data/failure_drv.c | 29
-rw-r--r--  erts/emulator/test/port_bif_SUITE_data/control_drv.c | 25
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 99
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 33
-rw-r--r--  erts/emulator/test/send_term_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/send_term_SUITE_data/send_term_drv.c | 20
-rw-r--r--  erts/emulator/test/sensitive_SUITE.erl | 3
-rw-r--r--  erts/emulator/test/smoke_test_SUITE.erl | 139
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl | 98
-rw-r--r--  erts/emulator/test/system_info_SUITE.erl | 313
-rw-r--r--  erts/emulator/test/system_profile_SUITE.erl | 163
-rw-r--r--  erts/emulator/test/system_profile_SUITE_data/echo_drv.c | 30
-rw-r--r--  erts/emulator/test/time_SUITE.erl | 55
-rw-r--r--  erts/emulator/test/trace_local_SUITE.erl | 32
-rw-r--r--  erts/emulator/test/trace_port_SUITE.erl | 8
-rw-r--r--  erts/emulator/test/trace_port_SUITE_data/echo_drv.c | 37
-rwxr-xr-x  erts/emulator/utils/beam_makeops | 315
-rwxr-xr-x  erts/emulator/utils/make_preload | 13
-rwxr-xr-x  erts/emulator/utils/make_tables | 39
-rw-r--r--  erts/emulator/valgrind/suppress.patched.3.6.0 | 350
-rw-r--r--  erts/emulator/valgrind/suppress.standard | 308
-rw-r--r--  erts/emulator/zlib/Makefile.in | 102
-rw-r--r--  erts/emulator/zlib/zlib.mk | 74
-rw-r--r--  erts/epmd/src/Makefile.in | 14
-rw-r--r--  erts/epmd/src/epmd.c | 6
-rw-r--r--  erts/epmd/src/epmd_cli.c | 7
-rw-r--r--  erts/epmd/src/epmd_int.h | 11
-rw-r--r--  erts/epmd/src/epmd_srv.c | 23
-rw-r--r--  erts/etc/common/Makefile.in | 258
-rw-r--r--  erts/etc/common/erlc.c | 1
-rw-r--r--  erts/etc/common/erlexec.c | 7
-rw-r--r--  erts/etc/common/escript.c | 5
-rw-r--r--  erts/etc/common/heart.c | 8
-rw-r--r--  erts/etc/common/inet_gethost.c | 21
-rw-r--r--  erts/etc/unix/etp-commands | 34
-rw-r--r--  erts/etc/unix/to_erl.c | 9
-rw-r--r--  erts/etc/win32/Install.c | 5
-rwxr-xr-x  erts/etc/win32/cygwin_tools/vc/emu_cc.sh | 11
-rw-r--r--  erts/etc/win32/erlsrv/erlsrv_interactive.c | 191
-rw-r--r--  erts/etc/win32/erlsrv/erlsrv_interactive.h | 4
-rw-r--r--  erts/etc/win32/msys_tools/erl | 42
-rw-r--r--  erts/etc/win32/msys_tools/erlc | 59
-rw-r--r--  erts/etc/win32/msys_tools/javac.sh | 65
-rw-r--r--  erts/etc/win32/msys_tools/make_bootstrap_ini.sh (renamed from erts/emulator/zlib/Makefile) | 31
-rw-r--r--  erts/etc/win32/msys_tools/make_local_ini.sh (renamed from erts/emulator/pcre/Makefile) | 29
-rw-r--r--  erts/etc/win32/msys_tools/msys2win_path.sh | 44
-rw-r--r--  erts/etc/win32/msys_tools/reg_query.sh | 24
-rw-r--r--  erts/etc/win32/msys_tools/vc/ar.sh | 47
-rw-r--r--  erts/etc/win32/msys_tools/vc/cc.sh | 320
-rw-r--r--  erts/etc/win32/msys_tools/vc/coffix.c | 161
-rw-r--r--  erts/etc/win32/msys_tools/vc/emu_cc.sh | 95
-rw-r--r--  erts/etc/win32/msys_tools/vc/ld.sh | 198
-rw-r--r--  erts/etc/win32/msys_tools/vc/mc.sh | 87
-rw-r--r--  erts/etc/win32/msys_tools/vc/rc.sh | 86
-rw-r--r--  erts/etc/win32/msys_tools/win2msys_path.sh | 38
-rw-r--r--  erts/etc/win32/nsis/Makefile | 27
-rwxr-xr-x  erts/etc/win32/nsis/dll_version_helper.sh | 16
-rw-r--r--  erts/etc/win32/nsis/erlang20.nsi | 15
-rw-r--r-- [-rwxr-xr-x]  erts/etc/win32/nsis/erlang_uninst.ico | bin 766 -> 766 bytes
-rwxr-xr-x  erts/etc/win32/nsis/find_redist.sh | 52
-rw-r--r--  erts/etc/win32/start_erl.c | 161
-rw-r--r--  erts/etc/win32/win_erlexec.c | 5
-rw-r--r--  erts/example/matrix_nif.c | 87
-rw-r--r--  erts/include/erl_int_sizes_config.h.in | 5
-rw-r--r--  erts/include/internal/ethr_atomics.h | 9255
-rw-r--r--  erts/include/internal/ethr_internal.h | 6
-rw-r--r--  erts/include/internal/ethr_mutex.h | 145
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h | 175
-rw-r--r--  erts/include/internal/ethread.h | 163
-rw-r--r--  erts/include/internal/ethread_header_config.h.in | 118
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h | 251
-rw-r--r--  erts/include/internal/gcc/ethr_dw_atomic.h | 115
-rw-r--r--  erts/include/internal/gcc/ethr_membar.h | 73
-rw-r--r--  erts/include/internal/gcc/ethread.h | 20
-rw-r--r--  erts/include/internal/i386/atomic.h | 334
-rw-r--r--  erts/include/internal/i386/ethr_dw_atomic.h | 278
-rw-r--r--  erts/include/internal/i386/ethr_membar.h | 114
-rw-r--r--  erts/include/internal/i386/ethread.h | 8
-rw-r--r--  erts/include/internal/i386/rwlock.h | 5
-rw-r--r--  erts/include/internal/i386/spinlock.h | 27
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h | 298
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_membar.h | 75
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h | 14
-rw-r--r--  erts/include/internal/ppc32/atomic.h | 148
-rw-r--r--  erts/include/internal/ppc32/ethr_membar.h | 63
-rw-r--r--  erts/include/internal/ppc32/ethread.h | 5
-rw-r--r--  erts/include/internal/ppc32/rwlock.h | 15
-rw-r--r--  erts/include/internal/ppc32/spinlock.h | 10
-rw-r--r--  erts/include/internal/pthread/ethr_event.h | 8
-rw-r--r--  erts/include/internal/sparc32/atomic.h | 194
-rw-r--r--  erts/include/internal/sparc32/ethr_membar.h | 115
-rw-r--r--  erts/include/internal/sparc32/ethread.h | 7
-rw-r--r--  erts/include/internal/sparc32/rwlock.h | 17
-rw-r--r--  erts/include/internal/sparc32/spinlock.h | 11
-rw-r--r--  erts/include/internal/tile/atomic.h | 136
-rw-r--r--  erts/include/internal/tile/ethr_membar.h | 35
-rw-r--r--  erts/include/internal/tile/ethread.h | 5
-rw-r--r--  erts/include/internal/win/ethr_atomic.h | 595
-rw-r--r--  erts/include/internal/win/ethr_dw_atomic.h | 154
-rw-r--r--  erts/include/internal/win/ethr_event.h | 16
-rw-r--r--  erts/include/internal/win/ethr_membar.h | 145
-rw-r--r--  erts/include/internal/win/ethread.h | 4
-rw-r--r--  erts/lib_src/Makefile.in | 45
-rw-r--r--  erts/lib_src/common/erl_memory_trace_parser.c | 15
-rw-r--r--  erts/lib_src/common/erl_misc_utils.c | 18
-rw-r--r--  erts/lib_src/common/erl_printf.c | 18
-rw-r--r--  erts/lib_src/common/erl_printf_format.c | 4
-rw-r--r--  erts/lib_src/common/ethr_atomics.c | 4608
-rw-r--r--  erts/lib_src/common/ethr_aux.c | 83
-rw-r--r--  erts/lib_src/common/ethr_mutex.c | 501
-rw-r--r--  erts/lib_src/pthread/ethr_x86_sse2_asm.c | 31
-rw-r--r--  erts/lib_src/pthread/ethread.c | 121
-rwxr-xr-x  erts/lib_src/utils/make_atomics_api | 2286
-rw-r--r--  erts/lib_src/win/ethr_event.c | 22
-rw-r--r--  erts/lib_src/win/ethread.c | 22
-rw-r--r--  erts/ntbuild.erl | 332
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam | bin 50392 -> 52904 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam | bin 24148 -> 42416 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam | bin 44880 -> 48064 bytes
-rw-r--r--  erts/preloaded/ebin/otp_ring0.beam | bin 1436 -> 1448 bytes
-rw-r--r--  erts/preloaded/ebin/prim_file.beam | bin 31528 -> 40608 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam | bin 64892 -> 70100 bytes
-rw-r--r--  erts/preloaded/ebin/prim_zip.beam | bin 22444 -> 23460 bytes
-rw-r--r--  erts/preloaded/ebin/zlib.beam | bin 10620 -> 12804 bytes
-rw-r--r--  erts/preloaded/src/erl_prim_loader.erl | 77
-rw-r--r--  erts/preloaded/src/erlang.erl | 584
-rw-r--r--  erts/preloaded/src/init.erl | 33
-rw-r--r--  erts/preloaded/src/prim_file.erl | 345
-rw-r--r--  erts/preloaded/src/prim_inet.erl | 184
-rw-r--r--  erts/preloaded/src/prim_zip.erl | 6
-rw-r--r--  erts/preloaded/src/zlib.erl | 289
-rw-r--r--  erts/start_scripts/Makefile | 8
-rw-r--r--  erts/test/autoimport_SUITE.erl | 11
-rw-r--r--  erts/test/erl_print_SUITE.erl | 8
-rw-r--r--  erts/test/erl_print_SUITE_data/Makefile.src | 4
-rw-r--r--  erts/test/erlc_SUITE.erl | 36
-rw-r--r--  erts/test/ethread_SUITE.erl | 120
-rw-r--r--  erts/test/ethread_SUITE_data/ethread_tests.c | 440
-rw-r--r--  erts/test/nt_SUITE.erl | 8
-rw-r--r--  erts/test/otp_SUITE.erl | 43
-rw-r--r--  erts/test/z_SUITE.erl | 29
-rw-r--r--  erts/vsn.mk | 6
410 files changed, 53071 insertions, 15472 deletions
diff --git a/erts/Makefile.in b/erts/Makefile.in
index 2e63fc469e..1979c50781 100644
--- a/erts/Makefile.in
+++ b/erts/Makefile.in
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2006-2010. All Rights Reserved.
+# Copyright Ericsson AB 2006-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -16,13 +16,16 @@
#
# %CopyrightEnd%
#
+
+.NOTPARALLEL:
+
include $(ERL_TOP)/make/target.mk
include vsn.mk
# ----------------------------------------------------------------------
# Other erts dirs than the emulator dir...
-ERTSDIRS = doc/src etc epmd lib_src
+ERTSDIRS = etc epmd lib_src
XINSTDIRS = preloaded
ifeq ($(NO_START_SCRIPTS),)
ERTSDIRS += start_scripts
@@ -37,42 +40,36 @@ ifeq ($(BUILD_HYBRID_EMU),yes)
EXTRA_FLAVORS += hybrid
endif
-#
-# Some byggy 'make's get confused when a directory is created and used
-# for storing files which other files depend on during the same "make
-# session". As a workaround we do a 'make generate' (which creates
-# all directories) before doing 'make opt', etc...
-#
-
+.PHONY: all
ifneq ($(BUILD_HYBRID_EMU),yes)
all: smp opt
else
all: hybrid smp opt
endif
-debug opt docs clean:
- @ case $@ in \
- docs|clean) ;; \
- *) ( cd emulator && $(MAKE) generate TYPE=$@ FLAVOR=$(FLAVOR)) ;; \
- esac
- @ ( cd emulator && $(MAKE) $@ FLAVOR=$(FLAVOR))
- @for d in $(ERTSDIRS); do \
+.PHONY: docs
+docs:
+ ( cd doc/src && $(MAKE) $@ )
+
+.PHONY: debug opt clean
+debug opt clean:
+ for d in emulator $(ERTSDIRS); do \
if test -d $$d; then \
- ( cd $$d && $(MAKE) $@ ) || exit $$? ; \
+ ( cd $$d && $(MAKE) $@ FLAVOR=$(FLAVOR) ) || exit $$? ; \
fi ; \
- done
+ done
# ----------------------------------------------------------------------
# These are "convenience targets", provided as shortcuts for developers
# - don't use them in scripts or assume they will always stay like this!
#
+.PHONY: $(EXTRA_FLAVORS)
$(EXTRA_FLAVORS):
- @ ( cd emulator \
- && $(MAKE) generate TYPE=opt FLAVOR=$@ \
- && $(MAKE) opt FLAVOR=$@ )
+ ( cd emulator && $(MAKE) opt FLAVOR=$@ )
ifneq ($(BUILD_HYBRID_EMU),yes)
+.PHONY: hybrid
hybrid:
@echo '*** Omitted build of hybrid heap emulator'
@echo '*** since target is $(TARGET)'
@@ -85,6 +82,7 @@ endif
# The copying of beam.dll should be removed when the beam dll need no longer be
# in the same directory...
+.PHONY: local_setup
local_setup:
@cd start_scripts && $(MAKE)
@echo `ls $(ERL_TOP)/bin/`
@@ -107,8 +105,7 @@ local_setup:
cp $(ERL_TOP)/bin/$(TARGET)/escript.exe $(ERL_TOP)/bin/escript.exe; \
chmod 755 $(ERL_TOP)/bin/erl.exe $(ERL_TOP)/bin/erlc.exe \
$(ERL_TOP)/bin/werl.exe; \
- $(ERL_TOP)/erts/etc/win32/cygwin_tools/make_local_ini.sh \
- $(ERL_TOP); \
+ make_local_ini.sh $(ERL_TOP); \
else \
sed -e "s;%FINAL_ROOTDIR%;$(ERL_TOP);" \
-e "s;erts-.*/bin;bin/$(TARGET);" \
@@ -132,11 +129,13 @@ local_setup:
$(ERL_TOP)/bin/start_clean.script
# Run the configure script
+.PHONY: configure
configure:
@set -e ; cd autoconf && $(MAKE)
# Remake the makefiles, if you already have configured but you have edited
# a "Makefile.in".
+.PHONY: makefiles
makefiles:
@set -e ; cd autoconf && $(MAKE) $@
@@ -144,21 +143,17 @@ makefiles:
# Release targets
#
-release release_docs:
-ifeq ($(BUILD_HYBRID_EMU),yes)
- @if test $@ = release; then ( cd emulator && $(MAKE) $@ FLAVOR=hybrid) fi
-else
- @if test $@ = release; then ( $(MAKE) hybrid ) fi
-endif
- @if test $@ = release; then ( cd emulator && $(MAKE) $@ FLAVOR=smp) fi
- @ (cd emulator && $(MAKE) $@ FLAVOR=plain)
- @for d in $(ERTSDIRS) $(XINSTDIRS); do \
+.PHONY: release
+release:
+ for f in plain $(EXTRA_FLAVORS) ; do \
+ ( cd emulator && $(MAKE) release FLAVOR=$$f ) \
+ done
+ for d in $(ERTSDIRS) $(XINSTDIRS); do \
if test -d $$d; then \
( cd $$d && $(MAKE) $@ ) || exit $$? ; \
fi ; \
done
-# ----------------------------------------------------------------------
-
-.PHONY: debug opt docs clean local_setup configure release \
- release_docs run_test_cases hybrid smp
+.PHONY: release_docs
+release_docs:
+ ( cd doc/src && $(MAKE) $@ )
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index b64380e817..339a15a2bb 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -112,6 +112,94 @@ fi
dnl ----------------------------------------------------------------------
dnl
+dnl LM_WINDOWS_ENVIRONMENT
+dnl
+dnl
+dnl Tries to determine the windows build environment, i.e.
+dnl MIXED_CYGWIN_VC or MIXED_MSYS_VC
+dnl
+
+AC_DEFUN(LM_WINDOWS_ENVIRONMENT,
+[
+MIXED_CYGWIN=no
+MIXED_MSYS=no
+
+AC_MSG_CHECKING(for mixed cygwin or msys and native VC++ environment)
+if test "X$host" = "Xwin32" -a "x$GCC" != "xyes"; then
+ if test -x /usr/bin/cygpath; then
+ CFLAGS="-O2"
+ MIXED_CYGWIN=yes
+ AC_MSG_RESULT([Cygwin and VC])
+ MIXED_CYGWIN_VC=yes
+ CPPFLAGS="$CPPFLAGS -DERTS_MIXED_CYGWIN_VC"
+ elif test -x /usr/bin/msysinfo; then
+ CFLAGS="-O2"
+ MIXED_MSYS=yes
+ AC_MSG_RESULT([MSYS and VC])
+ MIXED_MSYS_VC=yes
+ CPPFLAGS="$CPPFLAGS -DERTS_MIXED_MSYS_VC"
+ else
+ AC_MSG_RESULT([undeterminable])
+ AC_MSG_ERROR(Seems to be mixed windows but not with cygwin, cannot handle this!)
+ fi
+else
+ AC_MSG_RESULT([no])
+ MIXED_CYGWIN_VC=no
+ MIXED_MSYS_VC=no
+fi
+AC_SUBST(MIXED_CYGWIN_VC)
+AC_SUBST(MIXED_MSYS_VC)
+
+MIXED_VC=no
+if test "x$MIXED_MSYS_VC" = "xyes" -o "x$MIXED_CYGWIN_VC" = "xyes" ; then
+ MIXED_VC=yes
+fi
+
+AC_SUBST(MIXED_VC)
+
+if test "x$MIXED_MSYS" != "xyes"; then
+ AC_MSG_CHECKING(for mixed cygwin and native MinGW environment)
+ if test "X$host" = "Xwin32" -a "x$GCC" = x"yes"; then
+ if test -x /usr/bin/cygpath; then
+ CFLAGS="-O2"
+ MIXED_CYGWIN=yes
+ AC_MSG_RESULT([yes])
+ MIXED_CYGWIN_MINGW=yes
+ CPPFLAGS="$CPPFLAGS -DERTS_MIXED_CYGWIN_MINGW"
+ else
+ AC_MSG_RESULT([undeterminable])
+ AC_MSG_ERROR(Seems to be mixed windows but not with cygwin, cannot handle this!)
+ fi
+ else
+ AC_MSG_RESULT([no])
+ MIXED_CYGWIN_MINGW=no
+ fi
+else
+ MIXED_CYGWIN_MINGW=no
+fi
+AC_SUBST(MIXED_CYGWIN_MINGW)
+
+AC_MSG_CHECKING(if we mix cygwin with any native compiler)
+if test "X$MIXED_CYGWIN" = "Xyes"; then
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+
+AC_SUBST(MIXED_CYGWIN)
+
+AC_MSG_CHECKING(if we mix msys with another native compiler)
+if test "X$MIXED_MSYS" = "Xyes" ; then
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+
+AC_SUBST(MIXED_MSYS)
+])
+
+dnl ----------------------------------------------------------------------
+dnl
dnl LM_FIND_EMU_CC
dnl
dnl
@@ -125,6 +213,11 @@ AC_DEFUN(LM_FIND_EMU_CC,
ac_cv_prog_emu_cc,
[
AC_TRY_COMPILE([],[
+#if defined(__clang_major__) && __clang_major__ >= 3
+ /* clang 3.x or later is fine */
+#elif defined(__llvm__)
+#error "this version of llvm is unable to correctly compile beam_emu.c"
+#endif
__label__ lbl1;
__label__ lbl2;
int x = magic();
@@ -140,7 +233,7 @@ lbl2:
],ac_cv_prog_emu_cc=$CC,ac_cv_prog_emu_cc=no)
if test $ac_cv_prog_emu_cc = no; then
- for ac_progname in emu_cc.sh gcc; do
+ for ac_progname in emu_cc.sh gcc-4.2 gcc; do
IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
ac_dummy="$PATH"
for ac_dir in $ac_dummy; do
@@ -165,6 +258,11 @@ if test $ac_cv_prog_emu_cc != no; then
CFLAGS=""
CPPFLAGS=""
AC_TRY_COMPILE([],[
+#if defined(__clang_major__) && __clang_major__ >= 3
+ /* clang 3.x or later is fine */
+#elif defined(__llvm__)
+#error "this version of llvm is unable to correctly compile beam_emu.c"
+#endif
__label__ lbl1;
__label__ lbl2;
int x = magic();
@@ -679,6 +777,55 @@ AC_SUBST(ERTS_INTERNAL_X_LIBS)
])
+AC_DEFUN(ETHR_CHK_SYNC_OP,
+[
+ AC_MSG_CHECKING([for $3-bit $1()])
+ case "$2" in
+ "1") sync_call="$1(&var);";;
+ "2") sync_call="$1(&var, ($4) 0);";;
+ "3") sync_call="$1(&var, ($4) 0, ($4) 0);";;
+ esac
+ have_sync_op=no
+ AC_TRY_LINK([],
+ [
+ $4 res;
+ volatile $4 var;
+ res = $sync_call
+ ],
+ [have_sync_op=yes])
+ test $have_sync_op = yes && $5
+ AC_MSG_RESULT([$have_sync_op])
+])
+
+AC_DEFUN(ETHR_CHK_INTERLOCKED,
+[
+ ilckd="$1"
+ AC_MSG_CHECKING([for ${ilckd}()])
+ case "$2" in
+ "1") ilckd_call="${ilckd}(var);";;
+ "2") ilckd_call="${ilckd}(var, ($3) 0);";;
+ "3") ilckd_call="${ilckd}(var, ($3) 0, ($3) 0);";;
+ "4") ilckd_call="${ilckd}(var, ($3) 0, ($3) 0, arr);";;
+ esac
+ have_interlocked_op=no
+ AC_TRY_LINK(
+ [
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ #include <intrin.h>
+ ],
+ [
+ volatile $3 *var;
+ volatile $3 arr[2];
+
+ $ilckd_call
+ return 0;
+ ],
+ [have_interlocked_op=yes])
+ test $have_interlocked_op = yes && $4
+ AC_MSG_RESULT([$have_interlocked_op])
+])
+
dnl ----------------------------------------------------------------------
dnl
dnl ERL_FIND_ETHR_LIB
@@ -750,121 +897,41 @@ case "$THR_LIB_NAME" in
AC_DEFINE(ETHR_WIN32_THREADS, 1, [Define if you have win32 threads])
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedCompareExchange64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedCompareExchange64(var, (__int64) 1, (__int64) 0);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64, 1, [Define if you have _InterlockedCompareExchange64()])
-
- AC_CHECK_SIZEOF(void *)
- case "$ac_cv_sizeof_void_p-$have_ilckd" in
- 8-no)
- ethr_have_native_atomics=no
- ethr_have_native_spinlock=no;;
- *)
- ethr_have_native_atomics=yes
- ethr_have_native_spinlock=yes;;
- esac
-
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedDecrement64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedDecrement64(var);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDDECREMENT64, 1, [Define if you have _InterlockedDecrement64()])
-
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedIncrement64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedIncrement64(var);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDINCREMENT64, 1, [Define if you have _InterlockedIncrement64()])
-
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedExchangeAdd64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedExchangeAdd64(var, (__int64) 1);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDEXCHANGEADD64, 1, [Define if you have _InterlockedExchangeAdd64()])
-
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedExchange64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedExchange64(var, (__int64) 1);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDEXCHANGE64, 1, [Define if you have _InterlockedExchange64()])
-
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedAnd64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedAnd64(var, (__int64) 1);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDAND64, 1, [Define if you have _InterlockedAnd64()])
-
- have_ilckd=no
- AC_MSG_CHECKING([for _InterlockedOr64()])
- AC_TRY_LINK([
- #define WIN32_LEAN_AND_MEAN
- #include <windows.h>
- ],
- [
- volatile __int64 *var;
- _InterlockedOr64(var, (__int64) 1);
- return 0;
- ],
- have_ilckd=yes)
- AC_MSG_RESULT([$have_ilckd])
- test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDOR64, 1, [Define if you have _InterlockedOr64()])
-
+ ETHR_CHK_INTERLOCKED([_InterlockedDecrement], [1], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDDECREMENT, 1, [Define if you have _InterlockedDecrement()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedDecrement_rel], [1], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDDECREMENT_REL, 1, [Define if you have _InterlockedDecrement_rel()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedIncrement], [1], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDINCREMENT, 1, [Define if you have _InterlockedIncrement()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedIncrement_acq], [1], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDINCREMENT_ACQ, 1, [Define if you have _InterlockedIncrement_acq()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedExchangeAdd], [2], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDEXCHANGEADD, 1, [Define if you have _InterlockedExchangeAdd()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedExchangeAdd_acq], [2], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDEXCHANGEADD_ACQ, 1, [Define if you have _InterlockedExchangeAdd_acq()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedAnd], [2], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDAND, 1, [Define if you have _InterlockedAnd()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedOr], [2], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDOR, 1, [Define if you have _InterlockedOr()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedExchange], [2], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDEXCHANGE, 1, [Define if you have _InterlockedExchange()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange], [3], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE, 1, [Define if you have _InterlockedCompareExchange()]))
+ test "$have_interlocked_op" = "yes" && ethr_have_native_atomics=yes
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange_acq], [3], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE_ACQ, 1, [Define if you have _InterlockedCompareExchange_acq()]))
+ test "$have_interlocked_op" = "yes" && ethr_have_native_atomics=yes
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange_rel], [3], [long], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE_REL, 1, [Define if you have _InterlockedCompareExchange_rel()]))
+ test "$have_interlocked_op" = "yes" && ethr_have_native_atomics=yes
+
+ ETHR_CHK_INTERLOCKED([_InterlockedDecrement64], [1], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDDECREMENT64, 1, [Define if you have _InterlockedDecrement64()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedDecrement64_rel], [1], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDDECREMENT64_REL, 1, [Define if you have _InterlockedDecrement64_rel()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedIncrement64], [1], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDINCREMENT64, 1, [Define if you have _InterlockedIncrement64()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedIncrement64_acq], [1], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDINCREMENT64_ACQ, 1, [Define if you have _InterlockedIncrement64_acq()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedExchangeAdd64], [2], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDEXCHANGEADD64, 1, [Define if you have _InterlockedExchangeAdd64()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedExchangeAdd64_acq], [2], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDEXCHANGEADD64_ACQ, 1, [Define if you have _InterlockedExchangeAdd64_acq()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedAnd64], [2], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDAND64, 1, [Define if you have _InterlockedAnd64()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedOr64], [2], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDOR64, 1, [Define if you have _InterlockedOr64()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedExchange64], [2], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDEXCHANGE64, 1, [Define if you have _InterlockedExchange64()]))
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange64], [3], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64, 1, [Define if you have _InterlockedCompareExchange64()]))
+ test "$have_interlocked_op" = "yes" && ethr_have_native_atomics=yes
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange64_acq], [3], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_ACQ, 1, [Define if you have _InterlockedCompareExchange64_acq()]))
+ test "$have_interlocked_op" = "yes" && ethr_have_native_atomics=yes
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange64_rel], [3], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_REL, 1, [Define if you have _InterlockedCompareExchange64_rel()]))
+ test "$have_interlocked_op" = "yes" && ethr_have_native_atomics=yes
+
+ ETHR_CHK_INTERLOCKED([_InterlockedCompareExchange128], [4], [__int64], AC_DEFINE_UNQUOTED(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE128, 1, [Define if you have _InterlockedCompareExchange128()]))
+
+ test "$ethr_have_native_atomics" = "yes" && ethr_have_native_spinlock=yes
;;
pthread)
@@ -1100,35 +1167,51 @@ case "$THR_LIB_NAME" in
AC_MSG_RESULT([$linux_futex])
test $linux_futex = yes && AC_DEFINE(ETHR_HAVE_LINUX_FUTEX, 1, [Define if you have a linux futex implementation.])
- AC_MSG_CHECKING([for GCC atomic operations])
- ethr_have_gcc_atomic_ops=no
- AC_TRY_LINK([],
- [
- long res;
- volatile long val;
- res = __sync_val_compare_and_swap(&val, (long) 1, (long) 0);
- res = __sync_add_and_fetch(&val, (long) 1);
- res = __sync_sub_and_fetch(&val, (long) 1);
- res = __sync_fetch_and_and(&val, (long) 1);
- res = __sync_fetch_and_or(&val, (long) 1);
- ],
- [ethr_have_native_atomics=yes
- ethr_have_gcc_atomic_ops=yes])
- AC_MSG_RESULT([$ethr_have_gcc_atomic_ops])
- test $ethr_have_gcc_atomic_ops = yes && AC_DEFINE(ETHR_HAVE_GCC_ATOMIC_OPS, 1, [Define if you have gcc atomic operations])
+ AC_CHECK_SIZEOF(int)
+ AC_CHECK_SIZEOF(long)
+ AC_CHECK_SIZEOF(long long)
+ AC_CHECK_SIZEOF(__int128_t)
+
+ if test "$ac_cv_sizeof_int" = "4"; then
+ int32="int"
+ elif test "$ac_cv_sizeof_long" = "4"; then
+ int32="long"
+ elif test "$ac_cv_sizeof_long_long" = "4"; then
+ int32="long long"
+ else
+ AC_MSG_ERROR([No 32-bit type found])
+ fi
- case "$host_cpu" in
- sun4u | sparc64 | sun4v)
- ethr_have_native_atomics=yes;;
- i86pc | i*86 | x86_64 | amd64)
- ethr_have_native_atomics=yes;;
- macppc | ppc | "Power Macintosh")
- ethr_have_native_atomics=yes;;
- tile)
- ethr_have_native_atomics=yes;;
- *)
- ;;
- esac
+ if test "$ac_cv_sizeof_int" = "8"; then
+ int64="int"
+ elif test "$ac_cv_sizeof_long" = "8"; then
+ int64="long"
+ elif test "$ac_cv_sizeof_long_long" = "8"; then
+ int64="long long"
+ else
+ AC_MSG_ERROR([No 64-bit type found])
+ fi
+
+ int128=no
+ if test "$ac_cv_sizeof___int128_t" = "16"; then
+ int128="__int128_t"
+ fi
+
+ ETHR_CHK_SYNC_OP([__sync_val_compare_and_swap], [3], [32], [$int32], AC_DEFINE(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP32, 1, [Define if you have __sync_val_compare_and_swap() for 32-bit integers]))
+ test "$have_sync_op" = "yes" && ethr_have_native_atomics=yes
+ ETHR_CHK_SYNC_OP([__sync_add_and_fetch], [2], [32], [$int32], AC_DEFINE(ETHR_HAVE___SYNC_ADD_AND_FETCH32, 1, [Define if you have __sync_add_and_fetch() for 32-bit integers]))
+ ETHR_CHK_SYNC_OP([__sync_fetch_and_and], [2], [32], [$int32], AC_DEFINE(ETHR_HAVE___SYNC_FETCH_AND_AND32, 1, [Define if you have __sync_fetch_and_and() for 32-bit integers]))
+ ETHR_CHK_SYNC_OP([__sync_fetch_and_or], [2], [32], [$int32], AC_DEFINE(ETHR_HAVE___SYNC_FETCH_AND_OR32, 1, [Define if you have __sync_fetch_and_or() for 32-bit integers]))
+
+ ETHR_CHK_SYNC_OP([__sync_val_compare_and_swap], [3], [64], [$int64], AC_DEFINE(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP64, 1, [Define if you have __sync_val_compare_and_swap() for 64-bit integers]))
+ test "$have_sync_op" = "yes" && ethr_have_native_atomics=yes
+ ETHR_CHK_SYNC_OP([__sync_add_and_fetch], [2], [64], [$int64], AC_DEFINE(ETHR_HAVE___SYNC_ADD_AND_FETCH64, 1, [Define if you have __sync_add_and_fetch() for 64-bit integers]))
+ ETHR_CHK_SYNC_OP([__sync_fetch_and_and], [2], [64], [$int64], AC_DEFINE(ETHR_HAVE___SYNC_FETCH_AND_AND64, 1, [Define if you have __sync_fetch_and_and() for 64-bit integers]))
+ ETHR_CHK_SYNC_OP([__sync_fetch_and_or], [2], [64], [$int64], AC_DEFINE(ETHR_HAVE___SYNC_FETCH_AND_OR64, 1, [Define if you have __sync_fetch_and_or() for 64-bit integers]))
+
+ if test $int128 != no; then
+ ETHR_CHK_SYNC_OP([__sync_val_compare_and_swap], [3], [128], [$int128], AC_DEFINE(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP128, 1, [Define if you have __sync_val_compare_and_swap() for 128-bit integers]))
+ fi
AC_MSG_CHECKING([for a usable libatomic_ops implementation])
case "x$with_libatomic_ops" in
@@ -1175,6 +1258,34 @@ case "$THR_LIB_NAME" in
AC_MSG_ERROR([No usable libatomic_ops implementation found])
fi
+ case "$host_cpu" in
+ sparc | sun4u | sparc64 | sun4v)
+ case "$with_sparc_memory_order" in
+ "TSO")
+ AC_DEFINE(ETHR_SPARC_TSO, 1, [Define if only run in Sparc TSO mode]);;
+ "PSO")
+ AC_DEFINE(ETHR_SPARC_PSO, 1, [Define if only run in Sparc PSO, or TSO mode]);;
+ "RMO"|"")
+ AC_DEFINE(ETHR_SPARC_RMO, 1, [Define if run in Sparc RMO, PSO, or TSO mode]);;
+ *)
+ AC_MSG_ERROR([Unsupported Sparc memory order: $with_sparc_memory_order]);;
+ esac
+ ethr_have_native_atomics=yes;;
+ i86pc | i*86 | x86_64 | amd64)
+ if test "$enable_x86_out_of_order" = "yes"; then
+ AC_DEFINE(ETHR_X86_OUT_OF_ORDER, 1, [Define if x86/x86_64 out of order instructions should be synchronized])
+ fi
+ ethr_have_native_atomics=yes;;
+ macppc | ppc | "Power Macintosh")
+ ethr_have_native_atomics=yes;;
+ tile)
+ ethr_have_native_atomics=yes;;
+ *)
+ ;;
+ esac
+
+ test ethr_have_native_atomics = "yes" && ethr_have_native_spinlock=yes
+
dnl Restore LIBS
LIBS=$saved_libs
dnl restore CPPFLAGS
@@ -1210,6 +1321,8 @@ AC_CHECK_SIZEOF(long long)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG_LONG, $ac_cv_sizeof_long_long, [Define to the size of long long])
AC_CHECK_SIZEOF(__int64)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF___INT64, $ac_cv_sizeof___int64, [Define to the size of __int64])
+AC_CHECK_SIZEOF(__int128_t)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF___INT128_T, $ac_cv_sizeof___int128_t, [Define to the size of __int128_t])
case X$erl_xcomp_bigendian in
@@ -1232,6 +1345,10 @@ AC_ARG_ENABLE(native-ethr-impls,
*) disable_native_ethr_impls=no ;;
esac ], disable_native_ethr_impls=no)
+AC_ARG_ENABLE(x86-out-of-order,
+ AS_HELP_STRING([--enable-x86-out-of-order],
+ [enable x86/x86_64 out of order support (default disabled)]))
+
test "X$disable_native_ethr_impls" = "Xyes" &&
AC_DEFINE(ETHR_DISABLE_NATIVE_IMPLS, 1, [Define if you want to disable native ethread implementations])
@@ -1250,56 +1367,100 @@ AC_ARG_WITH(libatomic_ops,
AS_HELP_STRING([--with-libatomic_ops=PATH],
[specify and prefer usage of libatomic_ops in the ethread library]))
-AC_ARG_ENABLE(ethread-pre-pentium4-compatibility,
- AS_HELP_STRING([--enable-ethread-pre-pentium4-compatibility],
- [enable compatibility with x86 processors before pentium 4 (back to 486) in the ethread library]),
-[
- case "$enable_ethread_pre_pentium4_compatibility" in
- yes|no) ;;
- *) enable_ethread_pre_pentium4_compatibility=check;;
- esac
-],
-[enable_ethread_pre_pentium4_compatibility=check])
-
-test "$cross_compiling" != "yes" || enable_ethread_pre_pentium4_compatibility=no
-
-case "$enable_ethread_pre_pentium4_compatibility-$host_cpu" in
- check-i86pc | check-i*86)
- AC_MSG_CHECKING([whether pre pentium 4 compatibility should forced])
- AC_RUN_IFELSE([
-#if defined(__GNUC__)
-# if defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
-# define CHECK_LIBATOMIC_OPS__
-# else
-# define CHECK_GCC_ASM__
-# endif
-#elif defined(ETHR_HAVE_LIBATOMIC_OPS)
-# define CHECK_LIBATOMIC_OPS__
+AC_ARG_WITH(with_sparc_memory_order,
+ AS_HELP_STRING([--with-sparc-memory-order=TSO|PSO|RMO],
+ [specify sparc memory order (defaults to RMO)]))
+
+ETHR_X86_SSE2_ASM=no
+case "$GCC-$ac_cv_sizeof_void_p-$host_cpu" in
+ yes-4-i86pc | yes-4-i*86 | yes-4-x86_64 | yes-4-amd64)
+ AC_MSG_CHECKING([for gcc sse2 asm support])
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -msse2"
+ gcc_sse2_asm=no
+ AC_TRY_COMPILE([],
+ [
+ long long x, *y;
+ __asm__ __volatile__("movq %1, %0\n\t" : "=x"(x) : "m"(*y) : "memory");
+ ],
+ [gcc_sse2_asm=yes])
+ CFLAGS="$save_CFLAGS"
+ AC_MSG_RESULT([$gcc_sse2_asm])
+ if test "$gcc_sse2_asm" = "yes"; then
+ AC_DEFINE(ETHR_GCC_HAVE_SSE2_ASM_SUPPORT, 1, [Define if you use a gcc that supports -msse2 and understand sse2 specific asm statements])
+ ETHR_X86_SSE2_ASM=yes
+ fi
+ ;;
+ *)
+ ;;
+esac
+
+case "$GCC-$host_cpu" in
+ yes-i86pc | yes-i*86 | yes-x86_64 | yes-amd64)
+ gcc_dw_cmpxchg_asm=no
+ AC_MSG_CHECKING([for gcc double word cmpxchg asm support])
+ AC_TRY_COMPILE([],
+ [
+ char xchgd;
+ long new[2], xchg[2], *p;
+ __asm__ __volatile__(
+#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
+ "pushl %%ebx\n\t"
+ "movl %8, %%ebx\n\t"
#endif
-#if defined(CHECK_LIBATOMIC_OPS__)
-#include "atomic_ops.h"
+#if ETHR_SIZEOF_PTR == 4
+ "lock; cmpxchg8b %0\n\t"
+#else
+ "lock; cmpxchg16b %0\n\t"
#endif
-int main(void)
-{
-#if defined(CHECK_GCC_ASM__)
- __asm__ __volatile__("mfence" : : : "memory");
-#elif defined(CHECK_LIBATOMIC_OPS__)
- AO_nop_full();
+ "setz %3\n\t"
+#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
+ "popl %%ebx\n\t"
#endif
- return 0;
-}
+ : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
+ : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "3"(new[1]),
+#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
+ "r"(new[0])
+#else
+ "b"(new[0])
+#endif
+ : "cc", "memory");
+
],
- [enable_ethread_pre_pentium4_compatibility=no],
- [enable_ethread_pre_pentium4_compatibility=yes],
- [enable_ethread_pre_pentium4_compatibility=no])
- AC_MSG_RESULT([$enable_ethread_pre_pentium4_compatibility]);;
+ [gcc_dw_cmpxchg_asm=yes])
+ if test $gcc_dw_cmpxchg_asm = no && test $ac_cv_sizeof_void_p = 4; then
+ AC_TRY_COMPILE([],
+ [
+ char xchgd;
+ long new[2], xchg[2], *p;
+#if !defined(__PIC__) || !__PIC__
+# error nope
+#endif
+ __asm__ __volatile__(
+ "pushl %%ebx\n\t"
+ "movl (%7), %%ebx\n\t"
+ "movl 4(%7), %%ecx\n\t"
+ "lock; cmpxchg8b %0\n\t"
+ "setz %3\n\t"
+ "popl %%ebx\n\t"
+ : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
+ : "m"(*p), "1"(xchg[1]), "2"(xchg[0]), "3"(new)
+ : "cc", "memory");
+
+ ],
+ [gcc_dw_cmpxchg_asm=yes])
+ if test "$gcc_dw_cmpxchg_asm" = "yes"; then
+ AC_DEFINE(ETHR_CMPXCHG8B_REGISTER_SHORTAGE, 1, [Define if you get a register shortage with cmpxchg8b and position independent code])
+ fi
+ fi
+ AC_MSG_RESULT([$gcc_dw_cmpxchg_asm])
+ if test "$gcc_dw_cmpxchg_asm" = "yes"; then
+ AC_DEFINE(ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT, 1, [Define if you use a gcc that supports the double word cmpxchg instruction])
+ fi;;
*)
;;
esac
-test $enable_ethread_pre_pentium4_compatibility = yes &&
- AC_DEFINE(ETHR_PRE_PENTIUM4_COMPAT, 1, [Define if you want compatibilty with x86 processors before pentium4.])
-
AC_DEFINE(ETHR_HAVE_ETHREAD_DEFINES, 1, \
[Define if you have all ethread defines])
@@ -1309,6 +1470,7 @@ AC_SUBST(ETHR_LIB_NAME)
AC_SUBST(ETHR_DEFS)
AC_SUBST(ETHR_THR_LIB_BASE)
AC_SUBST(ETHR_THR_LIB_BASE_DIR)
+AC_SUBST(ETHR_X86_SSE2_ASM)
])
@@ -1579,11 +1741,11 @@ dnl Freely inspired by AC_TRY_LINK. (Maybe better to create a
dnl AC_LANG_JAVA instead...)
AC_DEFUN(ERL_TRY_LINK_JAVA,
[java_link='$JAVAC conftest.java 1>&AC_FD_CC'
-changequote(«, »)dnl
+changequote(, )dnl
cat > conftest.java <<EOF
-«$1»
+$1
class conftest { public static void main(String[] args) {
- «$2»
+ $2
; return; }}
EOF
changequote([, ])dnl
diff --git a/erts/autoconf/win32.config.cache.static b/erts/autoconf/win32.config.cache.static
index d25b1df9d9..b387db2b22 100755
--- a/erts/autoconf/win32.config.cache.static
+++ b/erts/autoconf/win32.config.cache.static
@@ -96,7 +96,6 @@ ac_cv_func_sbrk=${ac_cv_func_sbrk=no}
ac_cv_func_select=${ac_cv_func_select=no}
ac_cv_func_setlocale=${ac_cv_func_setlocale=yes}
ac_cv_func_setsid=${ac_cv_func_setsid=no}
-ac_cv_func_setvbuf_reversed=${ac_cv_func_setvbuf_reversed=yes}
ac_cv_func_socket=${ac_cv_func_socket=no}
ac_cv_func_strchr=${ac_cv_func_strchr=yes}
ac_cv_func_strerror=${ac_cv_func_strerror=yes}
@@ -124,7 +123,6 @@ ac_cv_header_ieeefp_h=${ac_cv_header_ieeefp_h=no}
ac_cv_header_inttypes_h=${ac_cv_header_inttypes_h=no}
ac_cv_header_langinfo_h=${ac_cv_header_langinfo_h=no}
ac_cv_header_limits_h=${ac_cv_header_limits_h=yes}
-ac_cv_header_mach_o_dyld_h=${ac_cv_header_mach_o_dyld_h=no}
ac_cv_header_malloc_h=${ac_cv_header_malloc_h=yes}
ac_cv_header_memory_h=${ac_cv_header_memory_h=yes}
ac_cv_header_net_errno_h=${ac_cv_header_net_errno_h=no}
diff --git a/erts/autoconf/win64.config.cache.static b/erts/autoconf/win64.config.cache.static
new file mode 100755
index 0000000000..a8a2bfb59c
--- /dev/null
+++ b/erts/autoconf/win64.config.cache.static
@@ -0,0 +1,271 @@
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+ac_cv_c_bigendian=${ac_cv_c_bigendian=no}
+ac_cv_c_compiler_gnu=${ac_cv_c_compiler_gnu=no}
+ac_cv_c_const=${ac_cv_c_const=yes}
+ac_cv_cxx_compiler_gnu=${ac_cv_cxx_compiler_gnu=no}
+ac_cv_decl_h_errno=${ac_cv_decl_h_errno=no}
+ac_cv_decl_inaddr_loopback=${ac_cv_decl_inaddr_loopback=no}
+ac_cv_decl_inaddr_loopback_rpc=${ac_cv_decl_inaddr_loopback_rpc=no}
+ac_cv_decl_inaddr_loopback_winsock2=${ac_cv_decl_inaddr_loopback_winsock2=yes}
+ac_cv_decl_so_bsdcompat=${ac_cv_decl_so_bsdcompat=no}
+ac_cv_decl_sys_errlist=${ac_cv_decl_sys_errlist=no}
+ac_cv_env_AR_set=set
+ac_cv_env_AR_value=ar.sh
+ac_cv_env_CCC_set=
+ac_cv_env_CCC_value=
+ac_cv_env_CC_set=set
+ac_cv_env_CC_value=cc.sh
+ac_cv_env_CFLAGS_set=
+ac_cv_env_CFLAGS_value=
+ac_cv_env_CPPFLAGS_set=
+ac_cv_env_CPPFLAGS_value=
+ac_cv_env_CPP_set=
+ac_cv_env_CPP_value=
+ac_cv_env_CXXFLAGS_set=
+ac_cv_env_CXXFLAGS_value=
+ac_cv_env_CXX_set=set
+ac_cv_env_CXX_value=cc.sh
+ac_cv_env_LDFLAGS_set=
+ac_cv_env_LDFLAGS_value=
+ac_cv_env_build_alias_set=set
+ac_cv_env_build_alias_value=win32
+ac_cv_env_host_alias_set=set
+ac_cv_env_host_alias_value=win32
+ac_cv_env_target_alias_set=set
+ac_cv_env_target_alias_value=win32
+ac_cv_exeext=${ac_cv_exeext=.exe}
+ac_cv_func___brk=${ac_cv_func___brk=no}
+ac_cv_func___sbrk=${ac_cv_func___sbrk=no}
+ac_cv_func__brk=${ac_cv_func__brk=no}
+ac_cv_func__doprnt=${ac_cv_func__doprnt=no}
+ac_cv_func__sbrk=${ac_cv_func__sbrk=no}
+ac_cv_func_accept=${ac_cv_func_accept=no}
+ac_cv_func_alloca_works=${ac_cv_func_alloca_works=yes}
+ac_cv_func_brk=${ac_cv_func_brk=no}
+ac_cv_func_clock_gettime=${ac_cv_func_clock_gettime=no}
+ac_cv_func_connect=${ac_cv_func_connect=no}
+ac_cv_func_decl_fread=${ac_cv_func_decl_fread=yes}
+ac_cv_func_dlopen=${ac_cv_func_dlopen=no}
+ac_cv_func_dup2=${ac_cv_func_dup2=yes}
+ac_cv_func_fdatasync=${ac_cv_func_fdatasync=no}
+ac_cv_func_finite=${ac_cv_func_finite=no}
+ac_cv_func_flockfile=${ac_cv_func_flockfile=no}
+ac_cv_func_fork=${ac_cv_func_fork=no}
+ac_cv_func_fork_works=${ac_cv_func_fork_works=no}
+ac_cv_func_fpsetmask=${ac_cv_func_fpsetmask=no}
+ac_cv_func_fstat=${ac_cv_func_fstat=yes}
+ac_cv_func_gethostbyaddr=${ac_cv_func_gethostbyaddr=no}
+ac_cv_func_gethostbyaddr_r=${ac_cv_func_gethostbyaddr_r=no}
+ac_cv_func_gethostbyname=${ac_cv_func_gethostbyname=yes}
+ac_cv_func_gethostbyname2=${ac_cv_func_gethostbyname2=no}
+ac_cv_func_gethostbyname_r=${ac_cv_func_gethostbyname_r=no}
+ac_cv_func_gethostname=${ac_cv_func_gethostname=no}
+ac_cv_func_gethrtime=${ac_cv_func_gethrtime=no}
+ac_cv_func_getifaddrs=${ac_cv_func_getifaddrs=no}
+ac_cv_func_getipnodebyaddr=${ac_cv_func_getipnodebyaddr=no}
+ac_cv_func_getipnodebyname=${ac_cv_func_getipnodebyname=no}
+ac_cv_func_getpagesize=${ac_cv_func_getpagesize=no}
+ac_cv_func_gettimeofday=${ac_cv_func_gettimeofday=no}
+ac_cv_func_gmtime_r=${ac_cv_func_gmtime_r=no}
+ac_cv_func_ieee_handler=${ac_cv_func_ieee_handler=no}
+ac_cv_func_inet_ntoa=${ac_cv_func_inet_ntoa=no}
+ac_cv_func_inet_pton=${ac_cv_func_inet_pton=yes}
+ac_cv_func_isinf=${ac_cv_func_isinf=no}
+ac_cv_func_isnan=${ac_cv_func_isnan=no}
+ac_cv_func_localtime_r=${ac_cv_func_localtime_r=no}
+ac_cv_func_mallopt=${ac_cv_func_mallopt=no}
+ac_cv_func_memchr=${ac_cv_func_memchr=yes}
+ac_cv_func_memcmp_working=${ac_cv_func_memcmp_working=yes}
+ac_cv_func_memcpy=${ac_cv_func_memcpy=no}
+ac_cv_func_memmove=${ac_cv_func_memmove=yes}
+ac_cv_func_memset=${ac_cv_func_memset=yes}
+ac_cv_func_mmap=${ac_cv_func_mmap=no}
+ac_cv_func_mmap_fixed_mapped=${ac_cv_func_mmap_fixed_mapped=no}
+ac_cv_func_mremap=${ac_cv_func_mremap=no}
+ac_cv_func_nl_langinfo=${ac_cv_func_nl_langinfo=no}
+ac_cv_func_openpty=${ac_cv_func_openpty=no}
+ac_cv_func_poll=${ac_cv_func_poll=no}
+ac_cv_func_posix2time=${ac_cv_func_posix2time=no}
+ac_cv_func_posix_fadvise=${ac_cv_func_posix_fadvise=no}
+ac_cv_func_pread=${ac_cv_func_pread=no}
+ac_cv_func_pwrite=${ac_cv_func_pwrite=no}
+ac_cv_func_res_gethostbyname=${ac_cv_func_res_gethostbyname=no}
+ac_cv_func_sbrk=${ac_cv_func_sbrk=no}
+ac_cv_func_sctp_bindx=${ac_cv_func_sctp_bindx=no}
+ac_cv_func_sctp_peeloff=${ac_cv_func_sctp_peeloff=no}
+ac_cv_func_select=${ac_cv_func_select=no}
+ac_cv_func_setlocale=${ac_cv_func_setlocale=yes}
+ac_cv_func_setsid=${ac_cv_func_setsid=no}
+ac_cv_func_setvbuf_reversed=${ac_cv_func_setvbuf_reversed=yes}
+ac_cv_func_socket=${ac_cv_func_socket=no}
+ac_cv_func_strchr=${ac_cv_func_strchr=yes}
+ac_cv_func_strerror=${ac_cv_func_strerror=yes}
+ac_cv_func_strerror_r=${ac_cv_func_strerror_r=no}
+ac_cv_func_strlcat=${ac_cv_func_strlcat=no}
+ac_cv_func_strlcpy=${ac_cv_func_strlcpy=no}
+ac_cv_func_strncasecmp=${ac_cv_func_strncasecmp=no}
+ac_cv_func_strrchr=${ac_cv_func_strrchr=yes}
+ac_cv_func_strstr=${ac_cv_func_strstr=yes}
+ac_cv_func_uname=${ac_cv_func_uname=no}
+ac_cv_func_vfork=${ac_cv_func_vfork=no}
+ac_cv_func_vfork_works=${ac_cv_func_vfork_works=no}
+ac_cv_func_vprintf=${ac_cv_func_vprintf=yes}
+ac_cv_func_writev=${ac_cv_func_writev=no}
+ac_cv_have_decl_SCTPS_BOUND=${ac_cv_have_decl_SCTPS_BOUND=no}
+ac_cv_have_decl_SCTPS_COOKIE_ECHOED=${ac_cv_have_decl_SCTPS_COOKIE_ECHOED=no}
+ac_cv_have_decl_SCTPS_COOKIE_WAIT=${ac_cv_have_decl_SCTPS_COOKIE_WAIT=no}
+ac_cv_have_decl_SCTPS_ESTABLISHED=${ac_cv_have_decl_SCTPS_ESTABLISHED=no}
+ac_cv_have_decl_SCTPS_IDLE=${ac_cv_have_decl_SCTPS_IDLE=no}
+ac_cv_have_decl_SCTPS_LISTEN=${ac_cv_have_decl_SCTPS_LISTEN=no}
+ac_cv_have_decl_SCTPS_SHUTDOWN_ACK_SENT=${ac_cv_have_decl_SCTPS_SHUTDOWN_ACK_SENT=no}
+ac_cv_have_decl_SCTPS_SHUTDOWN_PENDING=${ac_cv_have_decl_SCTPS_SHUTDOWN_PENDING=no}
+ac_cv_have_decl_SCTPS_SHUTDOWN_RECEIVED=${ac_cv_have_decl_SCTPS_SHUTDOWN_RECEIVED=no}
+ac_cv_have_decl_SCTPS_SHUTDOWN_SENT=${ac_cv_have_decl_SCTPS_SHUTDOWN_SENT=no}
+ac_cv_have_decl_SCTP_ABORT=${ac_cv_have_decl_SCTP_ABORT=no}
+ac_cv_have_decl_SCTP_ADDR_CONFIRMED=${ac_cv_have_decl_SCTP_ADDR_CONFIRMED=no}
+ac_cv_have_decl_SCTP_ADDR_OVER=${ac_cv_have_decl_SCTP_ADDR_OVER=no}
+ac_cv_have_decl_SCTP_BOUND=${ac_cv_have_decl_SCTP_BOUND=no}
+ac_cv_have_decl_SCTP_CLOSED=${ac_cv_have_decl_SCTP_CLOSED=no}
+ac_cv_have_decl_SCTP_COOKIE_ECHOED=${ac_cv_have_decl_SCTP_COOKIE_ECHOED=no}
+ac_cv_have_decl_SCTP_COOKIE_WAIT=${ac_cv_have_decl_SCTP_COOKIE_WAIT=no}
+ac_cv_have_decl_SCTP_DELAYED_ACK_TIME=${ac_cv_have_decl_SCTP_DELAYED_ACK_TIME=no}
+ac_cv_have_decl_SCTP_EMPTY=${ac_cv_have_decl_SCTP_EMPTY=no}
+ac_cv_have_decl_SCTP_EOF=${ac_cv_have_decl_SCTP_EOF=no}
+ac_cv_have_decl_SCTP_ESTABLISHED=${ac_cv_have_decl_SCTP_ESTABLISHED=no}
+ac_cv_have_decl_SCTP_LISTEN=${ac_cv_have_decl_SCTP_LISTEN=no}
+ac_cv_have_decl_SCTP_SENDALL=${ac_cv_have_decl_SCTP_SENDALL=no}
+ac_cv_have_decl_SCTP_SHUTDOWN_ACK_SENT=${ac_cv_have_decl_SCTP_SHUTDOWN_ACK_SENT=no}
+ac_cv_have_decl_SCTP_SHUTDOWN_PENDING=${ac_cv_have_decl_SCTP_SHUTDOWN_PENDING=no}
+ac_cv_have_decl_SCTP_SHUTDOWN_RECEIVED=${ac_cv_have_decl_SCTP_SHUTDOWN_RECEIVED=no}
+ac_cv_have_decl_SCTP_SHUTDOWN_SENT=${ac_cv_have_decl_SCTP_SHUTDOWN_SENT=no}
+ac_cv_have_decl_SCTP_UNORDERED=${ac_cv_have_decl_SCTP_UNORDERED=no}
+ac_cv_have_decl_posix2time=${ac_cv_have_decl_posix2time=no}
+ac_cv_header_arpa_inet_h=${ac_cv_header_arpa_inet_h=no}
+ac_cv_header_arpa_nameser_h=${ac_cv_header_arpa_nameser_h=no}
+ac_cv_header_dirent_dirent_h=${ac_cv_header_dirent_dirent_h=no}
+ac_cv_header_dirent_ndir_h=${ac_cv_header_dirent_ndir_h=no}
+ac_cv_header_dirent_sys_dir_h=${ac_cv_header_dirent_sys_dir_h=no}
+ac_cv_header_dirent_sys_ndir_h=${ac_cv_header_dirent_sys_ndir_h=no}
+ac_cv_header_dlfcn_h=${ac_cv_header_dlfcn_h=no}
+ac_cv_header_fcntl_h=${ac_cv_header_fcntl_h=yes}
+ac_cv_header_gl_gl_h=${ac_cv_header_gl_gl_h=yes}
+ac_cv_header_ieeefp_h=${ac_cv_header_ieeefp_h=no}
+ac_cv_header_ifaddrs_h=${ac_cv_header_ifaddrs_h=no}
+ac_cv_header_inttypes_h=${ac_cv_header_inttypes_h=no}
+ac_cv_header_langinfo_h=${ac_cv_header_langinfo_h=no}
+ac_cv_header_limits_h=${ac_cv_header_limits_h=yes}
+ac_cv_header_mach_o_dyld_h=${ac_cv_header_mach_o_dyld_h=no}
+ac_cv_header_malloc_h=${ac_cv_header_malloc_h=yes}
+ac_cv_header_memory_h=${ac_cv_header_memory_h=yes}
+ac_cv_header_net_errno_h=${ac_cv_header_net_errno_h=no}
+ac_cv_header_net_if_dl_h=${ac_cv_header_net_if_dl_h=no}
+ac_cv_header_netdb_h=${ac_cv_header_netdb_h=no}
+ac_cv_header_netinet_in_h=${ac_cv_header_netinet_in_h=no}
+ac_cv_header_netpacket_packet_h=${ac_cv_header_netpacket_packet_h=no}
+ac_cv_header_poll_h=${ac_cv_header_poll_h=no}
+ac_cv_header_pty_h=${ac_cv_header_pty_h=no}
+ac_cv_header_stdc=${ac_cv_header_stdc=yes}
+ac_cv_header_stddef_h=${ac_cv_header_stddef_h=yes}
+ac_cv_header_stdint_h=${ac_cv_header_stdint_h=yes}
+ac_cv_header_stdlib_h=${ac_cv_header_stdlib_h=yes}
+ac_cv_header_string_h=${ac_cv_header_string_h=yes}
+ac_cv_header_strings_h=${ac_cv_header_strings_h=no}
+ac_cv_header_sys_devpoll_h=${ac_cv_header_sys_devpoll_h=no}
+ac_cv_header_sys_epoll_h=${ac_cv_header_sys_epoll_h=no}
+ac_cv_header_sys_event_h=${ac_cv_header_sys_event_h=no}
+ac_cv_header_sys_ioctl_h=${ac_cv_header_sys_ioctl_h=no}
+ac_cv_header_sys_param_h=${ac_cv_header_sys_param_h=no}
+ac_cv_header_sys_resource_h=${ac_cv_header_sys_resource_h=no}
+ac_cv_header_sys_select_h=${ac_cv_header_sys_select_h=no}
+ac_cv_header_sys_socket_h=${ac_cv_header_sys_socket_h=no}
+ac_cv_header_sys_socketio_h=${ac_cv_header_sys_socketio_h=no}
+ac_cv_header_sys_sockio_h=${ac_cv_header_sys_sockio_h=no}
+ac_cv_header_sys_stat_h=${ac_cv_header_sys_stat_h=yes}
+ac_cv_header_sys_stropts_h=${ac_cv_header_sys_stropts_h=no}
+ac_cv_header_sys_sysctl_h=${ac_cv_header_sys_sysctl_h=no}
+ac_cv_header_sys_time_h=${ac_cv_header_sys_time_h=no}
+ac_cv_header_sys_types_h=${ac_cv_header_sys_types_h=yes}
+ac_cv_header_sys_uio_h=${ac_cv_header_sys_uio_h=no}
+ac_cv_header_sys_wait_h=${ac_cv_header_sys_wait_h=no}
+ac_cv_header_syslog_h=${ac_cv_header_syslog_h=no}
+ac_cv_header_time=${ac_cv_header_time=no}
+ac_cv_header_unistd_h=${ac_cv_header_unistd_h=no}
+ac_cv_header_util_h=${ac_cv_header_util_h=no}
+ac_cv_header_utmp_h=${ac_cv_header_utmp_h=no}
+ac_cv_header_valgrind_valgrind_h=${ac_cv_header_valgrind_valgrind_h=no}
+ac_cv_header_vfork_h=${ac_cv_header_vfork_h=no}
+ac_cv_header_windows_h=${ac_cv_header_windows_h=yes}
+ac_cv_header_winsock2_h=${ac_cv_header_winsock2_h=yes}
+ac_cv_header_ws2tcpip_h=${ac_cv_header_ws2tcpip_h=yes}
+ac_cv_lib_dl_dlopen=${ac_cv_lib_dl_dlopen=no}
+ac_cv_lib_inet_main=${ac_cv_lib_inet_main=no}
+ac_cv_lib_kstat_kstat_open=${ac_cv_lib_kstat_kstat_open=no}
+ac_cv_lib_m_sin=${ac_cv_lib_m_sin=no}
+ac_cv_lib_nsl_gethostbyname=${ac_cv_lib_nsl_gethostbyname=no}
+ac_cv_lib_nsl_main=${ac_cv_lib_nsl_main=no}
+ac_cv_lib_resolv_res_gethostbyname=${ac_cv_lib_resolv_res_gethostbyname=no}
+ac_cv_lib_rt_clock_gettime=${ac_cv_lib_rt_clock_gettime=no}
+ac_cv_lib_socket_getpeername=${ac_cv_lib_socket_getpeername=no}
+ac_cv_lib_socket_main=${ac_cv_lib_socket_main=yes}
+ac_cv_lib_socket_socket=${ac_cv_lib_socket_socket=no}
+ac_cv_lib_util_openpty=${ac_cv_lib_util_openpty=no}
+ac_cv_lib_ws2_32_main=${ac_cv_lib_ws2_32_main=yes}
+ac_cv_member_struct_ErlDrvEntry_stop_select=${ac_cv_member_struct_ErlDrvEntry_stop_select=no}
+ac_cv_member_struct_sctp_paddrparams_spp_flags=${ac_cv_member_struct_sctp_paddrparams_spp_flags=no}
+ac_cv_member_struct_sctp_paddrparams_spp_pathmtu=${ac_cv_member_struct_sctp_paddrparams_spp_pathmtu=no}
+ac_cv_member_struct_sctp_paddrparams_spp_sackdelay=${ac_cv_member_struct_sctp_paddrparams_spp_sackdelay=no}
+ac_cv_member_struct_sctp_remote_error_sre_data=${ac_cv_member_struct_sctp_remote_error_sre_data=no}
+ac_cv_member_struct_sctp_send_failed_ssf_data=${ac_cv_member_struct_sctp_send_failed_ssf_data=no}
+ac_cv_objext=${ac_cv_objext=o}
+ac_cv_path_MKDIR=${ac_cv_path_MKDIR=/bin/mkdir}
+ac_cv_path_RM=${ac_cv_path_RM=/bin/rm}
+ac_cv_prog_AR=${ac_cv_prog_AR=ar.sh}
+ac_cv_prog_CC=${ac_cv_prog_CC=cc.sh}
+ac_cv_prog_CPP=${ac_cv_prog_CPP='cc.sh -E'}
+ac_cv_prog_CXX=${ac_cv_prog_CXX=cc.sh}
+ac_cv_prog_DED_LD=${ac_cv_prog_DED_LD=ld.sh}
+ac_cv_prog_JAVAC=${ac_cv_prog_JAVAC=javac.sh}
+ac_cv_prog_M4=${ac_cv_prog_M4=m4}
+ac_cv_prog_RANLIB=${ac_cv_prog_RANLIB=true}
+ac_cv_prog_cc_c89=${ac_cv_prog_cc_c89=}
+ac_cv_prog_cc_g=${ac_cv_prog_cc_g=yes}
+ac_cv_search_fdatasync=${ac_cv_search_fdatasync=no}
+ac_cv_search_opendir=${ac_cv_search_opendir=no}
+ac_cv_search_strerror=${ac_cv_search_strerror='none required'}
+ac_cv_sizeof___int64=${ac_cv_sizeof___int64=8}
+ac_cv_sizeof_char=${ac_cv_sizeof_char=1}
+ac_cv_sizeof_int=${ac_cv_sizeof_int=4}
+ac_cv_sizeof_long=${ac_cv_sizeof_long=4}
+ac_cv_sizeof_long_long=${ac_cv_sizeof_long_long=8}
+ac_cv_sizeof_off_t=${ac_cv_sizeof_off_t=4}
+ac_cv_sizeof_short=${ac_cv_sizeof_short=2}
+ac_cv_sizeof_size_t=${ac_cv_sizeof_size_t=8}
+ac_cv_sizeof_void_p=${ac_cv_sizeof_void_p=8}
+ac_cv_struct_exception=${ac_cv_struct_exception=no}
+ac_cv_struct_sockaddr_sa_len=${ac_cv_struct_sockaddr_sa_len=no}
+ac_cv_struct_tm=${ac_cv_struct_tm=time.h}
+ac_cv_type_off_t=${ac_cv_type_off_t=yes}
+ac_cv_type_pid_t=${ac_cv_type_pid_t=no}
+ac_cv_type_signal=${ac_cv_type_signal=void}
+ac_cv_type_size_t=${ac_cv_type_size_t=yes}
+ac_cv_type_uid_t=${ac_cv_type_uid_t=no}
+ac_cv_working_alloca_h=${ac_cv_working_alloca_h=no}
+erl_cv_time_correction=${erl_cv_time_correction=none}
+erts_cv___after_morecore_hook_can_track_malloc=${erts_cv___after_morecore_hook_can_track_malloc=no}
+erts_cv_fwrite_unlocked=${erts_cv_fwrite_unlocked=no}
+erts_cv_have__end_symbol=${erts_cv_have__end_symbol=no}
+erts_cv_have_end_symbol=${erts_cv_have_end_symbol=no}
+erts_cv_putc_unlocked=${erts_cv_putc_unlocked=no}
+erts_cv_windows_h_includes_winsock2_h=${erts_cv_windows_h_includes_winsock2_h=no}
diff --git a/erts/configure.in b/erts/configure.in
index fac07f8b6a..cb1b00b8b1 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -2,7 +2,7 @@ dnl Process this file with autoconf to produce a configure script. -*-m4-*-
dnl %CopyrightBegin%
dnl
-dnl Copyright Ericsson AB 1997-2011. All Rights Reserved.
+dnl Copyright Ericsson AB 1997-2012. All Rights Reserved.
dnl
dnl The contents of this file are subject to the Erlang Public License,
dnl Version 1.1, (the "License"); you may not use this file except in
@@ -191,7 +191,7 @@ AS_HELP_STRING([--disable-kernel-poll], [disable kernel poll support]),
AC_ARG_ENABLE(sctp,
-AS_HELP_STRING([--enable-sctp], [enable sctp support])
+AS_HELP_STRING([--enable-sctp], [enable sctp support (default)])
AS_HELP_STRING([--disable-sctp], [disable sctp support]),
[ case "$enableval" in
no) enable_sctp=no ;;
@@ -259,13 +259,6 @@ AS_HELP_STRING([--enable-m32-build],
esac
],enable_m32_build=no)
-AC_ARG_ENABLE(fixalloc,
-AS_HELP_STRING([--disable-fixalloc], [disable the use of fix_alloc]))
-if test x${enable_fixalloc} = xno ; then
- AC_DEFINE(NO_FIX_ALLOC,[],
- [Define if you don't want the fix allocator in Erlang])
-fi
-
AC_SUBST(PERFCTR_PATH)
AC_ARG_WITH(perfctr,
AS_HELP_STRING([--with-perfctr=PATH],
@@ -283,6 +276,61 @@ else
[Define to enable hrvtime() on Linux systems with perfctr extension])
fi
+
+AC_ARG_WITH(dynamic-trace,
+AS_HELP_STRING([--with-dynamic-trace={dtrace|systemtap}],
+ [specify use of dynamic trace framework, dtrace or systemtap])
+AS_HELP_STRING([--without-dynamic-trace],
+ [don't enable any dynamic tracing (default)]))
+
+if test X"$with_dynamic_trace" = X""; then
+ with_dynamic_trace=no
+fi
+
+case "$with_dynamic_trace" in
+ no) DYNAMIC_TRACE_FRAMEWORK=;;
+ dtrace)
+ AC_DEFINE(USE_DTRACE,[1],
+ [Define if you want to use dtrace for dynamic tracing])
+ DYNAMIC_TRACE_FRAMEWORK=dtrace;;
+ systemtap)
+ AC_DEFINE(USE_SYSTEMTAP,[1],
+ [Define if you want to use systemtap for dynamic tracing])
+ DYNAMIC_TRACE_FRAMEWORK=systemtap;;
+ *)
+ AC_MSG_ERROR(Unknown dynamic tracing framework specified with --with-dynamic-trace!);;
+esac
+
+if test X"$DYNAMIC_TRACE_FRAMEWORK" != X""; then
+ AC_DEFINE(USE_DYNAMIC_TRACE,[1],
+ [Define if you want to use dynamic tracing])
+fi
+
+AC_ARG_ENABLE(vm-probes,
+AS_HELP_STRING([--enable-vm-probes],
+ [add dynamic trace probes to the Beam VM (only possible if --with-dynamic-trace is enabled, and then default)]),
+ [ case "$enableval" in
+ no) use_vm_probes=no ;;
+ *)
+ if test X"$DYNAMIC_TRACE_FRAMEWORK" != X""; then
+ use_vm_probes=yes ;
+ else
+ AC_MSG_ERROR(Can not enable VM probes without any dynamic tracing framework!);
+ fi;;
+ esac ], if test X"$DYNAMIC_TRACE_FRAMEWORK" != X""; then
+ use_vm_probes=yes ;
+ else
+ use_vm_probes=no
+ fi)
+
+AC_SUBST(USE_VM_PROBES)
+if test X"$use_vm_probes" = X"yes"; then
+ USE_VM_PROBES=yes
+ AC_DEFINE(USE_VM_PROBES,[1],
+ [Define to enable VM dynamic trace probes])
+fi
+
+
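The flag above only defines USE_VM_PROBES; emulator sources are expected to guard their probe points on it. A minimal sketch of that pattern (not taken from this patch; the provider, probe name and helper are invented, and DTRACE_PROBE2 is the <sys/sdt.h> macro probed for further down):

    #ifdef USE_VM_PROBES
    #include <sys/sdt.h>
    #define PROBE_SPAWN(pid, arity) DTRACE_PROBE2(erlang, process__spawn, (pid), (arity))
    #else
    #define PROBE_SPAWN(pid, arity) ((void)(pid), (void)(arity))  /* compiled away */
    #endif

    static void spawn_probe_example(long pid, int arity)
    {
        PROBE_SPAWN(pid, arity);   /* fires only in probe-enabled builds */
    }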
AC_ARG_ENABLE(clock-gettime,
AS_HELP_STRING([--enable-clock-gettime],
[use clock-gettime for time correction]),
@@ -390,12 +438,6 @@ case $host_os in
# The ethread library requires _WIN32_WINNT of at least 0x0403.
# -D_WIN32_WINNT=* from CPPFLAGS is saved in ETHR_DEFS.
CPPFLAGS="$CPPFLAGS -D_WIN32_WINNT=0x0500 -DWINVER=0x0500"
- # _USE_32BIT_TIME_T is needed when using VC++ 2005 (ctime() will fail
- # otherwise since we pass it a 32-bit value).
- #
- # FIXME: Use time_t all the way and remove _USE_32BIT_TIME_T.
- AC_MSG_WARN([Reverting to 32-bit time_t])
- CPPFLAGS="$CPPFLAGS -D_USE_32BIT_TIME_T"
;;
darwin*)
CPPFLAGS="$CPPFLAGS -D_XOPEN_SOURCE"
@@ -405,53 +447,7 @@ case $host_os in
esac
-
-MIXED_CYGWIN=no
-
-AC_MSG_CHECKING(for mixed cygwin and native VC++ environment)
-if test "X$host" = "Xwin32" -a "x$GCC" != x"yes"; then
- if test -x /usr/bin/cygpath; then
- CFLAGS="-O2"
- MIXED_CYGWIN=yes
- AC_MSG_RESULT([yes])
- MIXED_CYGWIN_VC=yes
- CPPFLAGS="$CPPFLAGS -DERTS_MIXED_CYGWIN_VC"
- else
- AC_MSG_RESULT([undeterminable])
- AC_MSG_ERROR(Seems to be mixed windows but not with cygwin, cannot handle this!)
- fi
-else
- AC_MSG_RESULT([no])
- MIXED_CYGWIN_VC=no
-fi
-AC_SUBST(MIXED_CYGWIN_VC)
-
-AC_MSG_CHECKING(for mixed cygwin and native MinGW environment)
-if test "X$host" = "Xwin32" -a "x$GCC" = x"yes"; then
- if test -x /usr/bin/cygpath; then
- CFLAGS="-O2"
- MIXED_CYGWIN=yes
- AC_MSG_RESULT([yes])
- MIXED_CYGWIN_MINGW=yes
- CPPFLAGS="$CPPFLAGS -DERTS_MIXED_CYGWIN_MINGW"
- else
- AC_MSG_RESULT([undeterminable])
- AC_MSG_ERROR(Seems to be mixed windows but not with cygwin, cannot handle this!)
- fi
-else
- AC_MSG_RESULT([no])
- MIXED_CYGWIN_MINGW=no
-fi
-AC_SUBST(MIXED_CYGWIN_MINGW)
-
-AC_MSG_CHECKING(if we mix cygwin with any native compiler)
-if test "X$MIXED_CYGWIN" = "Xyes" ; then
- AC_MSG_RESULT([yes])
-else
- AC_MSG_RESULT([no])
-fi
-
-AC_SUBST(MIXED_CYGWIN)
+LM_WINDOWS_ENVIRONMENT
dnl
dnl Flags to the C compiler
@@ -497,7 +493,6 @@ CFLAG_RUNTIME_LIBRARY_PATH="-Wl,-R"
case $host_os in
darwin*)
CFLAG_RUNTIME_LIBRARY_PATH=
- CFLAGS="$CFLAGS -no-cpp-precomp"
;;
win32)
CFLAG_RUNTIME_LIBRARY_PATH=
@@ -914,16 +909,6 @@ fi
AC_SUBST(ERLANG_OSTYPE)
-dnl Which sysv4 would this be, and what is it for???
-dnl XXX: replace with feature tests.
-case $host_os in
- sysv4*)
- AC_DEFINE(SOCKOPT_CONNECT_STAT,[],[Obscure SYSV feature])
- AC_DEFINE(NO_PRAGMA_WEAK,[],[Obscure SYSV feature])
- LIBS="$LIBS -lgen -lc -L /usr/ucblib -lucb"
- ;;
-esac
-
# Check how to export functions from the emulator executable, needed
# when dynamically loaded drivers are loaded (so that they can find
# emulator functions).
@@ -1484,9 +1469,71 @@ AC_CHECK_HEADERS(fcntl.h limits.h unistd.h syslog.h dlfcn.h ieeefp.h \
sys/types.h sys/stropts.h sys/sysctl.h \
sys/ioctl.h sys/time.h sys/uio.h \
sys/socket.h sys/sockio.h sys/socketio.h \
- net/errno.h malloc.h mach-o/dyld.h arpa/nameser.h \
+ net/errno.h malloc.h arpa/nameser.h libdlpi.h \
pty.h util.h utmp.h langinfo.h poll.h sdkddkver.h)
+AC_CHECK_MEMBERS([struct ifreq.ifr_hwaddr], [], [],
+ [#ifdef __WIN32__
+ #else
+ #ifdef VXWORKS
+ #else
+ #include <net/if.h>
+ #endif
+ #endif
+ ])
+
+AC_CHECK_MEMBERS([struct ifreq.ifr_enaddr], [], [],
+ [#ifdef __WIN32__
+ #else
+ #ifdef VXWORKS
+ #else
+ #include <net/if.h>
+ #endif
+ #endif
+ ])
+
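The two member checks above define macros following autoconf's usual HAVE_STRUCT_<struct>_<member> naming. A hedged sketch of how such a macro can be consumed (the helper and the fallback strategy are invented, not part of the patch):

    #include <string.h>
    #include <sys/socket.h>
    #include <net/if.h>

    static void copy_hwaddr(const struct ifreq *ifr, unsigned char *out, size_t n)
    {
    #if defined(HAVE_STRUCT_IFREQ_IFR_HWADDR)
        memcpy(out, ifr->ifr_hwaddr.sa_data, n);   /* Linux-style member */
    #else
        /* e.g. fall back to ifr_enaddr where HAVE_STRUCT_IFREQ_IFR_ENADDR
           is set, or to a different API when neither member exists */
        (void) ifr;
        memset(out, 0, n);
    #endif
    }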
+dnl ----------------------------------------------------------------------
+dnl Check the availability for libdlpi
+dnl ----------------------------------------------------------------------
+AC_CHECK_LIB(dlpi, dlpi_open)
+if test x"$ac_cv_lib_dlpi_dlpi_open" = x"no"; then
+ unset -v ac_cv_lib_dlpi_dlpi_open
+ dnl Try again, now with -L/lib (or its 64-bit counterpart) passed to the linker,
+ dnl since gcc makes /usr/ccs/bin/ld ignore the crle-configured default linker
+ dnl paths, typically causing libdlpi not to be found on Solaris et al.
+ save_ldflags="$LDFLAGS"
+ try_dlpi_lib=/lib
+ if test x"$ac_cv_sizeof_void_p" = x"8"; then
+ if test -d /lib64; then
+ try_dlpi_lib=/lib64
+ elif test -d /lib/64; then
+ try_dlpi_lib=/lib/64
+ fi
+ fi
+ if test ! -f "$try_dlpi_lib/libdlpi.so" && \
+ test -f "$try_dlpi_lib/libdlpi.so.1"
+ then
+ dnl It looks like there is a missing symlink
+ dnl - let's be helpful and notify the user
+ dnl NOTE: this help is far from perfect; e.g. if there is no *.so.1 but only
+ dnl a *.so.1.123 or *.so.2, this will not help
+ AC_MSG_ERROR(
+ [Your OS installation is missing a symbolic link.
+ Maybe it lacks some development package(s)...
+ It can anyhow be fixed with the following command:
+ # ln -s libdlpi.so.1 $try_dlpi_lib/libdlpi.so
+ ])
+ fi
+ LDFLAGS="-L$try_dlpi_lib -R$try_dlpi_lib $LDFLAGS"
+ unset -v try_dlpi_lib
+ AC_MSG_NOTICE([Extending the search to include /lib])
+ AC_CHECK_LIB(dlpi, dlpi_open)
+ if test x"$ac_cv_lib_dlpi_dlpi_open" = x"no"; then
+ LDFLAGS="$save_ldflags"
+ fi
+ unset -v save_ldflags
+fi
+
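The check above only needs to link against the dlpi_open symbol; roughly the kind of program it must be able to build is sketched below (Solaris-style libdlpi; the link name "net0" is purely illustrative, not part of the patch):

    #include <stdio.h>
    #include <libdlpi.h>

    int main(void)
    {
        dlpi_handle_t dh;
        int rc = dlpi_open("net0", &dh, 0);   /* link with -ldlpi */
        if (rc != DLPI_SUCCESS) {
            fprintf(stderr, "dlpi_open: %s\n", dlpi_strerror(rc));
            return 1;
        }
        dlpi_close(dh);
        return 0;
    }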
AC_CHECK_HEADER(sys/resource.h,
[AC_DEFINE(HAVE_SYS_RESOURCE_H, 1,
[Define to 1 if you have the <sys/resource.h> header file])
@@ -1503,7 +1550,7 @@ AC_CHECK_HEADER(sys/devpoll.h, have_kernel_poll=/dev/poll)
dnl Check for kernel SCTP support
AC_SUBST(LIBSCTP)
-if test "x$enable_sctp" = "xyes" ; then
+if test "x$enable_sctp" != "xno" ; then
AC_CHECK_HEADER(netinet/sctp.h,
[LIBSCTP=libsctp.so.1
AC_DEFINE(HAVE_SCTP_H, [1],
@@ -1513,8 +1560,24 @@ if test "x$enable_sctp" = "xyes" ; then
#include <sys/socket.h>
#endif
])
+fi
+
+if test x"$ac_cv_header_netinet_sctp_h" = x"yes"; then
+ AC_CHECK_FUNCS([sctp_bindx sctp_peeloff])
AC_CHECK_DECLS([SCTP_UNORDERED, SCTP_ADDR_OVER, SCTP_ABORT,
- SCTP_EOF, SCTP_SENDALL, SCTP_ADDR_CONFIRMED], [], [],
+ SCTP_EOF, SCTP_SENDALL, SCTP_ADDR_CONFIRMED,
+ SCTP_DELAYED_ACK_TIME,
+ SCTP_EMPTY,
+ SCTP_CLOSED, SCTPS_IDLE,
+ SCTP_BOUND, SCTPS_BOUND,
+ SCTP_LISTEN, SCTPS_LISTEN,
+ SCTP_COOKIE_WAIT, SCTPS_COOKIE_WAIT,
+ SCTP_COOKIE_ECHOED, SCTPS_COOKIE_ECHOED,
+ SCTP_ESTABLISHED, SCTPS_ESTABLISHED,
+ SCTP_SHUTDOWN_PENDING, SCTPS_SHUTDOWN_PENDING,
+ SCTP_SHUTDOWN_SENT, SCTPS_SHUTDOWN_SENT,
+ SCTP_SHUTDOWN_RECEIVED, SCTPS_SHUTDOWN_RECEIVED,
+ SCTP_SHUTDOWN_ACK_SENT, SCTPS_SHUTDOWN_ACK_SENT], [], [],
[#if HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
@@ -1581,6 +1644,7 @@ AC_CHECK_SIZEOF(void *)
AC_CHECK_SIZEOF(long long)
AC_CHECK_SIZEOF(size_t)
AC_CHECK_SIZEOF(off_t)
+AC_CHECK_SIZEOF(time_t)
BITS64=
@@ -1673,6 +1737,33 @@ esac
AC_C_BIGENDIAN
+dnl fdatasync syscall (Unix only)
+AC_CHECK_FUNCS([fdatasync])
+
+dnl Find which C libraries are required to use fdatasync
+dnl TODO: Remove check once SunOS >= 5.11 is required by erts.
+dnl fdatasync requires linking against -lrt on SunOS <= 5.10.
+dnl OpenSolaris 2009.06 is SunOS 5.11 and does not require -lrt.
+AC_SEARCH_LIBS(fdatasync, [rt])
+
+
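Since the check above defines HAVE_FDATASYNC, callers can select fdatasync() where it exists and fall back to fsync() elsewhere; a minimal sketch (the helper name is invented):

    #include <unistd.h>

    static int sync_file_data(int fd)
    {
    #ifdef HAVE_FDATASYNC
        return fdatasync(fd);   /* flushes file data, may skip pure metadata updates */
    #else
        return fsync(fd);       /* portable fallback */
    #endif
    }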
+dnl sendfile syscall
+case $host_os in
+ linux*|freebsd*|dragonfly*|darwin*)
+ AC_CHECK_FUNCS([sendfile])
+ ;;
+ solaris*)
+ AC_SEARCH_LIBS(sendfilev, sendfile,
+ AC_DEFINE([HAVE_SENDFILEV],[1],
+ [Define to 1 if you have the `sendfilev' function.]))
+ ;;
+ win32)
+ LIBS="$LIBS -lmswsock"
+ ;;
+ *)
+ ;;
+esac
+
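The macros defined by the checks above (HAVE_SENDFILE on Linux/BSD/Darwin, HAVE_SENDFILEV on Solaris) can then be dispatched on in C. A hedged sketch, with error handling trimmed and the wrapper name invented (the BSD/Darwin sendfile signature differs and is omitted here):

    #include <sys/types.h>

    #if defined(HAVE_SENDFILEV)
    #include <sys/sendfile.h>                 /* Solaris */
    static ssize_t raw_sendfile(int out_fd, int in_fd, off_t *off, size_t len)
    {
        struct sendfilevec sfv = { in_fd, 0, *off, len };
        size_t xferred = 0;
        ssize_t res = sendfilev(out_fd, &sfv, 1, &xferred);
        *off += xferred;
        return res < 0 ? res : (ssize_t) xferred;
    }
    #elif defined(HAVE_SENDFILE) && defined(__linux__)
    #include <sys/sendfile.h>                 /* Linux */
    static ssize_t raw_sendfile(int out_fd, int in_fd, off_t *off, size_t len)
    {
        return sendfile(out_fd, in_fd, off, len);
    }
    #endif                                     /* else: plain read()/write() loop */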
dnl ----------------------------------------------------------------------
dnl Checks for library functions.
dnl ----------------------------------------------------------------------
@@ -1800,11 +1891,6 @@ AC_CHECK_FUNCS([ieee_handler fpsetmask finite isnan isinf res_gethostbyname dlop
AC_CHECK_DECLS([posix2time],,,[#include <time.h>])
-if test "X$host" = "Xwin32"; then
- ac_cv_func_setvbuf_reversed=yes
-fi
-AC_FUNC_SETVBUF_REVERSED
-
disable_vfork=false
if test "x$EMU_THR_LIB_NAME" != "x"; then
AC_MSG_CHECKING([if vfork is known to hang multithreaded applications])
@@ -1860,12 +1946,6 @@ fi
dnl Need by run_erl.
AC_CHECK_FUNCS([openpty])
-dnl fdatasync syscall (Unix only)
-AC_CHECK_FUNCS([fdatasync])
-
-dnl Find which C libraries are required to use fdatasync
-AC_SEARCH_LIBS(fdatasync, [rt])
-
AC_CHECK_HEADERS(net/if_dl.h ifaddrs.h netpacket/packet.h)
AC_CHECK_FUNCS([getifaddrs])
@@ -2452,6 +2532,17 @@ case $ARCH-$OPSYS in
darwin_mcontext_leopard=no
;;
esac
+
+if test X${enable_fp_exceptions} = Xauto ; then
+ case $host_os in
+ darwin*)
+ enable_fp_exceptions=no
+ AC_MSG_NOTICE([Floating point exceptions disabled by default on MacOS X]) ;;
+ *)
+ ;;
+ esac
+fi
+
if test X${enable_fp_exceptions} = Xauto ; then
if test X${enable_hipe} = Xyes; then
enable_fp_exceptions=yes
@@ -2499,7 +2590,7 @@ static void new_fp_exception(void)
* Implement unmask_fpe() and check_fpe() based on CPU/OS combination
*/
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) && !defined(__CYGWIN__)
+#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
static void unmask_x87(void)
{
@@ -3082,8 +3173,6 @@ dnl done up where floating point is checked, need to descide there already...
if test X${enable_hipe} = Xyes; then
if test X$ac_cv_sizeof_void_p != X4 -a X$ARCH != Xamd64 -a X$ARCH != Xppc64; then
AC_MSG_WARN([HiPE is not supported in 64-bit builds])
- elif test X$FPE != Xreliable -a X$ARCH != Xarm; then
- AC_MSG_WARN([HiPE is not supported on $ARCH without reliable floating-point exceptions])
else
HIPE_ENABLED=yes
AC_DEFINE(HIPE,[1],[Define to enable HiPE])
@@ -3512,6 +3601,74 @@ dnl
LM_FIND_EMU_CC
dnl
+dnl DTrace
+dnl
+case $DYNAMIC_TRACE_FRAMEWORK in
+ dtrace|systemtap)
+ AC_CHECK_TOOL(DTRACE, dtrace, none)
+ test "$DTRACE" = "none" && AC_MSG_ERROR([No dtrace utility found.]);
+ enable_dtrace_test=yes;;
+ *) enable_dtrace_test=no;;
+esac
+
+AC_SUBST(DTRACE)
+
+AC_SUBST(DTRACE_CPP)
+AC_SUBST(DTRACE_ENABLED)
+AC_SUBST(DTRACE_ENABLED_2STEP)
+DTRACE_CPP=-C
+DTRACE_ENABLED=
+DTRACE_ENABLED_2STEP=
+DTRACE_2STEP_TEST=./dtrace-test.o
+DTRACE_BITS_FLAG=
+case $OPSYS in
+ freebsd)
+ if test "$BITS64" = "yes" ; then
+ DTRACE_BITS_FLAG=-64
+ else
+ DTRACE_BITS_FLAG=-32
+ fi
+ ;;
+ *)
+ : # Nothing to do
+ ;;
+esac
+if test "$enable_dtrace_test" = "yes" ; then
+ if test "$DTRACE" = "dtrace" ; then
+ AC_CHECK_HEADERS(sys/sdt.h)
+ # The OS X version of dtrace prints a spurious line here.
+ if ! dtrace -h $DTRACE_CPP -Iemulator/beam -o ./foo-dtrace.h -s emulator/beam/erlang_dtrace.d; then
+ AC_MSG_ERROR([Could not precompile erlang_dtrace.d: dtrace -h failed])
+ fi
+ rm -f foo-dtrace.h
+
+ $RM -f $DTRACE_2STEP_TEST
+ if dtrace -G $DTRACE_CPP $DTRACE_BITS_FLAG -Iemulator/beam -o $DTRACE_2STEP_TEST -s emulator/beam/erlang_dtrace.d 2> /dev/null && \
+ test -f $DTRACE_2STEP_TEST ; then
+ rm $DTRACE_2STEP_TEST
+ DTRACE_ENABLED_2STEP=yes
+ AC_MSG_NOTICE([dtrace precompilation for 2-stage DTrace successful])
+ else
+ AC_MSG_NOTICE([dtrace precompilation for 1-stage DTrace successful])
+ fi
+ DTRACE_ENABLED=yes
+ case $OPSYS in
+ linux)
+ : # No extra libs to add to LIBS
+ ;;
+ freebsd)
+ LIBS="$LIBS -lelf"
+ ;;
+ *)
+ LIBS="$LIBS -ldtrace"
+ ;;
+ esac
+ else
+ AC_MSG_ERROR([Dtrace preprocessing test failed.])
+ fi
+fi
+
+dnl
dnl SSL, SSH and CRYPTO need the OpenSSL libraries
dnl
dnl Check flags --with-ssl, --without-ssl --with-ssl=PATH.
@@ -3535,7 +3692,7 @@ AC_SUBST(STATIC_KERBEROS_LIBS)
AC_SUBST(SSL_LINK_WITH_ZLIB)
AC_SUBST(STATIC_ZLIB_LIBS)
-std_ssl_locations="/usr/local /usr/sfw /opt/local /usr /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
+std_ssl_locations="/usr/local /usr/sfw /usr /opt/local /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
AC_ARG_WITH(ssl-zlib,
AS_HELP_STRING([--with-ssl-zlib=PATH],
@@ -3554,7 +3711,7 @@ elif test "x$with_ssl_zlib" = "xyes" || test "x$with_ssl_zlib" = "x"; then
AC_MSG_WARN([Cannot search for zlib; missing cross system root (erl_xcomp_sysroot).])
SSL_LINK_WITH_ZLIB=no
STATIC_ZLIB_LIBS=
- elif test "x$MIXED_CYGWIN" = "xyes"; then
+ elif test "x$MIXED_CYGWIN" = "xyes" -o "x$MIXED_MSYS" = "xyes"; then
SSL_LINK_WITH_ZLIB=no
STATIC_ZLIB_LIBS=
else
@@ -3666,16 +3823,23 @@ case "$erl_xcomp_without_sysroot-$with_ssl" in
AC_CHECK_PROG(REGTOOL, regtool, regtool, false)
if test "$ac_cv_prog_REGTOOL" != false; then
wrp="/machine/software/microsoft/windows/currentversion/"
- urp="uninstall/openssl_is1/inno setup: app path"
+ if test "x$ARCH" = "xamd64"; then
+ urp="uninstall/openssl (64-bit)_is1/inno setup: app path"
+ regtool_subsystem=-w
+ else
+ urp="uninstall/openssl (32-bit)_is1/inno setup: app path"
+ regtool_subsystem=-W
+ fi
rp="$wrp$urp"
- if regtool -q get "$rp" > /dev/null; then
+ if regtool -q $regtool_subsystem get "$rp" > /dev/null; then
true
else
- urp="uninstall/openssl (32-bit)_is1/inno setup: app path"
+ # Fallback to unspecified wordlength
+ urp="uninstall/openssl_is1/inno setup: app path"
rp="$wrp$urp"
fi
- if regtool -q get "$rp" > /dev/null; then
- ssl_install_dir=`regtool -q get "$rp"`
+ if regtool -q $regtool_subsystem get "$rp" > /dev/null; then
+ ssl_install_dir=`regtool -q $regtool_subsystem get "$rp"`
# Try hard to get rid of spaces...
if cygpath -d "$ssl_install_dir" > /dev/null 2>&1; then
ssl_install_dir=`cygpath -d "$ssl_install_dir"`
@@ -3683,6 +3847,20 @@ case "$erl_xcomp_without_sysroot-$with_ssl" in
extra_dir=`cygpath $ssl_install_dir`
fi
fi
+ elif test "x$MIXED_MSYS" = "xyes"; then
+ AC_CHECK_PROG(REGTOOL, reg_query.sh, reg_query.sh, false)
+ if test "$ac_cv_prog_REGTOOL" != false; then
+ if test "x$ARCH" = "xamd64"; then
+ rp="HKLM/SOFTWARE/Microsoft/Windows/CurrentVersion/Uninstall/OpenSSL (64-bit)_is1"
+ else
+ rp="HKLM/SOFTWARE/Microsoft/Windows/CurrentVersion/Uninstall/OpenSSL_is1"
+ fi
+ key="Inno Setup: App Path"
+ if "$ac_cv_prog_REGTOOL" "$rp" "$key" > /dev/null; then
+ ssl_install_dir=`"$ac_cv_prog_REGTOOL" "$rp" "$key"`
+ extra_dir=`win2msys_path.sh "$ssl_install_dir"`
+ fi
+ fi
fi
# We search for OpenSSL in the common OS standard locations.
SSL_APP=ssl
@@ -3691,14 +3869,25 @@ case "$erl_xcomp_without_sysroot-$with_ssl" in
SSL_CRYPTO_LIBNAME=crypto
SSL_SSL_LIBNAME=ssl
+
+ if test "x$MIXED_CYGWIN" = "xyes" -o "x$MIXED_MSYS" = "xyes"; then
+ if test "x$ARCH" = "xamd64"; then
+ std_win_ssl_locations="/cygdrive/c/OpenSSL-Win64 /c/OpenSSL-Win64 /opt/local64/pgm/OpenSSL"
+ else
+ std_win_ssl_locations="/cygdrive/c/OpenSSL-Win32 /c/OpenSSL-Win32 /cygdrive/c/OpenSSL /c/OpenSSL /opt/local/pgm/OpenSSL"
+ fi
+ else
+ std_win_ssl_locations=""
+ fi
+
AC_MSG_CHECKING(for OpenSSL >= 0.9.7 in standard locations)
- for rdir in $extra_dir /cygdrive/c/OpenSSL $std_ssl_locations; do
+ for rdir in $extra_dir $std_win_ssl_locations $std_ssl_locations; do
dir="$erl_xcomp_sysroot$rdir"
if test -f "$erl_xcomp_isysroot$rdir/include/openssl/opensslv.h"; then
is_real_ssl=yes
SSL_ROOT="$dir"
- if test "x$MIXED_CYGWIN" = "xyes" ; then
+ if test "x$MIXED_CYGWIN" = "xyes" -o "x$MIXED_MSYS" = "xyes"; then
if test -f "$dir/lib/VC/libeay32.lib"; then
SSL_RUNTIME_LIBDIR="$rdir/lib/VC"
SSL_LIBDIR="$dir/lib/VC"
@@ -3722,7 +3911,7 @@ case "$erl_xcomp_without_sysroot-$with_ssl" in
SSL_RUNTIME_LIBDIR="$rdir/lib"
SSL_LIBDIR="$dir/lib"
SSL_CRYPTO_LIBNAME=libeay32
- SSL_CRYPTO_LIBNAME=ssleay32
+ SSL_SSL_LIBNAME=ssleay32
elif test -f "$dir/lib/openssl.lib"; then
SSL_RUNTIME_LIBDIR="$rdir/lib"
SSL_LIBDIR="$dir/lib"
@@ -3773,7 +3962,7 @@ case "$erl_xcomp_without_sysroot-$with_ssl" in
])
CPPFLAGS=$old_CPPFLAGS
if test "x$ssl_found" = "xyes"; then
- if test "x$MIXED_CYGWIN" = "xyes" ; then
+ if test "x$MIXED_CYGWIN" = "xyes" -o "x$MIXED_MSYS" = "xyes"; then
ssl_linkable=yes
else
saveCFLAGS="$CFLAGS"
@@ -3885,7 +4074,7 @@ dnl so it is - be adoptable
SSL_ROOT="$with_ssl"
SSL_CRYPTO_LIBNAME=crypto
SSL_SSL_LIBNAME=ssl
- if test "x$MIXED_CYGWIN" = "xyes" && test -d "$with_ssl/lib/VC"; then
+ if test "x$MIXED_CYGWIN" = "xyes" -o "x$MIXED_MSYS" = "xyes" && test -d "$with_ssl/lib/VC"; then
if test -f "$with_ssl/lib/VC/libeay32.lib"; then
SSL_LIBDIR="$with_ssl/lib/VC"
SSL_CRYPTO_LIBNAME=libeay32
@@ -3904,7 +4093,7 @@ dnl so it is - be adoptable
elif test -f "$with_ssl/lib/libeay32.lib"; then
SSL_LIBDIR="$with_ssl/lib"
SSL_CRYPTO_LIBNAME=libeay32
- SSL_CRYPTO_LIBNAME=ssleay32
+ SSL_SSL_LIBNAME=ssleay32
else
# This probably wont work, but that's what the user said, so...
SSL_LIBDIR="$with_ssl/lib"
@@ -4299,7 +4488,6 @@ AH_BOTTOM([
#endif
])
-
dnl ----------------------------------------------------------------------
dnl Output the result.
dnl ----------------------------------------------------------------------
@@ -4308,8 +4496,6 @@ dnl Note that the output files are relative to $srcdir
AC_OUTPUT(
emulator/$host/Makefile:emulator/Makefile.in
- emulator/zlib/$host/Makefile:emulator/zlib/Makefile.in
- emulator/pcre/$host/Makefile:emulator/pcre/Makefile.in
epmd/src/$host/Makefile:epmd/src/Makefile.in
etc/common/$host/Makefile:etc/common/Makefile.in
include/internal/$host/ethread.mk:include/internal/ethread.mk.in
@@ -4323,7 +4509,7 @@ dnl The ones below should be moved to their respective lib
dnl
../lib/ic/c_src/$host/Makefile:../lib/ic/c_src/Makefile.in
../lib/os_mon/c_src/$host/Makefile:../lib/os_mon/c_src/Makefile.in
- ../lib/ssl/c_src/$host/Makefile:../lib/ssl/c_src/Makefile.in
+dnl ../lib/ssl/c_src/$host/Makefile:../lib/ssl/c_src/Makefile.in
../lib/crypto/c_src/$host/Makefile:../lib/crypto/c_src/Makefile.in
../lib/orber/c_src/$host/Makefile:../lib/orber/c_src/Makefile.in
../lib/runtime_tools/c_src/$host/Makefile:../lib/runtime_tools/c_src/Makefile.in
diff --git a/erts/doc/specs/.gitignore b/erts/doc/specs/.gitignore
new file mode 100644
index 0000000000..322eebcb06
--- /dev/null
+++ b/erts/doc/specs/.gitignore
@@ -0,0 +1 @@
+specs_*.xml
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index 6578923fe1..cfa5527474 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+# Copyright Ericsson AB 1997-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -16,6 +16,9 @@
#
# %CopyrightEnd%
#
+
+SPECS_ESRC = ../../preloaded/src/
+
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
@@ -43,6 +46,12 @@ XML_REF1_FILES = epmd.xml \
run_erl.xml \
start.xml
+XML_REF3_EFILES = \
+ erl_prim_loader.xml \
+ erlang.xml \
+ init.xml \
+ zlib.xml
+
XML_REF3_FILES = \
driver_entry.xml \
erl_nif.xml \
@@ -98,18 +107,26 @@ HTML_REF_MAN_FILE = $(HTMLDIR)/index.html
TOP_PDF_FILE = $(PDFDIR)/$(APPLICATION)-$(VSN).pdf
+SPECS_FILES = $(XML_REF3_EFILES:%.xml=$(SPECDIR)/specs_%.xml)
+
+TOP_SPECS_FILE = specs.xml
+
# ----------------------------------------------------
# FLAGS
# ----------------------------------------------------
XML_FLAGS +=
+KERNEL_SRC=$(ERL_TOP)/lib/kernel/src
+KERNEL_INCLUDE=$(ERL_TOP)/lib/kernel/include
+SPECS_FLAGS = -I$(KERNEL_SRC) -I$(KERNEL_INCLUDE)
+
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
$(HTMLDIR)/%.gif: %.gif
$(INSTALL_DATA) $< $@
-docs: pdf html man $(INFO_FILE)
+docs: man pdf html $(INFO_FILE)
$(TOP_PDF_FILE): $(XML_FILES)
@@ -132,8 +149,25 @@ clean:
rm -f $(MAN1DIR)/*
rm -f $(MAN3DIR)/*
rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
+ rm -f $(SPECDIR)/*
rm -f errs core *~
+$(SPECDIR)/specs_driver_entry.xml:
+ escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ -o$(dir $@) -module driver_entry
+$(SPECDIR)/specs_erl_nif.xml:
+ escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ -o$(dir $@) -module erl_nif
+$(SPECDIR)/specs_erl_set_memory_block.xml:
+ escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ -o$(dir $@) -module erl_set_memory_block
+$(SPECDIR)/specs_erl_driver.xml:
+ escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ -o$(dir $@) -module erl_driver
+$(SPECDIR)/specs_erts_alloc.xml:
+ escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ -o$(dir $@) -module erts_alloc
+
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index 4c84412dd6..4455d0ac92 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2001</year><year>2009</year>
+ <year>2001</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -285,7 +285,8 @@
<item>If E is <c><![CDATA[fun Name / Arity]]></c>, then
Rep(E) = <c><![CDATA[{'fun',LINE,{function,Name,Arity}}]]></c>.</item>
<item>If E is <c><![CDATA[fun Module:Name/Arity]]></c>, then
- Rep(E) = <c><![CDATA[{'fun',LINE,{function,Module,Name,Arity}}]]></c>.</item>
+ Rep(E) = <c><![CDATA[{'fun',LINE,{function,Rep(Module),Rep(Name),Rep(Arity)}}]]></c>.
+ (Before the R15 release: Rep(E) = <c><![CDATA[{'fun',LINE,{function,Module,Name,Arity}}]]></c>.)</item>
<item>If E is <c><![CDATA[fun Fc_1 ; ... ; Fc_k end]]></c>
where each <c><![CDATA[Fc_i]]></c> is a function clause then Rep(E) =
<c><![CDATA[{'fun',LINE,{clauses,[Rep(Fc_1), ..., Rep(Fc_k)]}}]]></c>.</item>
diff --git a/erts/doc/src/alt_dist.xml b/erts/doc/src/alt_dist.xml
index 36d83a685b..038950b54d 100644
--- a/erts/doc/src/alt_dist.xml
+++ b/erts/doc/src/alt_dist.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2000</year><year>2010</year>
+ <year>2000</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -377,7 +377,7 @@
( 1) typedef enum {
( 2) portTypeUnknown, /* An uninitialized port */
( 3) portTypeListener, /* A listening port/socket */
-( 4) portTypeAcceptor, /* An intermidiate stage when accepting
+( 4) portTypeAcceptor, /* An intermediate stage when accepting
( 5) on a listen port */
( 6) portTypeConnector, /* An intermediate stage when connecting */
( 7) portTypeCommand, /* A connected open port in command mode */
diff --git a/erts/doc/src/driver.xml b/erts/doc/src/driver.xml
index 9f246c4a6c..52283879c7 100644
--- a/erts/doc/src/driver.xml
+++ b/erts/doc/src/driver.xml
@@ -30,11 +30,11 @@
</header>
<note><p>This document was written a long time ago. A lot of it is still
- valid, but some things have changed since it was first written.
- Updates of this document are planned for the future. The reader
- is encouraged to also read the
- <seealso marker="erl_driver">erl_driver</seealso>, and the
- <seealso marker="erl_driver">driver_entry</seealso> documentation.
+ interesting since it explains important concepts, but it was
+ written for an older driver interface so the examples do not
+ work anymore. The reader is encouraged to read
+ <seealso marker="erl_driver">erl_driver</seealso> and the
+ <seealso marker="driver_entry">driver_entry</seealso> documentation.
</p></note>
<section>
diff --git a/erts/doc/src/driver_entry.xml b/erts/doc/src/driver_entry.xml
index 8bdd154cb9..a2efdf3ebc 100644
--- a/erts/doc/src/driver_entry.xml
+++ b/erts/doc/src/driver_entry.xml
@@ -34,18 +34,22 @@
<lib>driver_entry</lib>
<libsummary>The driver-entry structure used by erlang drivers.</libsummary>
<description>
- <p>As of erts version 5.5.3 the driver interface has been extended
- (see <seealso marker="driver_entry#extended_marker">extended marker</seealso>).
- The extended interface introduces
- <seealso marker="erl_driver#version_management">version management</seealso>,
- the possibility to pass capability flags
- (see <seealso marker="driver_entry#driver_flags">driver flags</seealso>)
- to the runtime system at driver initialization, and some new
- driver API functions. </p>
+ <p>
+ As of erts version 5.9 (OTP release R15B) the driver interface
+ has been changed with larger types for the callbacks
+ <seealso marker="#output">output</seealso>,
+ <seealso marker="#control">control</seealso> and
+ <seealso marker="#call">call</seealso>.
+ See driver <seealso marker="erl_driver#version_management">
+ version management</seealso> in
+ <seealso marker="erl_driver">erl_driver</seealso>.
+ </p>
<note>
<p>Old drivers (compiled with an <c>erl_driver.h</c> from an
- earlier erts version than 5.5.3) have to be recompiled
- (but do not have to use the extended interface).</p>
+ earlier erts version than 5.9) have to be updated and have
+ to use the extended interface (with
+ <seealso marker="erl_driver#version_management">version management
+ </seealso>).</p>
</note>
<p>The <c>driver_entry</c> structure is a C struct that all erlang
drivers define. It contains entry points for the erlang driver
@@ -53,7 +57,7 @@
the driver.</p>
<p>
<marker id="emulator"></marker>
- The <seealso marker="driver_entry">erl_driver</seealso> driver
+ The <seealso marker="erl_driver">erl_driver</seealso> driver
API functions need a port handle
that identifies the driver instance (and the port in the
emulator). This is only passed to the <c>start</c> function, but
@@ -84,7 +88,7 @@
the emulator, the driver is <em>not</em> allowed to modify the
<c>driver_entry</c>.</p>
<note>
- <p>Do <em>not</em> declare the <c>driver_entry</c><c>const</c>. This since the emulator needs to
+ <p>Do <em>not</em> declare the <c>driver_entry</c> <c>const</c>. This since the emulator needs to
modify the <c>handle</c>, and the <c>handle2</c>
fields. A statically allocated, and <c>const</c>
declared <c>driver_entry</c> may be located in
@@ -116,7 +120,7 @@ typedef struct erl_drv_entry {
void (*stop)(ErlDrvData drv_data);
/* called when port is closed, and when the
emulator is halted. */
- void (*output)(ErlDrvData drv_data, char *buf, int len);
+ void (*output)(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
/* called when we have output from erlang to
the port */
void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
@@ -130,8 +134,9 @@ typedef struct erl_drv_entry {
void (*finish)(void); /* called before unloading the driver -
DYNAMIC DRIVERS ONLY */
void *handle; /* Reserved -- Used by emulator internally */
- int (*control)(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen);
+ ErlDrvSSizeT (*control)(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen);
/* "ioctl" for drivers - invoked by
port_control/3 */
void (*timeout)(ErlDrvData drv_data); /* Handling of timeout in driver */
@@ -144,8 +149,9 @@ typedef struct erl_drv_entry {
closed, and there is data in the
driver queue that needs to be flushed
before 'stop' can be called */
- int (*call)(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned int *flags);
+ ErlDrvSSizeT (*call)(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen, unsigned int *flags);
/* Works mostly like 'control', a synchronous
call into the driver. */
void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
@@ -192,7 +198,7 @@ typedef struct erl_drv_entry {
<c>start</c>, then <c>stop</c> is the place to deallocate that
memory.</p>
</item>
- <tag><marker id="output"/>void (*output)(ErlDrvData drv_data, char *buf, int len)</tag>
+ <tag><marker id="output"/>void (*output)(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)</tag>
<item>
<p>This is called when an erlang process has sent data to the
port. The data is pointed to by <c>buf</c>, and is
@@ -243,7 +249,7 @@ typedef struct erl_drv_entry {
emulator will modify this field; therefore, it is important
that the <c>driver_entry</c> isn't declared <c>const</c>.</p>
</item>
- <tag><marker id="control"></marker>int (*control)(ErlDrvData drv_data, unsigned int command, char *buf, int len, char **rbuf, int rlen)</tag>
+ <tag><marker id="control"></marker>ErlDrvSSizeT (*control)(ErlDrvData drv_data, unsigned int command, char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)</tag>
<item>
<p>This is a special routine invoked with the erlang function
<c>port_control/3</c>. It works a little like an "ioctl" for
@@ -316,7 +322,7 @@ typedef struct erl_drv_entry {
opposed to the asynchronous function, which is called in
some thread (if multithreading is enabled).</p>
</item>
- <tag><marker id="call"/>int (*call)(ErlDrvData drv_data, unsigned int command, char *buf, int len, char **rbuf, int rlen, unsigned int *flags)</tag>
+ <tag><marker id="call"/>ErlDrvSSizeT (*call)(ErlDrvData drv_data, unsigned int command, char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen, unsigned int *flags)</tag>
<item>
<p>This function is called from <c>erlang:port_call/3</c>. It
works a lot like the <c>control</c> call-back, but uses the
@@ -452,7 +458,7 @@ typedef struct erl_drv_entry {
<title>SEE ALSO</title>
<p><seealso marker="erl_driver">erl_driver(3)</seealso>,
<seealso marker="kernel:erl_ddll">erl_ddll(3)</seealso>,
- <seealso marker="erts:erlang">erlang(3)</seealso>,
+ <seealso marker="erlang">erlang(3)</seealso>,
kernel(3)</p>
</section>
</cref>
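To make the new 64-bit-capable callback types above concrete, here is a hedged sketch of a minimal driver entry using them; the driver name "example_drv", the echo behaviour and the field comments are invented for illustration and are not part of the patch:

    #include <string.h>
    #include "erl_driver.h"

    static ErlDrvData example_start(ErlDrvPort port, char *command)
    {
        (void) command;
        return (ErlDrvData) port;              /* keep the port as driver state */
    }

    static void example_stop(ErlDrvData drv_data)
    {
        (void) drv_data;
    }

    /* Note ErlDrvSizeT/ErlDrvSSizeT instead of the pre-5.9 int. */
    static void example_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
    {
        driver_output((ErlDrvPort) drv_data, buf, len);     /* echo back */
    }

    static ErlDrvSSizeT example_control(ErlDrvData drv_data, unsigned int command,
                                        char *buf, ErlDrvSizeT len,
                                        char **rbuf, ErlDrvSizeT rlen)
    {
        ErlDrvSizeT n = len < rlen ? len : rlen;   /* keep it simple: truncate */
        (void) drv_data; (void) command;
        memcpy(*rbuf, buf, n);
        return (ErlDrvSSizeT) n;
    }

    static ErlDrvEntry example_driver_entry = {
        NULL,                          /* init */
        example_start,
        example_stop,
        example_output,
        NULL, NULL,                    /* ready_input, ready_output */
        "example_drv",                 /* driver_name */
        NULL,                          /* finish */
        NULL,                          /* handle (reserved) */
        example_control,
        NULL,                          /* timeout */
        NULL,                          /* outputv */
        NULL,                          /* ready_async */
        NULL,                          /* flush */
        NULL,                          /* call */
        NULL,                          /* event */
        ERL_DRV_EXTENDED_MARKER,
        ERL_DRV_EXTENDED_MAJOR_VERSION,
        ERL_DRV_EXTENDED_MINOR_VERSION,
        0,                             /* driver_flags */
        NULL,                          /* handle2 (reserved) */
        NULL,                          /* process_exit */
        NULL                           /* stop_select */
    };

    DRIVER_INIT(example_drv) { return &example_driver_entry; }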
diff --git a/erts/doc/src/epmd.xml b/erts/doc/src/epmd.xml
index 411e627c85..3e7005410f 100644
--- a/erts/doc/src/epmd.xml
+++ b/erts/doc/src/epmd.xml
@@ -120,7 +120,7 @@
<item>
<p>Let this instance of <c>epmd</c> listen only on the
comma-separated list of IP addresses and on the loopback address
- (which is implicitely added to the list if it has not been
+ (which is implicitly added to the list if it has not been
specified). This can also be set using the
<c><![CDATA[ERL_EPMD_ADDRESS]]></c> environment variable, see the
section <seealso marker="#environment_variables">Environment
@@ -243,7 +243,7 @@
<p>This environment variable may be set to a comma-separated
list of IP addresses, in which case the <c>epmd</c> daemon
will listen only on the specified address(es) and on the
- loopback address (which is implicitely added to the list if it
+ loopback address (which is implicitly added to the list if it
has not been specified). The default behaviour is to listen on
all available IP addresses.</p>
</item>
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 514ee5ffaf..cfbc38f176 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -41,25 +41,11 @@
to scroll back to text which has scrolled off the screen.
The <c><![CDATA[erl]]></c> program must be used, however, in pipelines or if
you want to redirect standard input or output.</p>
- <note><p>As of ERTS version 5.8 (OTP-R14A) the runtime system will by
- default bind schedulers to logical processors using the
- <c>default_bind</c> bind type if the amount of schedulers are
- at least equal to the amount of logical processors configured,
- binding of schedulers is supported, and a CPU topology is
- available at startup.
- </p><p>
- If the Erlang runtime system is the only operating system
- process that binds threads to logical processors, this
- improves the performance of the runtime system. However,
- if other operating system processes (as for example
- another Erlang runtime system) also bind threads to
- logical processors, there might be a performance penalty
- instead. If this is the case you, are are advised to
- unbind the schedulers using the
- <seealso marker="#+sbt">+sbtu</seealso> command line argument,
- or by invoking
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type,
- unbound)</seealso>.</p>
+ <note><p>As of ERTS version 5.9 (OTP-R15B) the runtime system will by
+ default <em>not</em> bind schedulers to logical processors.
+ For more information, see the documentation of the
+ <seealso marker="#+sbt">+sbt</seealso> system flag.
+ </p>
</note>
</description>
<funcs>
@@ -587,6 +573,13 @@
<p>Enables auto load tracing, displaying info while loading
code.</p>
</item>
+ <tag><c><![CDATA[+L]]></c></tag>
+ <item>
+ <p>Don't load information about source filenames and line numbers.
+ This will save some memory, but exceptions will not contain
+ information about the filenames and line numbers.
+ </p>
+ </item>
<tag><marker id="erts_alloc"><c><![CDATA[+MFlag Value]]></c></marker></tag>
<item>
<p>Memory allocator specific flags, see
@@ -672,41 +665,66 @@
</p>
<taglist>
<tag><c>u</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, unbound)</seealso>.
- </p></item>
+ <item>
+ <p><c>unbound</c> - Schedulers will not be bound to logical
+ processors, i.e., the operating system decides where the
+ scheduler threads execute, and when to migrate them. This is
+ the default.</p>
+ </item>
<tag><c>ns</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, no_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>no_spread</c> - Schedulers with close scheduler
+ identifiers will be bound as close as possible in hardware.</p>
+ </item>
<tag><c>ts</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, thread_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>thread_spread</c> - Thread refers to hardware threads
+ (e.g. Intel's hyper-threads). Schedulers with low scheduler
+ identifiers will be bound to the first hardware thread of
+ each core, then schedulers with higher scheduler identifiers
+ will be bound to the second hardware thread of each core,
+ etc.</p>
+ </item>
<tag><c>ps</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, processor_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>processor_spread</c> - Schedulers will be spread like
+ <c>thread_spread</c>, but also over physical processor chips.</p>
+ </item>
<tag><c>s</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>spread</c> - Schedulers will be spread as much as
+ possible.</p>
+ </item>
<tag><c>nnts</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, no_node_thread_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>no_node_thread_spread</c> - Like <c>thread_spread</c>,
+ but if multiple NUMA (Non-Uniform Memory Access) nodes exist,
+ schedulers will be spread over one NUMA node at a time,
+ i.e., all logical processors of one NUMA node will be bound
+ to schedulers in sequence.</p>
+ </item>
<tag><c>nnps</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, no_node_processor_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>no_node_processor_spread</c> - Like
+ <c>processor_spread</c>, but if multiple NUMA nodes exist,
+ schedulers will be spread over one NUMA node at a time, i.e.,
+ all logical processors of one NUMA node will be bound to
+ schedulers in sequence.</p>
+ </item>
<tag><c>tnnps</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, thread_no_node_processor_spread)</seealso>.
- </p></item>
+ <item>
+ <p><c>thread_no_node_processor_spread</c> - A combination of
+ <c>thread_spread</c> and <c>no_node_processor_spread</c>.
+ Schedulers will be spread over hardware threads across NUMA
+ nodes, but schedulers will only be spread over processors
+ internally in one NUMA node at a time.</p>
+ </item>
<tag><c>db</c></tag>
- <item><p>Same as
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, default_bind)</seealso>.
- </p></item>
+ <item>
+ <p><c>default_bind</c> - Binds schedulers the default way.
+ Currently the default is <c>thread_no_node_processor_spread</c>
+ (which might change in the future).</p>
+ </item>
</taglist>
<p>Binding of schedulers is currently only supported on newer
Linux, Solaris, FreeBSD, and Windows systems.</p>
@@ -718,24 +736,47 @@
that the <c>+sct</c> flag may have to be passed before the
<c>+sbt</c> flag on the command line (in case no CPU topology
has been automatically detected).</p>
- <p>The runtime system will by default bind schedulers to logical
- processors using the <c>default_bind</c> bind type if the amount
- of schedulers are at least equal to the amount of logical
- processors configured, binding of schedulers is supported,
- and a CPU topology is available at startup.
+ <p>The runtime system will by default <em>not</em> bind schedulers
+ to logical processors.
</p>
- <p><em>NOTE:</em> If the Erlang runtime system is the only operating
- system process that binds threads to logical processors, this
- improves the performance of the runtime system. However, if other
- operating system processes (as for example another Erlang runtime
- system) also bind threads to logical processors, there might be a
- performance penalty instead. If this is the case you, are advised
- to unbind the schedulers using the <c>+sbtu</c> command line
- argument, or by invoking
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type,
- unbound)</seealso>.</p>
- <p>For more information, see
- <seealso marker="erlang#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, SchedulerBindType)</seealso>.
+ <p><em>NOTE:</em> If the Erlang runtime system is the only operating system
+ process that binds threads to logical processors, this
+ improves the performance of the runtime system. However,
+ if other operating system processes (as for example
+ another Erlang runtime system) also bind threads to
+ logical processors, there might be a performance penalty
+ instead. In some cases this performance penalty might be
+ severe. If this is the case, you are advised to not
+ bind the schedulers.</p>
+ <p>How schedulers are bound matters. For example, in
+ situations when there are fewer running processes than
+ schedulers online, the runtime system tries to migrate
+ processes to schedulers with low scheduler identifiers.
+ The more the schedulers are spread over the hardware,
+ the more resources will be available to the runtime
+ system in such situations.
+ </p>
+ <p>
+ <em>NOTE:</em> If a scheduler fails to bind, this
+ will often be silently ignored. This is because it isn't always
+ possible to verify valid logical processor identifiers. If
+ an error is reported, it will be reported to the
+ <c>error_logger</c>. If you want to verify that the
+ schedulers actually have been bound as requested, call
+ <seealso marker="erlang#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
+ </p>
+ </item>
+ <tag><marker id="+scl"><c>+scl true|false</c></marker></tag>
+ <item>
+ <p>Enable or disable scheduler compaction of load. By default
+ scheduler compaction of load is enabled. When enabled, load
+ balancing will strive for a load distribution which causes
+ as many scheduler threads as possible to be fully loaded (i.e.,
+ not run out of work). This is accomplished by migrating load
+ (e.g. runnable processes) into a smaller set of schedulers
+ when schedulers frequently run out of work. When disabled,
+ the frequency with which schedulers run out of work will
+ not be taken into account by the load balancing logic.
</p>
</item>
<tag><marker id="+sct"><c>+sct CpuTopology</c></marker></tag>
@@ -753,6 +794,12 @@
<item><c><![CDATA[<IdDefs> = <LogicalIds><ThreadIds><CoreIds><ProcessorIds><NodeIds> | <LogicalIds><ThreadIds><CoreIds><NodeIds><ProcessorIds>]]></c></item>
<item><c><![CDATA[CpuTopology = <IdDefs>:<IdDefs> | <IdDefs>]]></c></item>
</list>
+ <p>Set a user defined CPU topology. The user defined
+ CPU topology will override any automatically detected
+ CPU topology. The CPU topology is used when
+ <seealso marker="#+sbt">binding schedulers to logical
+ processors</seealso>.
+ </p>
<p>Upper-case letters signify real identifiers and lower-case
letters signify fake identifiers only used for description
of the topology. Identifiers passed as real identifiers may
@@ -852,7 +899,7 @@
how the real CPU topology looks like is likely to
decrease the performance of the runtime system.</p>
<p>For more information, see
- <seealso marker="erlang#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.</p>
+ <seealso marker="erlang#system_info_cpu_topology">erlang:system_info(cpu_topology)</seealso>.</p>
</item>
<tag><marker id="+swt"><c>+swt very_low|low|medium|high|very_high</c></marker></tag>
<item>
@@ -987,7 +1034,7 @@
the <c><![CDATA[-extra]]></c> section, i.e. the end of the command line
following after an <c><![CDATA[-extra]]></c> flag.</p>
</item>
- <tag><c><![CDATA[ERL_ZFLAGS]]></c>and <c><![CDATA[ERL_FLAGS]]></c></tag>
+ <tag><c><![CDATA[ERL_ZFLAGS]]></c> and <c><![CDATA[ERL_FLAGS]]></c></tag>
<item>
<p>The content of these environment variables will be added to the
end of the command line for <c><![CDATA[erl]]></c>.</p>
@@ -1010,7 +1057,7 @@
list of IP addresses, in which case the
<seealso marker="epmd">epmd</seealso> daemon
will listen only on the specified address(es) and on the
- loopback address (which is implicitely added to the list if it
+ loopback address (which is implicitly added to the list if it
has not been specified).</p>
</item>
<tag><c><![CDATA[ERL_EPMD_PORT]]></c></tag>
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index 2fb03954b6..4fd74b783e 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -37,15 +37,18 @@
<p>As of erts version 5.5.3 the driver interface has been extended
(see <seealso marker="driver_entry#extended_marker">extended marker</seealso>).
The extended interface introduce
- <seealso marker="erl_driver#version_management">version management</seealso>,
+ <seealso marker="#version_management">version management</seealso>,
the possibility to pass capability flags
(see <seealso marker="driver_entry#driver_flags">driver flags</seealso>)
to the runtime system at driver initialization, and some new
driver API functions. </p>
<note>
- <p>Old drivers (compiled with an <c>erl_driver.h</c> from an
- earlier erts version than 5.5.3) have to be recompiled
- (but does not have to use the extended interface).</p>
+ <p>As of erts version 5.9 old drivers have to be recompiled
+ and have to use the extended interface. They also have to be
+ adjusted to the
+ <seealso marker="#rewrites_for_64_bits">64-bit capable driver interface.
+ </seealso>
+ </p>
</note>
<p>The driver calls back to the emulator, using the API
functions declared in <c>erl_driver.h</c>. They are used for
@@ -242,9 +245,9 @@
</p>
</item>
<tag>Adding / removing drivers</tag>
- <item>A driver can add and later remove drivers.</item>
+ <item><p>A driver can add and later remove drivers.</p></item>
<tag>Monitoring processes</tag>
- <item>A driver can monitor a process that does not own a port.</item>
+ <item><p>A driver can monitor a process that does not own a port.</p></item>
<tag><marker id="version_management">Version management</marker></tag>
<item>
<p>Version management is enabled for drivers that have set the
@@ -268,15 +271,203 @@
versions differ, or if the major versions are equal and the
minor version used by the driver is greater than the one used
by the runtime system.</p>
- <p>The emulator tries to check that a driver that doesn't use the
- extended driver interface isn't incompatible when loading it.
- It can, however, not make sure that it isn't incompatible. Therefore,
- when loading a driver that doesn't use the extended driver
- interface, there is a risk that it will be loaded also when
- the driver is incompatible. When the driver uses the extended driver
- interface, the emulator can verify that it isn't of an incompatible
- driver version. You are therefore advised to use the extended driver
- interface.</p>
+ <p>The emulator will refuse to load a driver that does not use
+ the extended driver interface since,
+ to allow for 64-bit capable drivers,
+ incompatible type changes for the callbacks
+ <seealso marker="driver_entry#output">output</seealso>,
+ <seealso marker="driver_entry#control">control</seealso> and
+ <seealso marker="driver_entry#call">call</seealso>
+ were introduced in release R15B. A driver written
+ with the old types would compile with warnings and, when
+ called, return garbage sizes to the emulator, causing it
+ to read random memory and create huge, incorrect result blobs.</p>
+ <p>Therefore it is not enough to just recompile drivers written with
+ version management for pre-R15B types; the types have to be changed
+ in the driver, which in turn suggests other rewrites, especially
+ regarding size variables. Investigate all warnings when recompiling!</p>
+ <p>Also, the API driver functions <c>driver_output*</c>,
+ <c>driver_vec_to_buf</c>, <c>driver_alloc/realloc*</c>
+ and the <c>driver_*</c> queue functions were changed to have
+ larger length arguments and return values. This is a
+ lesser problem, since code that passes smaller types
+ will get them auto-converted in the calls, and as long as
+ the driver does not handle sizes that overflow an <c>int</c>,
+ all will work as before.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="rewrites_for_64_bits"/>
+ <title>
+ REWRITES FOR 64-BIT DRIVER INTERFACE
+ </title>
+ <p>
+ For erts-5.9 two new integer types
+ <seealso marker="#ErlDrvSizeT">ErlDrvSizeT</seealso> and
+ <seealso marker="#ErlDrvSSizeT">ErlDrvSSizeT</seealso>
+ were introduced that can hold 64-bit sizes if necessary.
+ </p>
+ <p>
+ Not updating a driver and merely recompiling it will probably work
+ when building for a 32-bit machine, creating a false sense of security.
+ Hopefully the compilation will still generate many important warnings.
+ But when the same driver is later recompiled for a 64-bit machine
+ there <em>will</em> be warnings and almost certainly crashes.
+ So it is a BAD idea to postpone updating the driver and
+ to leave the warnings unfixed!
+ </p>
+ <p>
+ When recompiling with <c>gcc</c> use the <c>-Wstrict-prototypes</c>
+ flag to get better warnings. Try to find a similar flag if you
+ are using some other compiler.
+ </p>
+ <p>
+ Here follows a checklist for rewriting a pre-erts-5.9 driver,
+ most important items first.
+ </p>
+ <taglist>
+ <tag>Return types for driver callbacks</tag>
+ <item>
+ <p>
+ Rewrite driver callback
+ <c><seealso marker="driver_entry#control">control</seealso></c>
+ to use return type <c>ErlDrvSSizeT</c> instead of <c>int</c>.
+ </p>
+ <p>
+ Rewrite driver callback
+ <c><seealso marker="driver_entry#call">call</seealso></c>
+ to use return type <c>ErlDrvSSizeT</c> instead of <c>int</c>.
+ </p>
+ <note>
+ <p>
+ These changes are essential to avoid crashing the emulator,
+ or worse, causing silent malfunction.
+ Without them a driver may return garbage in the high 32 bits
+ to the emulator, causing it to build a huge result from random
+ bytes, either crashing on memory allocation or succeeding with
+ a random result from the driver call.
+ </p>
+ </note>
+ </item>
+ <tag>Arguments to driver callbacks</tag>
+ <item>
+ <p>
+ Driver callback
+ <c><seealso marker="driver_entry#output">output</seealso></c>
+ now gets <c>ErlDrvSizeT</c> as 3rd argument instead
+ of previously <c>int</c>.
+ </p>
+ <p>
+ Driver callback
+ <c><seealso marker="driver_entry#control">control</seealso></c>
+ now gets <c>ErlDrvSizeT</c> as 4th and 6th arguments instead
+ of previously <c>int</c>.
+ </p>
+ <p>
+ Driver callback
+ <c><seealso marker="driver_entry#call">call</seealso></c>
+ now gets <c>ErlDrvSizeT</c> as 4th and 6th arguments instead
+ of previously <c>int</c>.
+ </p>
+ <p>
+ With a sane compiler's calling conventions these changes are probably
+ necessary only for a driver that handles data chunks requiring
+ 64-bit size fields (mostly chunks larger than 2 GB, since that is what
+ a 32-bit <c>int</c> can hold). But it is possible to think
+ of less sane calling conventions that would make the driver
+ callbacks mix up the arguments, causing malfunction.
+ </p>
+ <note>
+ <p>
+ The argument type change is from signed to unsigned which
+ may cause problems for e.g. loop termination conditions or
+ error conditions if you just change the types all over the place.
+ </p>
+ </note>
+ </item>
+ <tag>Larger <c>size</c> field in <c>ErlIOVec</c></tag>
+ <item>
+ <p>
+ The <c>size</c> field in
+ <seealso marker="#ErlIOVec"><c>ErlIOVec</c></seealso>
+ has been changed to <c>ErlDrvSizeT</c> from <c>int</c>.
+ Check all code that uses that field.
+ </p>
+ <p>
+ Automatic type casting probably makes these changes necessary only
+ for a driver that encounters sizes too large to fit in 32 bits.
+ </p>
+ <note>
+ <p>
+ The <c>size</c> field changed from signed to unsigned, which
+ may cause problems for, e.g., loop termination conditions or
+ error conditions if you just change the types all over the place.
+ </p>
+ </note>
+ </item>
+ <tag>Arguments and return values in the driver API</tag>
+ <item>
+ <p>
+ Many driver API functions have changed their argument types
+ and/or return values to <c>ErlDrvSizeT</c>, mostly from <c>int</c>.
+ Automatic type casting probably makes these changes necessary only
+ for a driver that encounters sizes too large to fit in 32 bits.
+ </p>
+ <taglist>
+ <tag><seealso marker="#driver_output">driver_output</seealso></tag>
+ <item>3rd argument</item>
+ <tag><seealso marker="#driver_output2">driver_output2</seealso></tag>
+ <item>3rd and 5th arguments</item>
+ <tag>
+ <seealso marker="#driver_output_binary">driver_output_binary</seealso>
+ </tag>
+ <item>3rd, 5th, and 6th arguments</item>
+ <tag><seealso marker="#driver_outputv">driver_outputv</seealso></tag>
+ <item>3rd and 5th arguments</item>
+ <tag>
+ <seealso marker="#driver_vec_to_buf">driver_vec_to_buf</seealso>
+ </tag>
+ <item>3rd argument and return value</item>
+ <tag><seealso marker="#driver_alloc">driver_alloc</seealso></tag>
+ <item>1st argument</item>
+ <tag><seealso marker="#driver_realloc">driver_realloc</seealso></tag>
+ <item>2nd argument</item>
+ <tag>
+ <seealso marker="#driver_alloc_binary">driver_alloc_binary</seealso>
+ </tag>
+ <item>1st argument</item>
+ <tag>
+ <seealso marker="#driver_realloc_binary">driver_realloc_binary</seealso>
+ </tag>
+ <item>2nd argument</item>
+ <tag><seealso marker="#driver_enq">driver_enq</seealso></tag>
+ <item>3rd argument</item>
+ <tag><seealso marker="#driver_pushq">driver_pushq</seealso></tag>
+ <item>3rd argument</item>
+ <tag><seealso marker="#driver_deq">driver_deq</seealso></tag>
+ <item>2nd argument and return value</item>
+ <tag><seealso marker="#driver_sizeq">driver_sizeq</seealso></tag>
+ <item>return value</item>
+ <tag><seealso marker="#driver_enq_bin">driver_enq_bin</seealso></tag>
+ <item>3rd and 4th arguments</item>
+ <tag><seealso marker="#driver_pushq_bin">driver_pushq_bin</seealso></tag>
+ <item>3rd and 4th arguments</item>
+ <tag><seealso marker="#driver_enqv">driver_enqv</seealso></tag>
+ <item>3rd argument</item>
+ <tag><seealso marker="#driver_pushqv">driver_pushqv</seealso></tag>
+ <item>3rd argument</item>
+ <tag><seealso marker="#driver_peekqv">driver_peekqv</seealso></tag>
+ <item>return value</item>
+ </taglist>
+ <note>
+ <p>
+ This is a change from signed to unsigned, which
+ may cause problems for, e.g., loop termination conditions and
+ error conditions if you just change the types all over the place.
+ </p>
+ </note>
</item>
</taglist>
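+ <p>
+ The following is a minimal sketch of what the updated callback
+ signatures from the checklist above can look like. The callbacks
+ <c>my_drv_output</c> and <c>my_drv_control</c> and the processing loop
+ are hypothetical; only the <c>ErlDrvSizeT</c> and <c>ErlDrvSSizeT</c>
+ types and the <c>driver_output</c> call are part of the driver API.
+ The sketch also assumes that the driver stored its <c>ErlDrvPort</c>
+ as the <c>ErlDrvData</c> handle in its start callback, which is a
+ common pattern.
+ </p>
+ <code type="none">
+/* Sketch of post erts-5.9 callback signatures (hypothetical driver). */
+
+static void my_drv_output(ErlDrvData handle, char *buf, ErlDrvSizeT len)
+{
+    /* The 3rd argument is now ErlDrvSizeT (unsigned), not int. */
+    driver_output((ErlDrvPort) handle, buf, len);
+}
+
+static ErlDrvSSizeT my_drv_control(ErlDrvData handle, unsigned int command,
+                                   char *buf, ErlDrvSizeT len,
+                                   char **rbuf, ErlDrvSizeT rlen)
+{
+    /* Return type is ErlDrvSSizeT; the 4th and 6th arguments are ErlDrvSizeT. */
+    ErlDrvSizeT i;
+
+    /* Beware of unsigned countdown loops after the type change:
+       "for (i = len - 1; i >= 0; i--)" never terminates when i is
+       unsigned. Count down like this instead: */
+    for (i = len; i > 0; i--) {
+        /* process buf[i - 1] ... */
+    }
+    return 0; /* number of bytes placed in *rbuf */
+}
+ </code>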
</section>
@@ -285,7 +476,11 @@
<title>DATA TYPES</title>
<taglist>
- <tag><marker id="ErlDrvSysInfo"/>ErlDrvSysInfo</tag>
+ <tag><marker id="ErlDrvSizeT"/>ErlDrvSizeT</tag>
+ <item><p>An unsigned integer type to be used as <c>size_t</c>.</p></item>
+ <tag><marker id="ErlDrvSSizeT"/>ErlDrvSSizeT</tag>
+ <item><p>A signed integer type of the same size as <c>ErlDrvSizeT</c>.</p></item>
+ <tag><marker id="ErlDrvSysInfo"/>ErlDrvSysInfo</tag>
<item>
<p/>
<code type="none">
@@ -332,12 +527,12 @@ typedef struct ErlDrvSysInfo {
<tag><c>erts_version</c></tag>
<item>A string containing the version number of the runtime system
(the same as returned by
- <seealso marker="erts:erlang#system_info_version">erlang:system_info(version)</seealso>).
+ <seealso marker="erlang#system_info_version">erlang:system_info(version)</seealso>).
</item>
<tag><c>otp_release</c></tag>
<item>A string containing the OTP release number
(the same as returned by
- <seealso marker="erts:erlang#system_info_otp_release">erlang:system_info(otp_release)</seealso>).
+ <seealso marker="erlang#system_info_otp_release">erlang:system_info(otp_release)</seealso>).
</item>
<tag><c>thread_support</c></tag>
<item>A value <c>!= 0</c> if the runtime system has thread support;
@@ -351,12 +546,12 @@ typedef struct ErlDrvSysInfo {
<item>The number of async threads in the async thread pool used
by <seealso marker="#driver_async">driver_async()</seealso>
(the same as returned by
- <seealso marker="erts:erlang#system_info_thread_pool_size">erlang:system_info(thread_pool_size)</seealso>).
+ <seealso marker="erlang#system_info_thread_pool_size">erlang:system_info(thread_pool_size)</seealso>).
</item>
<tag><c>scheduler_threads</c></tag>
<item>The number of scheduler threads used by the runtime system
(the same as returned by
- <seealso marker="erts:erlang#system_info_schedulers">erlang:system_info(schedulers)</seealso>).
+ <seealso marker="erlang#system_info_schedulers">erlang:system_info(schedulers)</seealso>).
</item>
<tag><c>nif_major_version</c></tag>
<item>The value of <c>ERL_NIF_MAJOR_VERSION</c> when the runtime system was compiled.
@@ -371,7 +566,7 @@ typedef struct ErlDrvSysInfo {
<p/>
<code type="none">
typedef struct ErlDrvBinary {
- int orig_size;
+ ErlDrvSint orig_size;
char orig_bytes[];
} ErlDrvBinary;
</code>
@@ -423,7 +618,7 @@ typedef struct ErlDrvBinary {
<item>
<p>The <c>ErlDrvData</c> is a handle to driver-specific data,
passed to the driver call-backs. It is a pointer, and is
- most often casted to a specific pointer in the driver.</p>
+ most often cast to a specific pointer type in the driver.</p>
</item>
<tag>SysIOVec</tag>
<item>
@@ -437,7 +632,7 @@ typedef struct ErlDrvBinary {
<code type="none">
typedef struct ErlIOVec {
int vsize;
- int size;
+ ErlDrvSizeT size;
SysIOVec* iov;
ErlDrvBinary** binv;
} ErlIOVec;
@@ -630,7 +825,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_output(ErlDrvPort port, char *buf, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_output(ErlDrvPort port, char *buf, ErlDrvSizeT len)</nametext></name>
<fsummary>Send data from driver to port owner</fsummary>
<desc>
<marker id="driver_output"></marker>
@@ -650,7 +845,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_output2(ErlDrvPort port, char *hbuf, int hlen, char *buf, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_output2(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen, char *buf, ErlDrvSizeT len)</nametext></name>
<fsummary>Send data and binary data to port owner</fsummary>
<desc>
<marker id="driver_output2"></marker>
@@ -665,7 +860,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_output_binary(ErlDrvPort port, char *hbuf, int hlen, ErlDrvBinary* bin, int offset, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_output_binary(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen, ErlDrvBinary* bin, ErlDrvSizeT offset, ErlDrvSizeT len)</nametext></name>
<fsummary>Send data from a driver binary to port owner</fsummary>
<desc>
<marker id="driver_output_binary"></marker>
@@ -688,7 +883,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_outputv(ErlDrvPort port, char* hbuf, int hlen, ErlIOVec *ev, int skip)</nametext></name>
+ <name><ret>int</ret><nametext>driver_outputv(ErlDrvPort port, char* hbuf, ErlDrvSizeT hlen, ErlIOVec *ev, ErlDrvSizeT skip)</nametext></name>
<fsummary>Send vectorized data to port owner</fsummary>
<desc>
<marker id="driver_outputv"></marker>
@@ -711,7 +906,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_vec_to_buf(ErlIOVec *ev, char *buf, int len)</nametext></name>
+ <name><ret>ErlDrvSizeT</ret><nametext>driver_vec_to_buf(ErlIOVec *ev, char *buf, ErlDrvSizeT len)</nametext></name>
<fsummary>Collect data segments into a buffer</fsummary>
<desc>
<marker id="driver_vec_to_buf"></marker>
@@ -738,7 +933,7 @@ typedef struct ErlIOVec {
<c>time</c> parameter is the time in milliseconds before the
timer expires.</p>
<p>When the timer reaches 0 and expires, the driver entry
- function <seealso marker="driver_entry#emulator">timeout</seealso> is called.</p>
+ function <seealso marker="driver_entry#timeout">timeout</seealso> is called.</p>
<p>Note that there is only one timer on each driver instance;
setting a new timer will replace an older one.</p>
<p>Return value is 0 (-1 only when the <c>timeout</c> driver
@@ -832,7 +1027,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>void *</ret><nametext>driver_alloc(size_t size)</nametext></name>
+ <name><ret>void *</ret><nametext>driver_alloc(ErlDrvSizeT size)</nametext></name>
<fsummary>Allocate memory</fsummary>
<desc>
<marker id="driver_alloc"></marker>
@@ -846,7 +1041,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>void *</ret><nametext>driver_realloc(void *ptr, size_t size)</nametext></name>
+ <name><ret>void *</ret><nametext>driver_realloc(void *ptr, ErlDrvSizeT size)</nametext></name>
<fsummary>Resize an allocated memory block</fsummary>
<desc>
<marker id="driver_realloc"></marker>
@@ -872,7 +1067,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>ErlDrvBinary*</ret><nametext>driver_alloc_binary(int size)</nametext></name>
+ <name><ret>ErlDrvBinary*</ret><nametext>driver_alloc_binary(ErlDrvSizeT size)</nametext></name>
<fsummary>Allocate a driver binary</fsummary>
<desc>
<marker id="driver_alloc_binary"></marker>
@@ -892,7 +1087,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>ErlDrvBinary*</ret><nametext>driver_realloc_binary(ErlDrvBinary *bin, int size)</nametext></name>
+ <name><ret>ErlDrvBinary*</ret><nametext>driver_realloc_binary(ErlDrvBinary *bin, ErlDrvSizeT size)</nametext></name>
<fsummary>Resize a driver binary</fsummary>
<desc>
<marker id="driver_realloc_binary"></marker>
@@ -958,7 +1153,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_enq(ErlDrvPort port, char* buf, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_enq(ErlDrvPort port, char* buf, ErlDrvSizeT len)</nametext></name>
<fsummary>Enqueue data in the driver queue</fsummary>
<desc>
<marker id="driver_enq"></marker>
@@ -982,7 +1177,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_pushq(ErlDrvPort port, char* buf, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_pushq(ErlDrvPort port, char* buf, ErlDrvSizeT len)</nametext></name>
<fsummary>Push data at the head of the driver queue</fsummary>
<desc>
<marker id="driver_pushq"></marker>
@@ -997,7 +1192,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_deq(ErlDrvPort port, int size)</nametext></name>
+ <name><ret>ErlDrvSizeT</ret><nametext>driver_deq(ErlDrvPort port, ErlDrvSizeT size)</nametext></name>
<fsummary>Dequeue data from the head of the driver queue</fsummary>
<desc>
<marker id="driver_deq"></marker>
@@ -1013,7 +1208,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_sizeq(ErlDrvPort port)</nametext></name>
+ <name><ret>ErlDrvSizeT</ret><nametext>driver_sizeq(ErlDrvPort port)</nametext></name>
<fsummary>Return the size of the driver queue</fsummary>
<desc>
<marker id="driver_sizeq"></marker>
@@ -1026,7 +1221,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_enq_bin(ErlDrvPort port, ErlDrvBinary *bin, int offset, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_enq_bin(ErlDrvPort port, ErlDrvBinary *bin, ErlDrvSizeT offset, ErlDrvSizeT len)</nametext></name>
<fsummary>Enqueue binary in the driver queue</fsummary>
<desc>
<marker id="driver_enq_bin"></marker>
@@ -1043,7 +1238,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_pushq_bin(ErlDrvPort port, ErlDrvBinary *bin, int offset, int len)</nametext></name>
+ <name><ret>int</ret><nametext>driver_pushq_bin(ErlDrvPort port, ErlDrvBinary *bin, ErlDrvSizeT offset, ErlDrvSizeT len)</nametext></name>
<fsummary>Push binary at the head of the driver queue</fsummary>
<desc>
<marker id="driver_pushq_bin"></marker>
@@ -1060,13 +1255,35 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
+ <name><ret>ErlDrvSizeT</ret><nametext>driver_peekqv(ErlDrvPort port, ErlIOVec *ev)</nametext></name>
+ <fsummary>Get the driver queue as an IO vector</fsummary>
+ <desc>
+ <marker id="driver_peekqv"></marker>
+ <p>
+ This function retrieves the driver queue into a supplied
+ <c>ErlIOVec</c> <c>ev</c>. It also returns the queue size.
+ This is one of two ways to get data out of the queue.
+ </p>
+ <p>
+ If <c>ev</c> is <c>NULL</c>, all ones, i.e. <c>-1</c> type cast to
+ <c>ErlDrvSizeT</c>, is returned.
+ </p>
+ <p>Nothing is removed from the queue by this function; that must be done
+ with <c>driver_deq</c>.</p>
+ <p>This function can be called from an arbitrary thread if a
+ <seealso marker="#ErlDrvPDL">port data lock</seealso>
+ associated with the <c>port</c> is locked by the calling
+ thread during the call.</p>
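+ <p>A minimal usage sketch (the surrounding callback and the
+ processing of the segments are hypothetical; only
+ <c>driver_peekqv</c>, <c>driver_deq</c> and the <c>ErlIOVec</c>
+ fields are part of the API, and the usual
+ <c>iov_base</c>/<c>iov_len</c> members of <c>SysIOVec</c> are
+ assumed):</p>
+ <code type="none">
+ErlIOVec ev;
+ErlDrvSizeT queued = driver_peekqv(port, &amp;ev);  /* total queued bytes */
+int i;
+
+for (i = 0; i &lt; ev.vsize; i++) {
+    /* ev.iov[i].iov_base and ev.iov[i].iov_len describe each segment */
+}
+
+/* Nothing was removed above; dequeue the inspected bytes explicitly. */
+driver_deq(port, queued);
+ </code>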
+ </desc>
+ </func>
+ <func>
<name><ret>SysIOVec*</ret><nametext>driver_peekq(ErlDrvPort port, int *vlen)</nametext></name>
<fsummary>Get the driver queue as a vector</fsummary>
<desc>
<marker id="driver_peekq"></marker>
<p>This function retrieves the driver queue as a pointer to an
array of <c>SysIOVec</c>s. It also returns the number of
- elements in <c>vlen</c>. This is the only way to get data
+ elements in <c>vlen</c>. This is one of two ways to get data
out of the queue.</p>
 <p>Nothing is removed from the queue by this function; that must be done
with <c>driver_deq</c>.</p>
@@ -1079,7 +1296,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_enqv(ErlDrvPort port, ErlIOVec *ev, int skip)</nametext></name>
+ <name><ret>int</ret><nametext>driver_enqv(ErlDrvPort port, ErlIOVec *ev, ErlDrvSizeT skip)</nametext></name>
<fsummary>Enqueue vector in the driver queue</fsummary>
<desc>
<marker id="driver_enqv"></marker>
@@ -1095,7 +1312,7 @@ typedef struct ErlIOVec {
</desc>
</func>
<func>
- <name><ret>int</ret><nametext>driver_pushqv(ErlDrvPort port, ErlIOVec *ev, int skip)</nametext></name>
+ <name><ret>int</ret><nametext>driver_pushqv(ErlDrvPort port, ErlIOVec *ev, ErlDrvSizeT skip)</nametext></name>
<fsummary>Push vector at the head of the driver queue</fsummary>
<desc>
<marker id="driver_pushqv"></marker>
@@ -1494,7 +1711,7 @@ ERL_DRV_EXT2TERM char *buf, ErlDrvUInt len
term encoded with the
<seealso marker="erl_ext_dist">external format</seealso>,
i.e., a term that has been encoded by
- <seealso marker="erts:erlang#term_to_binary/2">erlang:term_to_binary</seealso>,
+ <seealso marker="erlang#term_to_binary/2">erlang:term_to_binary</seealso>,
<seealso marker="erl_interface:ei">erl_interface</seealso>, etc.
For example, if <c>binp</c> is a pointer to an <c>ErlDrvBinary</c>
that contains the term <c>{17, 4711}</c> encoded with the
@@ -1638,12 +1855,19 @@ ERL_DRV_EXT2TERM char *buf, ErlDrvUInt len
<fsummary>Cancel an asynchronous call</fsummary>
<desc>
<marker id="driver_async_cancel"></marker>
- <p>This function cancels an asynchronous operation, by removing
- it from the queue. Only functions in the queue can be
- cancelled; if a function is executing, it's too late to
- cancel it. The <c>async_free</c> function is also called.</p>
- <p>The return value is 1 if the operation was removed from the
- queue, otherwise 0.</p>
+ <p>This function used to cancel a scheduled asynchronous operation,
+ if it was still in the queue. It returned 1 if it succeeded, and
+ 0 if it failed.</p>
+ <p>Since it could not guarantee success, it was more or less useless.
+ The user had to implement synchronization of cancellation anyway.
+ It also unnecessarily complicated the implementation. Therefore,
+ as of OTP-R15B <c>driver_async_cancel()</c> is deprecated, and
+ scheduled for removal in OTP-R16. It will currently always fail,
+ and return 0.</p>
+ <warning><p><c>driver_async_cancel()</c> is deprecated and will
+ be removed in the OTP-R16 release.</p>
+ </warning>
+
</desc>
</func>
<func>
@@ -1687,7 +1911,7 @@ ERL_DRV_EXT2TERM char *buf, ErlDrvUInt len
The driver defined handle is normally created in the
<seealso marker="driver_entry#start">driver start call-back</seealso>
when a port is created via
- <seealso marker="erts:erlang#open_port/2">erlang:open_port/2</seealso>. </item>
+ <seealso marker="erlang#open_port/2">erlang:open_port/2</seealso>. </item>
</taglist>
<p>The caller of <c>driver_create_port()</c> is allowed to
manipulate the newly created port when <c>driver_create_port()</c>
@@ -2455,7 +2679,7 @@ ERL_DRV_EXT2TERM char *buf, ErlDrvUInt len
<title>SEE ALSO</title>
<p><seealso marker="driver_entry">driver_entry(3)</seealso>,
<seealso marker="kernel:erl_ddll">erl_ddll(3)</seealso>,
- <seealso marker="erts:erlang">erlang(3)</seealso></p>
+ <seealso marker="erlang">erlang(3)</seealso></p>
<p>An Alternative Distribution Driver (ERTS User's
Guide Ch. 3)</p>
</section>
diff --git a/erts/doc/src/erl_ext_fig.gif b/erts/doc/src/erl_ext_fig.gif
index 14d6bbc871..14d6bbc871 100755..100644
--- a/erts/doc/src/erl_ext_fig.gif
+++ b/erts/doc/src/erl_ext_fig.gif
Binary files differ
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index cdce4ec0b8..5fc6508aad 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -136,9 +136,7 @@ ok
then retrieved by calling <seealso marker="#enif_priv_data">enif_priv_data</seealso>.</p>
<p>There is no way to explicitly unload a NIF library. A library will be
automatically unloaded when the module code that it belongs to is purged
- by the code server. A NIF library will also be unloaded if it is replaced
- by another version of the library by a second call to
- <c>erlang:load_nif/2</c> from the same module code.</p>
+ by the code server.</p>
</description>
<section>
<title>FUNCTIONALITY</title>
@@ -216,14 +214,14 @@ ok
<p/>
<code type="none">
ERL_NIF_TERM term;
- MyStruct* ptr = enif_alloc_resource(my_resource_type, sizeof(MyStruct));
+ MyStruct* obj = enif_alloc_resource(my_resource_type, sizeof(MyStruct));
/* initialize struct ... */
- term = enif_make_resource(env, ptr);
+ term = enif_make_resource(env, obj);
if (keep_a_reference_of_our_own) {
- /* store 'ptr' in static variable, private data or other resource object */
+ /* store 'obj' in static variable, private data or other resource object */
}
else {
enif_release_resource(obj);
@@ -308,21 +306,9 @@ ok
initialization is needed.</p>
</item>
- <tag><marker id="reload"/>int (*reload)(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)</tag>
- <item><p><c>reload</c> is called when the NIF library is loaded
- and there is already a previously loaded library for this
- module code.</p>
- <p>Works the same as <c>load</c>. The only difference is that
- <c>*priv_data</c> already contains the value set by the
- previous call to <c>load</c> or <c>reload</c>.</p>
- <p>The library will fail to load if <c>reload</c> returns
- anything other than 0 or if <c>reload</c> is NULL.</p>
- </item>
-
<tag><marker id="upgrade"/>int (*upgrade)(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)</tag>
<item><p><c>upgrade</c> is called when the NIF library is loaded
- and there is no previously loaded library for this module
- code, BUT there is old code of this module with a loaded NIF library.</p>
+ and there is old code of this module with a loaded NIF library.</p>
<p>Works the same as <c>load</c>. The only difference is that
<c>*old_priv_data</c> already contains the value set by the
last call to <c>load</c> or <c>reload</c> for the old module
@@ -339,6 +325,23 @@ ok
called for a replaced library as a consequence of <c>reload</c>.</p>
</item>
+ <tag><marker id="reload"/>int (*reload)(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)</tag>
+ <note><p>The reload mechanism is <em>deprecated</em>. It was only intended
+ as a development feature. Do not use it as an upgrade method for
+ live production systems. It might be removed in future releases. Be sure
+ to pass <c>reload</c> as <c>NULL</c> to <seealso marker="#ERL_NIF_INIT">ERL_NIF_INIT</seealso>
+ to disable it when not used (see the sketch after this list).</p>
+ </note>
+ <item><p><c>reload</c> is called when the NIF library is loaded
+ and there is already a previously loaded library for this
+ module code.</p>
+ <p>Works the same as <c>load</c>. The only difference is that
+ <c>*priv_data</c> already contains the value set by the
+ previous call to <c>load</c> or <c>reload</c>.</p>
+ <p>The library will fail to load if <c>reload</c> returns
+ anything other than 0 or if <c>reload</c> is NULL.</p>
+ </item>
+
</taglist>
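+ <p>
+ A minimal sketch of how these callbacks can be wired up, with the
+ deprecated <c>reload</c> callback disabled by passing <c>NULL</c> as
+ recommended above. The module name <c>my_nif</c>, the <c>hello</c>
+ function and the empty callback bodies are hypothetical, and the
+ standard <c>ERL_NIF_INIT(MODULE, funcs, load, reload, upgrade, unload)</c>
+ argument order is assumed.
+ </p>
+ <code type="none">
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+    return enif_make_atom(env, "ok");
+}
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+    return 0; /* 0 means success */
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
+                   ERL_NIF_TERM load_info)
+{
+    return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+}
+
+static ErlNifFunc nif_funcs[] = {
+    {"hello", 0, hello}
+};
+
+/* reload is passed as NULL to disable the deprecated mechanism. */
+ERL_NIF_INIT(my_nif, nif_funcs, load, NULL, upgrade, unload)
+ </code>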
</section>
@@ -692,6 +695,10 @@ typedef enum {
<fsummary>Determine if a term is an exception</fsummary>
<desc><p>Return true if <c>term</c> is an exception.</p></desc>
</func>
+ <func><name><ret>int</ret><nametext>enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
+ <fsummary>Determine if a term is a number (integer or float)</fsummary>
+ <desc><p>Return true if <c>term</c> is a number.</p></desc>
+ </func>
<func><name><ret>int</ret><nametext>enif_is_fun(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
<fsummary>Determine if a term is a fun</fsummary>
<desc><p>Return true if <c>term</c> is a fun.</p></desc>
@@ -822,6 +829,13 @@ typedef enum {
<desc><p>Create an ordinary list containing the elements of array <c>arr</c>
of length <c>cnt</c>. An empty list is returned if <c>cnt</c> is 0.</p></desc>
</func>
+ <func><name><ret>int</ret><nametext>enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list)</nametext></name>
+ <fsummary>Create the reverse list of the list <c>term</c>.</fsummary>
+ <desc><p>Set <c>*list</c> to the reverse of the list <c>term</c> and return true,
+ or return false if <c>term</c> is not a list. This function should only be used on
+ short lists, as a copy of the list will be created and not released until after the
+ NIF returns.</p></desc>
+ </func>
<func><name><ret>ERL_NIF_TERM</ret><nametext>enif_make_long(ErlNifEnv* env, long int i)</nametext></name>
<fsummary>Create an integer term from a long int</fsummary>
<desc><p>Create an integer term from a <c>long int</c>.</p></desc>
diff --git a/erts/doc/src/erl_prim_loader.xml b/erts/doc/src/erl_prim_loader.xml
index ccaa9b725f..9f5b3f385b 100644
--- a/erts/doc/src/erl_prim_loader.xml
+++ b/erts/doc/src/erl_prim_loader.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2009</year>
+ <year>1996</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -55,33 +55,31 @@
<c>-loader_debug</c> are also experimental</p></warning>
</description>
+ <datatypes>
+ <datatype>
+ <name name="host"/>
+ </datatype>
+ </datatypes>
+
<funcs>
<func>
- <name>start(Id, Loader, Hosts) -> {ok, Pid} | {error, What}</name>
+ <name name="start" arity="3"/>
<fsummary>Start the Erlang low level loader</fsummary>
- <type>
- <v>Id = term()</v>
- <v>Loader = atom() | string()</v>
- <v>Hosts = [Host]</v>
- <v>Host = atom()</v>
- <v>Pid = pid()</v>
- <v>What = term()</v>
- </type>
<desc>
<p>Starts the Erlang low level loader. This function is called
by the <c>init</c> process (and module). The <c>init</c>
- process reads the command line flags <c>-id Id</c>,
- <c>-loader Loader</c>, and <c>-hosts Hosts</c>. These are
+ process reads the command line flags <c>-id <anno>Id</anno></c>,
+ <c>-loader <anno>Loader</anno></c>, and <c>-hosts <anno>Hosts</anno></c>. These are
the arguments supplied to the <c>start/3</c> function.</p>
<p>If <c>-loader</c> is not given, the default loader is
<c>efile</c> which tells the system to read from the file
system.</p>
- <p>If <c>-loader</c> is <c>inet</c>, the <c>-id Id</c>,
- <c>-hosts Hosts</c>, and <c>-setcookie Cookie</c> flags must
- also be supplied. <c>Hosts</c> identifies hosts which this
+ <p>If <c>-loader</c> is <c>inet</c>, the <c>-id <anno>Id</anno></c>,
+ <c>-hosts <anno>Hosts</anno></c>, and <c>-setcookie Cookie</c> flags must
+ also be supplied. <c><anno>Hosts</anno></c> identifies hosts which this
node can contact in order to load modules. One Erlang
 runtime system with an <c>erl_boot_server</c> process must be
- started on each of hosts given in <c>Hosts</c> in order to
+ started on each of the hosts given in <c><anno>Hosts</anno></c> in order to
answer the requests. See <seealso
marker="kernel:erl_boot_server">erl_boot_server(3)</seealso>.</p>
<p>If <c>-loader</c> is something else, the given port program
@@ -90,35 +88,26 @@
</desc>
</func>
<func>
- <name>get_file(Filename) -> {ok, Bin, FullName} | error</name>
+ <name name="get_file" arity="1"/>
<fsummary>Get a file</fsummary>
- <type>
- <v>Filename = string()</v>
- <v>Bin = binary()</v>
- <v>FullName = string()</v>
- </type>
<desc>
<p>This function fetches a file using the low level loader.
- <c>Filename</c> is either an absolute file name or just the name
+ <c><anno>Filename</anno></c> is either an absolute file name or just the name
of the file, for example <c>"lists.beam"</c>. If an internal
path is set to the loader, this path is used to find the file.
If a user supplied loader is used, the path can be stripped
off if it is obsolete, and the loader does not use a path.
- <c>FullName</c> is the complete name of the fetched file.
- <c>Bin</c> is the contents of the file as a binary.</p>
+ <c><anno>FullName</anno></c> is the complete name of the fetched file.
+ <c><anno>Bin</anno></c> is the contents of the file as a binary.</p>
- <p>The <c>Filename</c> can also be a file in an archive. For example
- <c>/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin/mnesia_backup.beam</c>
+ <p>The <c><anno>Filename</anno></c> can also be a file in an archive. For example
+ <c>$OTPROOT/lib/</c><c>mnesia-4.4.7.ez/mnesia-4.4.7/ebin/</c><c>mnesia.beam</c>.
See <seealso marker="kernel:code">code(3)</seealso> about archive files.</p>
</desc>
</func>
<func>
- <name>get_path() -> {ok, Path}</name>
+ <name name="get_path" arity="0"/>
<fsummary>Get the path set in the loader</fsummary>
- <type>
- <v>Path = [Dir]</v>
- <v>Dir = string()</v>
- </type>
<desc>
<p>This function gets the path set in the loader. The path is
set by the <c>init</c> process according to information found
@@ -126,35 +115,26 @@
</desc>
</func>
<func>
- <name>list_dir(Dir) -> {ok, Filenames} | error</name>
+ <name name="list_dir" arity="1"/>
<fsummary>List files in a directory</fsummary>
- <type>
- <v>Dir = name()</v>
- <v>Filenames = [Filename]</v>
- <v>Filename = string()</v>
- </type>
<desc>
<p>Lists all the files in a directory. Returns
- <c>{ok, Filenames}</c> if successful. Otherwise, it returns
- <c>error</c>. <c>Filenames</c> is a list of
+ <c>{ok, <anno>Filenames</anno>}</c> if successful. Otherwise, it returns
+ <c>error</c>. <c><anno>Filenames</anno></c> is a list of
the names of all the files in the directory. The names are
not sorted.</p>
- <p>The <c>Dir</c> can also be a directory in an archive. For example
- <c>/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c>
+ <p>The <c><anno>Dir</anno></c> can also be a directory in an archive. For example
+ <c>$OTPROOT/lib/</c><c>mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c>.
See <seealso marker="kernel:code">code(3)</seealso> about archive files.</p>
</desc>
</func>
<func>
- <name>read_file_info(Filename) -> {ok, FileInfo} | error</name>
+ <name name="read_file_info" arity="1"/>
<fsummary>Get information about a file</fsummary>
- <type>
- <v>Filename = name()</v>
- <v>FileInfo = #file_info{}</v>
- </type>
<desc>
<p>Retrieves information about a file. Returns
- <c>{ok, FileInfo}</c> if successful, otherwise
- <c>error</c>. <c>FileInfo</c> is a record
+ <c>{ok, <anno>FileInfo</anno>}</c> if successful, otherwise
+ <c>error</c>. <c><anno>FileInfo</anno></c> is a record
<c>file_info</c>, defined in the Kernel include file
<c>file.hrl</c>. Include the following directive in the module
from which the function is called:</p>
@@ -162,18 +142,14 @@
-include_lib("kernel/include/file.hrl").</code>
<p>See <seealso marker="kernel:file">file(3)</seealso> for more info about
the record <c>file_info</c>.</p>
- <p>The <c>Filename</c> can also be a file in an archive. For example
- <c>/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin/mnesia_backup.beam</c>
+ <p>The <c><anno>Filename</anno></c> can also be a file in an archive. For example
+ <c>$OTPROOT/lib/</c><c>mnesia-4.4.7.ez/mnesia-4.4.7/ebin/</c><c>mnesia</c>.
See <seealso marker="kernel:code">code(3)</seealso> about archive files.</p>
</desc>
</func>
<func>
- <name>set_path(Path) -> ok</name>
+ <name name="set_path" arity="1"/>
<fsummary>Set the path of the loader</fsummary>
- <type>
- <v>Path = [Dir]</v>
- <v>Dir = string()</v>
- </type>
<desc>
<p>This function sets the path of the loader if <c>init</c>
interprets a <c>path</c> command in the start script.</p>
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index f98e15cb52..0963904b83 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2011</year>
+ <year>1996</year><year>2012</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -48,22 +48,24 @@
"Allowed in guard tests".</p>
</description>
- <section>
- <title>DATA TYPES</title>
- <marker id="iolist_definition"></marker>
- <code type="none">
-ext_binary()
- a binary data object,
- structured according to the Erlang external term format
-
-iodata() = iolist() | binary()
+ <datatypes>
+ <datatype>
+ <name><marker id="type-ext_binary">ext_binary()</marker></name>
+ <desc>
+ <p>A binary data object, structured according to
+ the Erlang external term format.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="timestamp"></name>
+ <desc><p>See <seealso marker="#now/0">now/0</seealso>.</p>
+ </desc>
+ </datatype>
+ </datatypes>
-iolist() = [char() | binary() | iolist()]
- a binary is allowed as the tail of the list</code>
- </section>
<funcs>
<func>
- <name>abs(Number) -> int() | float()</name>
+ <name>abs(Number) -> integer() | float()</name>
<fsummary>Arithmetical absolute value</fsummary>
<type>
<v>Number = number()</v>
@@ -80,7 +82,7 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>erlang:adler32(Data) -> int()</name>
+ <name>erlang:adler32(Data) -> integer()</name>
<fsummary>Compute adler32 checksum</fsummary>
<type>
<v>Data = iodata()</v>
@@ -90,10 +92,10 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>erlang:adler32(OldAdler, Data) -> int()</name>
+ <name>erlang:adler32(OldAdler, Data) -> integer()</name>
<fsummary>Compute adler32 checksum</fsummary>
<type>
- <v>OldAdler = int()</v>
+ <v>OldAdler = integer()</v>
<v>Data = iodata()</v>
</type>
<desc>
@@ -112,11 +114,11 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>erlang:adler32_combine(FirstAdler, SecondAdler, SecondSize) -> int()</name>
+ <name>erlang:adler32_combine(FirstAdler, SecondAdler, SecondSize) -> integer()</name>
<fsummary>Combine two adler32 checksums</fsummary>
<type>
- <v>FirstAdler = SecondAdler = int()</v>
- <v>SecondSize = int()</v>
+ <v>FirstAdler = SecondAdler = integer()</v>
+ <v>SecondSize = integer()</v>
</type>
<desc>
<p>Combines two previously computed adler32 checksums.
@@ -155,20 +157,16 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>apply(Fun, Args) -> term() | empty()</name>
+ <name name="apply" arity="2"/>
<fsummary>Apply a function to an argument list</fsummary>
- <type>
- <v>Fun = fun()</v>
- <v>Args = [term()]</v>
- </type>
<desc>
- <p>Call a fun, passing the elements in <c>Args</c> as
+ <p>Call a fun, passing the elements in <c><anno>Args</anno></c> as
arguments.</p>
 <p>Note: If the number of elements in the arguments is known at
compile-time, the call is better written as
- <c>Fun(Arg1, Arg2, ... ArgN)</c>.</p>
+ <c><anno>Fun</anno>(Arg1, Arg2, ... ArgN)</c>.</p>
<warning>
- <p>Earlier, <c>Fun</c> could also be given as
+ <p>Earlier, <c><anno>Fun</anno></c> could also be given as
<c>{Module, Function}</c>, equivalent to
<c>apply(Module, Function, Args)</c>. This usage is
deprecated and will stop working in a future release of
@@ -177,15 +175,11 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>apply(Module, Function, Args) -> term() | empty()</name>
+ <name name="apply" arity="3"/>
<fsummary>Apply a function to an argument list</fsummary>
- <type>
- <v>Module = Function = atom()</v>
- <v>Args = [term()]</v>
- </type>
<desc>
<p>Returns the result of applying <c>Function</c> in
- <c>Module</c> to <c>Args</c>. The applied function must
+ <c><anno>Module</anno></c> to <c><anno>Args</anno></c>. The applied function must
be exported from <c>Module</c>. The arity of the function is
the length of <c>Args</c>.</p>
<pre>
@@ -198,7 +192,7 @@ iolist() = [char() | binary() | iolist()]
"Erlang"</pre>
 <p>Note: If the number of arguments is known at compile-time,
the call is better written as
- <c>Module:Function(Arg1, Arg2, ..., ArgN)</c>.</p>
+ <c><anno>Module</anno>:<anno>Function</anno>(Arg1, Arg2, ..., ArgN)</c>.</p>
<p>Failure: <c>error_handler:undefined_function/3</c> is called
if the applied function is not exported. The error handler
can be redefined (see
@@ -221,9 +215,9 @@ iolist() = [char() | binary() | iolist()]
representation of <c>Atom</c>. If <c>Encoding</c>
is <c>latin1</c>, there will be one byte for each character
in the text representation. If <c>Encoding</c> is <c>utf8</c> or
- <c>unicode</c>, the characters will encoded using UTF-8
+ <c>unicode</c>, the characters will be encoded using UTF-8
(meaning that characters from 16#80 up to 0xFF will be
- encode in two bytes).</p>
+ encoded in two bytes).</p>
<note><p>Currently, <c>atom_to_binary(Atom, latin1)</c> can
never fail because the text representation of an atom can only contain
@@ -258,8 +252,8 @@ iolist() = [char() | binary() | iolist()]
<type>
<v>Subject = binary()</v>
<v>PosLen = {Start,Length}</v>
- <v>Start = int()</v>
- <v>Length = int()</v>
+ <v>Start = integer() >= 0</v>
+ <v>Length = integer() >= 0</v>
</type>
<desc>
<p>Extracts the part of the binary described by <c>PosLen</c>.</p>
@@ -274,7 +268,7 @@ iolist() = [char() | binary() | iolist()]
<p>If <c>PosLen</c> in any way references outside the binary, a <c>badarg</c> exception is raised.</p>
- <p><c>Start</c> is zero-based, i.e:</p>
+ <p><c>Start</c> is zero-based, i.e.:</p>
<code>
1> Bin = &lt;&lt;1,2,3&gt;&gt;
2> binary_part(Bin,{0,2}).
@@ -291,8 +285,8 @@ iolist() = [char() | binary() | iolist()]
<fsummary>Extracts a part of a binary</fsummary>
<type>
<v>Subject = binary()</v>
- <v>Start = int()</v>
- <v>Length = int()</v>
+ <v>Start = integer() >= 0</v>
+ <v>Length = integer() >= 0</v>
</type>
<desc>
<p>The same as <c>binary_part(Subject, {Pos, Len})</c>.</p>
@@ -390,7 +384,7 @@ iolist() = [char() | binary() | iolist()]
<name>binary_to_term(Binary) -> term()</name>
<fsummary>Decode an Erlang external term format binary</fsummary>
<type>
- <v>Binary = ext_binary()</v>
+ <v>Binary = <seealso marker="#type-ext_binary">ext_binary()</seealso></v>
</type>
<desc>
<p>Returns an Erlang term which is the result of decoding
@@ -411,7 +405,7 @@ iolist() = [char() | binary() | iolist()]
<fsummary>Decode an Erlang external term format binary</fsummary>
<type>
<v>Opts = [safe]</v>
- <v>Binary = ext_binary()</v>
+ <v>Binary = <seealso marker="#type-ext_binary">ext_binary()</seealso></v>
</type>
<desc>
<p>As <c>binary_to_term/1</c>, but takes options that affect decoding
@@ -442,7 +436,7 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>bit_size(Bitstring) -> int()</name>
+ <name>bit_size(Bitstring) -> integer() >= 0</name>
<fsummary>Return the size of a bitstring</fsummary>
<type>
<v>Bitstring = bitstring()</v>
@@ -461,7 +455,7 @@ iolist() = [char() | binary() | iolist()]
<name>erlang:bump_reductions(Reductions) -> void()</name>
<fsummary>Increment the reduction counter</fsummary>
<type>
- <v>Reductions = int()</v>
+ <v>Reductions = integer() >= 0</v>
</type>
<desc>
<p>This implementation-dependent function increments
@@ -478,7 +472,7 @@ iolist() = [char() | binary() | iolist()]
</desc>
</func>
<func>
- <name>byte_size(Bitstring) -> int()</name>
+ <name>byte_size(Bitstring) -> integer() >= 0</name>
<fsummary>Return the size of a bitstring (or binary)</fsummary>
<type>
<v>Bitstring = bitstring()</v>
@@ -500,7 +494,7 @@ iolist() = [char() | binary() | iolist()]
<fsummary>Cancel a timer</fsummary>
<type>
<v>TimerRef = reference()</v>
- <v>Time = int()</v>
+ <v>Time = integer() >= 0</v>
</type>
<desc>
<p>Cancels a timer, where <c>TimerRef</c> was returned by
@@ -524,7 +518,19 @@ iolist() = [char() | binary() | iolist()]
</func>
<func>
- <name>check_process_code(Pid, Module) -> bool()</name>
+ <name>check_old_code(Module) -> boolean()</name>
+ <fsummary>Check if a module has old code</fsummary>
+ <type>
+ <v>Module = atom()</v>
+ </type>
+ <desc>
+ <p>Returns <c>true</c> if the module <c>Module</c> has old code,
+ and <c>false</c> otherwise.</p>
+ <p>See also <seealso marker="kernel:code">code(3)</seealso>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>check_process_code(Pid, Module) -> boolean()</name>
<fsummary>Check if a process is executing old code for a module</fsummary>
<type>
<v>Pid = pid()</v>
@@ -544,16 +550,7 @@ false</pre>
</desc>
</func>
<func>
- <name>concat_binary(ListOfBinaries)</name>
- <fsummary>Concatenate a list of binaries (deprecated)</fsummary>
- <desc>
- <p>Do not use; use
- <seealso marker="#list_to_binary/1">list_to_binary/1</seealso>
- instead.</p>
- </desc>
- </func>
- <func>
- <name>erlang:crc32(Data) -> int()</name>
+ <name>erlang:crc32(Data) -> integer() >= 0</name>
<fsummary>Compute crc32 (IEEE 802.3) checksum</fsummary>
<type>
<v>Data = iodata()</v>
@@ -563,10 +560,10 @@ false</pre>
</desc>
</func>
<func>
- <name>erlang:crc32(OldCrc, Data) -> int()</name>
+ <name>erlang:crc32(OldCrc, Data) -> integer() >= 0</name>
<fsummary>Compute crc32 (IEEE 802.3) checksum</fsummary>
<type>
- <v>OldCrc = int()</v>
+ <v>OldCrc = integer() >= 0</v>
<v>Data = iodata()</v>
</type>
<desc>
@@ -585,11 +582,11 @@ false</pre>
</desc>
</func>
<func>
- <name>erlang:crc32_combine(FirstCrc, SecondCrc, SecondSize) -> int()</name>
+ <name>erlang:crc32_combine(FirstCrc, SecondCrc, SecondSize) -> integer() >= 0</name>
<fsummary>Combine two crc32 (IEEE 802.3) checksums</fsummary>
<type>
- <v>FirstCrc = SecondCrc = int()</v>
- <v>SecondSize = int()</v>
+ <v>FirstCrc = SecondCrc = integer() >= 0</v>
+ <v>SecondSize = integer() >= 0</v>
</type>
<desc>
<p>Combines two previously computed crc32 checksums.
@@ -609,10 +606,10 @@ false</pre>
</desc>
</func>
<func>
- <name>date() -> {Year, Month, Day}</name>
+ <name>date() -> Date</name>
<fsummary>Current date</fsummary>
<type>
- <v>Year = Month = Day = int()</v>
+ <v>Date = <seealso marker="calendar#type-date">calendar:date()</seealso></v>
</type>
<desc>
<p>Returns the current date as <c>{Year, Month, Day}</c>.</p>
@@ -631,20 +628,20 @@ false</pre>
<v>Options = [Opt]</v>
<v>Packet = binary() | HttpPacket</v>
<v>Rest = binary()</v>
- <v>Length = int() | undefined</v>
+ <v>Length = integer() > 0 | undefined</v>
<v>Reason = term()</v>
<v>&nbsp;Type, Opt -- see below</v>
<v></v>
<v>HttpPacket = HttpRequest | HttpResponse | HttpHeader | http_eoh | HttpError</v>
<v>HttpRequest = {http_request, HttpMethod, HttpUri, HttpVersion}</v>
<v>HttpResponse = {http_response, HttpVersion, integer(), HttpString}</v>
- <v>HttpHeader = {http_header, int(), HttpField, Reserved=term(), Value=HttpString}</v>
+ <v>HttpHeader = {http_header, integer(), HttpField, Reserved=term(), Value=HttpString}</v>
<v>HttpError = {http_error, HttpString}</v>
<v>HttpMethod = HttpMethodAtom | HttpString</v>
<v>HttpMethodAtom = 'OPTIONS' | 'GET' | 'HEAD' | 'POST' | 'PUT' | 'DELETE' | 'TRACE'</v>
- <v>HttpUri = '*' | {absoluteURI, http|https, Host=HttpString, Port=int()|undefined, Path=HttpString} |
+ <v>HttpUri = '*' | {absoluteURI, http|https, Host=HttpString, Port=integer()|undefined, Path=HttpString} |
{scheme, Scheme=HttpString, HttpString} | {abs_path, HttpString} | HttpString</v>
- <v>HttpVersion = {Major=int(), Minor=int()}</v>
+ <v>HttpVersion = {Major=integer(), Minor=integer()}</v>
<v>HttpString = string() | binary()</v>
<v>HttpField = HttpFieldAtom | HttpString</v>
<v>HttpFieldAtom = 'Cache-Control' | 'Connection' | 'Date' | 'Pragma' | 'Transfer-Encoding' | 'Upgrade' | 'Via' | 'Accept' | 'Accept-Charset' | 'Accept-Encoding' | 'Accept-Language' | 'Authorization' | 'From' | 'Host' | 'If-Modified-Since' | 'If-Match' | 'If-None-Match' | 'If-Range' | 'If-Unmodified-Since' | 'Max-Forwards' | 'Proxy-Authorization' | 'Range' | 'Referer' | 'User-Agent' | 'Age' | 'Location' | 'Proxy-Authenticate' | 'Public' | 'Retry-After' | 'Server' | 'Vary' | 'Warning' | 'Www-Authenticate' | 'Allow' | 'Content-Base' | 'Content-Encoding' | 'Content-Language' | 'Content-Length' | 'Content-Location' | 'Content-Md5' | 'Content-Range' | 'Content-Type' | 'Etag' | 'Expires' | 'Last-Modified' | 'Accept-Ranges' | 'Set-Cookie' | 'Set-Cookie2' | 'X-Forwarded-For' | 'Cookie' | 'Keep-Alive' | 'Proxy-Connection'</v>
@@ -719,17 +716,20 @@ false</pre>
</taglist>
<p>The following options are available:</p>
<taglist>
- <tag><c>{packet_size, int()}</c></tag>
+ <tag><c>{packet_size, integer()}</c></tag>
<item><p>Sets the max allowed size of the packet body. If
the packet header indicates that the length of the
packet is longer than the max allowed length, the packet
 is considered invalid. Default is 0, which means no
size limit.</p>
</item>
- <tag><c>{line_length, int()}</c></tag>
- <item><p>Applies only to line oriented protocols
- (<c>line</c>, <c>http</c>). Lines longer than this
- will be truncated.</p>
+ <tag><c>{line_length, integer()}</c></tag>
+ <item><p>For packet type <c>line</c>, truncate lines longer
+ than the indicated length.</p>
+ <p>Option <c>line_length</c> also applies to <c>http*</c>
+ packet types as an alias for option <c>packet_size</c> in the
+ case when <c>packet_size</c> itself is not set. This usage is
+ only intended for backward compatibility.</p>
</item>
</taglist>
<pre>
@@ -773,9 +773,9 @@ false</pre>
turned off, nothing happens.</p>
<p>Once <c>demonitor(MonitorRef)</c> has returned it is
guaranteed that no <c>{'DOWN', MonitorRef, _, _, _}</c> message
- due to the monitor will be placed in the callers message queue
+ due to the monitor will be placed in the caller's message queue
in the future. A <c>{'DOWN', MonitorRef, _, _, _}</c> message
- might have been placed in the callers message queue prior to
+ might have been placed in the caller's message queue prior to
the call, though. Therefore, in most cases, it is advisable
to remove such a <c>'DOWN'</c> message from the message queue
after monitoring has been stopped.
@@ -800,13 +800,12 @@ false</pre>
</desc>
</func>
<func>
- <name>demonitor(MonitorRef, OptionList) -> true|false</name>
+ <name>demonitor(MonitorRef, OptionList) -> boolean()</name>
<fsummary>Stop monitoring</fsummary>
<type>
<v>MonitorRef = reference()</v>
<v>OptionList = [Option]</v>
- <v>Option = flush</v>
- <v>Option = info</v>
+ <v>&nbsp;Option = flush | info</v>
</type>
<desc>
<p>The returned value is <c>true</c> unless <c>info</c> is part
@@ -819,7 +818,7 @@ false</pre>
<tag><c>flush</c></tag>
<item>
<p>Remove (one) <c>{_, MonitorRef, _, _, _}</c> message,
- if there is one, from the callers message queue after
+ if there is one, from the caller's message queue after
monitoring has been stopped.</p>
<p>Calling <c>demonitor(MonitorRef, [flush])</c>
is equivalent to the following, but more efficient:</p>
@@ -848,7 +847,7 @@ false</pre>
<item><p>The monitor was not found and could not be removed.
 This is probably because someone has already placed a
<c>'DOWN'</c> message corresponding to this monitor
- in the callers message queue.
+ in the caller's message queue.
</p>
</item>
</taglist>
@@ -867,14 +866,11 @@ false</pre>
</desc>
</func>
<func>
- <name>disconnect_node(Node) -> bool() | ignored</name>
+ <name name="disconnect_node" arity="1"/>
<fsummary>Force the disconnection of a node</fsummary>
- <type>
- <v>Node = atom()</v>
- </type>
<desc>
<p>Forces the disconnection of a node. This will appear to
- the node <c>Node</c> as if the local node has crashed. This
+ the node <c><anno>Node</anno></c> as if the local node has crashed. This
BIF is mainly used in the Erlang network authentication
protocols. Returns <c>true</c> if disconnection succeeds,
otherwise <c>false</c>. If the local node is not alive,
@@ -1032,6 +1028,56 @@ b</pre>
</desc>
</func>
<func>
+ <name>erlang:external_size(Term) -> integer() >= 0</name>
+ <fsummary>Calculate the maximum size for a term encoded in the Erlang
+ external term format</fsummary>
+ <type>
+ <v>Term = term()</v>
+ </type>
+ <desc>
+ <p>Calculates, without doing the encoding, the maximum byte size for
+ a term encoded in the Erlang external term format. The following
+ condition always applies:</p>
+ <p>
+ <pre>
+> <input>Size1 = byte_size(term_to_binary(Term)),</input>
+> <input>Size2 = erlang:external_size(Term),</input>
+> <input>true = Size1 =&lt; Size2.</input>
+true
+ </pre>
+ </p>
+ <p>This is equivalent to a call to: <code>erlang:external_size(Term, [])
+ </code></p>
+ </desc>
+ </func>
+ <func>
+ <name>erlang:external_size(Term, [Option]) -> integer() >= 0</name>
+ <fsummary>Calculate the maximum size for a term encoded in the Erlang
+ external term format</fsummary>
+ <type>
+ <v>Term = term()</v>
+ <v>Option = {minor_version, Version}</v>
+ </type>
+ <desc>
+ <p>Calculates, without doing the encoding, the maximum byte size for
+ a term encoded in the Erlang external term format. The following
+ condition always applies:</p>
+ <p>
+ <pre>
+> <input>Size1 = byte_size(term_to_binary(Term, Options)),</input>
+> <input>Size2 = erlang:external_size(Term, Options),</input>
+> <input>true = Size1 =&lt; Size2.</input>
+true
+ </pre>
+ </p>
+ <p>The option <c>{minor_version, Version}</c> specifies how floats
+ are encoded. See
+ <seealso marker="#term_to_binary/2">term_to_binary/2</seealso> for
+ a more detailed description.
+ </p>
+ </desc>
+ </func>
+ <func>
<name>float(Number) -> float()</name>
<fsummary>Convert a number to a float</fsummary>
<type>
@@ -1069,15 +1115,11 @@ b</pre>
</desc>
</func>
<func>
- <name>erlang:fun_info(Fun) -> [{Item, Info}]</name>
+ <name name="fun_info" arity="1"/>
<fsummary>Information about a fun</fsummary>
- <type>
- <v>Fun = fun()</v>
- <v>Item, Info -- see below</v>
- </type>
<desc>
<p>Returns a list containing information about the fun
- <c>Fun</c>. Each element of the list is a tuple. The order of
+ <c><anno>Fun</anno></c>. Each element of the list is a tuple. The order of
the tuples is not defined, and more tuples may be added in a
future release.</p>
<warning>
@@ -1156,11 +1198,16 @@ b</pre>
</item>
<tag><c>{new_uniq, Uniq}</c></tag>
<item>
- <p><c>Uniq</c> (a binary) is a unique value for this fun.</p>
+ <p><c>Uniq</c> (a binary) is a unique value for this fun.
+ It is calculated from the compiled code for the entire module.</p>
</item>
<tag><c>{uniq, Uniq}</c></tag>
<item>
- <p><c>Uniq</c> (an integer) is a unique value for this fun.</p>
+ <p><c>Uniq</c> (an integer) is a unique value for this fun.
+ Starting in the R15 release, this integer is calculated from
+ the compiled code for the entire module. Before R15, this
+ integer was based on only the body of the fun.
+ </p>
</item>
</taglist>
</desc>
@@ -1176,7 +1223,7 @@ b</pre>
<p>Returns information about <c>Fun</c> as specified by
<c>Item</c>, in the form <c>{Item,Info}</c>.</p>
<p>For any fun, <c>Item</c> can be any of the atoms
- <c>module</c>, <c>name</c>, <c>arity</c>, or <c>env</c>.</p>
+ <c>module</c>, <c>name</c>, <c>arity</c>, <c>env</c>, or <c>type</c>.</p>
<p>For a local fun, <c>Item</c> can also be any of the atoms
<c>index</c>, <c>new_index</c>, <c>new_uniq</c>,
<c>uniq</c>, and <c>pid</c>. For an external fun, the value
@@ -1197,11 +1244,11 @@ b</pre>
</desc>
</func>
<func>
- <name>erlang:function_exported(Module, Function, Arity) -> bool()</name>
+ <name>erlang:function_exported(Module, Function, Arity) -> boolean()</name>
<fsummary>Check if a function is exported and loaded</fsummary>
<type>
<v>Module = Function = atom()</v>
- <v>Arity = int()</v>
+ <v>Arity = arity()</v>
</type>
<desc>
<p>Returns <c>true</c> if the module <c>Module</c> is loaded
@@ -1229,7 +1276,7 @@ b</pre>
</desc>
</func>
<func>
- <name>garbage_collect(Pid) -> bool()</name>
+ <name>garbage_collect(Pid) -> boolean()</name>
<fsummary>Force an immediate garbage collection of a process</fsummary>
<type>
<v>Pid = pid()</v>
@@ -1276,11 +1323,8 @@ b</pre>
</desc>
</func>
<func>
- <name>erlang:get_cookie() -> Cookie | nocookie</name>
+ <name name="get_cookie" arity="0"/>
<fsummary>Get the magic cookie of the local node</fsummary>
- <type>
- <v>Cookie = atom()</v>
- </type>
<desc>
<p>Returns the magic cookie of the local node, if the node is
alive; otherwise the atom <c>nocookie</c>.</p>
@@ -1307,17 +1351,18 @@ b</pre>
</desc>
</func>
<func>
- <name>erlang:get_stacktrace() -> [{Module, Function, Arity | Args}]</name>
+ <name>erlang:get_stacktrace() -> [{Module, Function, Arity | Args, Location}]</name>
<fsummary>Get the call stack back-trace of the last exception</fsummary>
<type>
<v>Module = Function = atom()</v>
- <v>Arity = int()</v>
+ <v>Arity = arity()</v>
<v>Args = [term()]</v>
+ <v>Location = [{atom(),term()}]</v>
</type>
<desc>
<p>Get the call stack back-trace (<em>stacktrace</em>) of the last
exception in the calling process as a list of
- <c>{Module,Function,Arity}</c> tuples.
+ <c>{Module,Function,Arity,Location}</c> tuples.
The <c>Arity</c> field in the first tuple may be the argument
list of that function call instead of an arity integer,
depending on the exception.</p>
@@ -1327,6 +1372,25 @@ b</pre>
<p>The stacktrace is the same data as the <c>catch</c> operator
returns, for example:</p>
<p><c>{'EXIT',{badarg,Stacktrace}} = catch abs(x)</c></p>
+ <p><c>Location</c> is a (possibly empty) list of two-tuples that
+ may indicate the location in the source code of the function.
+ The first element is an atom that describes the type of
+ information in the second element. Currently the following
+ items may occur:</p>
+ <taglist>
+ <tag><c>file</c></tag>
+ <item>
+ <p>The second element of the tuple is a string (list of
+ characters) representing the filename of the source file
+ of the function.</p>
+ </item>
+ <tag><c>line</c></tag>
+ <item>
+ <p>The second element of the tuple is the line number
+ (an integer greater than zero) in the source file
+ where the exception occurred or the function was called.</p>
+ </item>
+ </taglist>
<p>See also
<seealso marker="#error/1">erlang:error/1</seealso> and
<seealso marker="#error/2">erlang:error/2</seealso>.</p>
@@ -1368,29 +1432,69 @@ b</pre>
<name>halt()</name>
<fsummary>Halt the Erlang runtime system and indicate normal exit to the calling environment</fsummary>
<desc>
- <p>Halts the Erlang runtime system and indicates normal exit to
- the calling environment. Has no return value.</p>
+ <p>The same as
+ <seealso marker="#halt/2"><c>halt(0, [])</c></seealso>.</p>
<pre>
> <input>halt().</input>
-os_prompt%</pre>
+os_prompt% </pre>
</desc>
</func>
<func>
<name>halt(Status)</name>
<fsummary>Halt the Erlang runtime system</fsummary>
<type>
- <v>Status = int()>=0 | string()</v>
+ <v>Status = integer() >= 0 | string() | abort</v>
</type>
<desc>
- <p><c>Status</c> must be a non-negative integer, or a string.
- Halts the Erlang runtime system. Has no return value.
- If <c>Status</c> is an integer, it is returned as an exit
- status of Erlang to the calling environment.
- If <c>Status</c> is a string, produces an Erlang crash dump
- with <c>String</c> as slogan, and then exits with a non-zero
- status code.</p>
- <p>Note that on many platforms, only the status codes 0-255 are
- supported by the operating system.</p>
+ <p>The same as
+ <seealso marker="#halt/2"><c>halt(Status, [])</c></seealso>.</p>
+ <pre>
+> <input>halt(17).</input>
+os_prompt% <input>echo $?</input>
+17
+os_prompt% </pre>
+ </desc>
+ </func>
+ <func>
+ <name>halt(Status, Options)</name>
+ <fsummary>Halt the Erlang runtime system</fsummary>
+ <type>
+ <v>Status = integer() >= 0 | string() | abort</v>
+ <v>Options = [Option]</v>
+ <v>Option = {flush,boolean()} | term()</v>
+ </type>
+ <desc>
+ <p><c>Status</c> must be a non-negative integer, a string,
+ or the atom <c>abort</c>.
+ Halts the Erlang runtime system. Has no return value.
+ Depending on <c>Status</c>:
+ </p>
+ <taglist>
+ <tag>integer()</tag>
+ <item>The runtime system exits with the integer value <c>Status</c>
+ as status code to the calling environment (operating system).
+ </item>
+ <tag>string()</tag>
+ <item>An Erlang crash dump is produced with <c>Status</c> as slogan,
+ and then the runtime system exits with status code <c>1</c>.
+ </item>
+ <tag><c>abort</c></tag>
+ <item>
+ The runtime system aborts, producing a core dump if that is
+ enabled in the operating system.
+ </item>
+ </taglist>
+ <p>Note that on many platforms, only the status codes 0-255 are
+ supported by the operating system.
+ </p>
+ <p>For an integer <c>Status</c>, the Erlang runtime system closes all ports
+ and allows async threads to finish their operations before exiting.
+ To exit without such flushing, use
+ the option <c>{flush,false}</c>.
+ </p>
+ <p>For statuses <c>string()</c> and <c>abort</c>, the <c>flush</c>
+ option is ignored and flushing is <em>not</em> done.
+ </p>
</desc>
</func>
<func>
@@ -1472,7 +1576,7 @@ os_prompt%</pre>
<name>integer_to_list(Integer) -> string()</name>
<fsummary>Text representation of an integer</fsummary>
<type>
- <v>Integer = int()</v>
+ <v>Integer = integer()</v>
</type>
<desc>
<p>Returns a string which corresponds to the text
@@ -1483,15 +1587,11 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>integer_to_list(Integer, Base) -> string()</name>
+ <name name="integer_to_list" arity="2"/>
<fsummary>Text representation of an integer</fsummary>
- <type>
- <v>Integer = int()</v>
- <v>Base = 2..36</v>
- </type>
<desc>
<p>Returns a string which corresponds to the text
- representation of <c>Integer</c> in base <c>Base</c>.</p>
+ representation of <c><anno>Integer</anno></c> in base <c><anno>Base</anno></c>.</p>
<pre>
> <input>integer_to_list(1023, 16).</input>
"3FF"</pre>
@@ -1518,7 +1618,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>iolist_size(Item) -> int()</name>
+ <name>iolist_size(Item) -> integer() >= 0</name>
<fsummary>Size of an iolist</fsummary>
<type>
<v>Item = iolist() | binary()</v>
@@ -1533,7 +1633,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_alive() -> bool()</name>
+ <name>is_alive() -> boolean()</name>
<fsummary>Check whether the local node is alive</fsummary>
<desc>
<p>Returns <c>true</c> if the local node is alive; that is, if
@@ -1542,7 +1642,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_atom(Term) -> bool()</name>
+ <name>is_atom(Term) -> boolean()</name>
<fsummary>Check whether a term is an atom</fsummary>
<type>
<v>Term = term()</v>
@@ -1554,7 +1654,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_binary(Term) -> bool()</name>
+ <name>is_binary(Term) -> boolean()</name>
<fsummary>Check whether a term is a binary</fsummary>
<type>
<v>Term = term()</v>
@@ -1569,7 +1669,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_bitstring(Term) -> bool()</name>
+ <name>is_bitstring(Term) -> boolean()</name>
<fsummary>Check whether a term is a bitstring</fsummary>
<type>
<v>Term = term()</v>
@@ -1582,7 +1682,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_boolean(Term) -> bool()</name>
+ <name>is_boolean(Term) -> boolean()</name>
<fsummary>Check whether a term is a boolean</fsummary>
<type>
<v>Term = term()</v>
@@ -1595,11 +1695,11 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:is_builtin(Module, Function, Arity) -> bool()</name>
+ <name>erlang:is_builtin(Module, Function, Arity) -> boolean()</name>
<fsummary>Check if a function is a BIF implemented in C</fsummary>
<type>
<v>Module = Function = atom()</v>
- <v>Arity = int()</v>
+ <v>Arity = arity()</v>
</type>
<desc>
<p>Returns <c>true</c> if <c>Module:Function/Arity</c> is
@@ -1608,7 +1708,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_float(Term) -> bool()</name>
+ <name>is_float(Term) -> boolean()</name>
<fsummary>Check whether a term is a float</fsummary>
<type>
<v>Term = term()</v>
@@ -1620,7 +1720,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_function(Term) -> bool()</name>
+ <name>is_function(Term) -> boolean()</name>
<fsummary>Check whether a term is a fun</fsummary>
<type>
<v>Term = term()</v>
@@ -1632,11 +1732,11 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_function(Term, Arity) -> bool()</name>
+ <name>is_function(Term, Arity) -> boolean()</name>
<fsummary>Check whether a term is a fun with a given arity</fsummary>
<type>
<v>Term = term()</v>
- <v>Arity = int()</v>
+ <v>Arity = arity()</v>
</type>
<desc>
<p>Returns <c>true</c> if <c>Term</c> is a fun that can be
@@ -1653,7 +1753,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_integer(Term) -> bool()</name>
+ <name>is_integer(Term) -> boolean()</name>
<fsummary>Check whether a term is an integer</fsummary>
<type>
<v>Term = term()</v>
@@ -1665,7 +1765,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_list(Term) -> bool()</name>
+ <name>is_list(Term) -> boolean()</name>
<fsummary>Check whether a term is a list</fsummary>
<type>
<v>Term = term()</v>
@@ -1677,7 +1777,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_number(Term) -> bool()</name>
+ <name>is_number(Term) -> boolean()</name>
<fsummary>Check whether a term is a number</fsummary>
<type>
<v>Term = term()</v>
@@ -1689,7 +1789,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_pid(Term) -> bool()</name>
+ <name>is_pid(Term) -> boolean()</name>
<fsummary>Check whether a term is a pid</fsummary>
<type>
<v>Term = term()</v>
@@ -1701,7 +1801,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_port(Term) -> bool()</name>
+ <name>is_port(Term) -> boolean()</name>
<fsummary>Check whether a term is a port</fsummary>
<type>
<v>Term = term()</v>
@@ -1713,7 +1813,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_process_alive(Pid) -> bool()</name>
+ <name>is_process_alive(Pid) -> boolean()</name>
<fsummary>Check whether a process is alive</fsummary>
<type>
<v>Pid = pid()</v>
@@ -1728,7 +1828,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_record(Term, RecordTag) -> bool()</name>
+ <name>is_record(Term, RecordTag) -> boolean()</name>
<fsummary>Check whether a term appears to be a record</fsummary>
<type>
<v>Term = term()</v>
@@ -1751,12 +1851,12 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_record(Term, RecordTag, Size) -> bool()</name>
+ <name>is_record(Term, RecordTag, Size) -> boolean()</name>
<fsummary>Check whether a term appears to be a record</fsummary>
<type>
<v>Term = term()</v>
<v>RecordTag = atom()</v>
- <v>Size = int()</v>
+ <v>Size = integer()</v>
</type>
<desc>
<p><c>RecordTag</c> must be an atom. Returns <c>true</c> if
@@ -1771,7 +1871,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_reference(Term) -> bool()</name>
+ <name>is_reference(Term) -> boolean()</name>
<fsummary>Check whether a term is a reference</fsummary>
<type>
<v>Term = term()</v>
@@ -1783,7 +1883,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>is_tuple(Term) -> bool()</name>
+ <name>is_tuple(Term) -> boolean()</name>
<fsummary>Check whether a term is a tuple</fsummary>
<type>
<v>Term = term()</v>
@@ -1795,7 +1895,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>length(List) -> int()</name>
+ <name>length(List) -> integer() >= 0</name>
<fsummary>Length of a list</fsummary>
<type>
<v>List = [term()]</v>
@@ -1916,7 +2016,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>list_to_integer(String) -> int()</name>
+ <name>list_to_integer(String) -> integer()</name>
<fsummary>Convert from text representation to an integer</fsummary>
<type>
<v>String = string()</v>
@@ -1932,19 +2032,15 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>list_to_integer(String, Base) -> int()</name>
+ <name name="list_to_integer" arity="2"/>
<fsummary>Convert from text representation to an integer</fsummary>
- <type>
- <v>String = string()</v>
- <v>Base = 2..36</v>
- </type>
<desc>
<p>Returns an integer whose text representation in base
- <c>Base</c> is <c>String</c>.</p>
+ <c><anno>Base</anno></c> is <c><anno>String</anno></c>.</p>
<pre>
> <input>list_to_integer("3FF", 16).</input>
1023</pre>
- <p>Failure: <c>badarg</c> if <c>String</c> contains a bad
+ <p>Failure: <c>badarg</c> if <c><anno>String</anno></c> contains a bad
representation of an integer.</p>
</desc>
</func>
@@ -2095,12 +2191,10 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:localtime() -> {Date, Time}</name>
+ <name>erlang:localtime() -> DateTime</name>
<fsummary>Current local date and time</fsummary>
<type>
- <v>Date = {Year, Month, Day}</v>
- <v>Time = {Hour, Minute, Second}</v>
- <v>&nbsp;Year = Month = Day = Hour = Minute = Second = int()</v>
+ <v>DateTime = <seealso marker="calendar#type-datetime">calendar:datetime()</seealso></v>
</type>
<desc>
<p>Returns the current local date and time
@@ -2113,17 +2207,12 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:localtime_to_universaltime({Date1, Time1}) -> {Date2, Time2}</name>
+ <name name="localtime_to_universaltime" arity="1"/>
<fsummary>Convert from local to Universal Time Coordinated (UTC) date and time</fsummary>
- <type>
- <v>Date1 = Date2 = {Year, Month, Day}</v>
- <v>Time1 = Time2 = {Hour, Minute, Second}</v>
- <v>&nbsp;Year = Month = Day = Hour = Minute = Second = int()</v>
- </type>
<desc>
<p>Converts local date and time to Universal Time Coordinated
(UTC), if this is supported by the underlying OS. Otherwise,
- no conversion is done and <c>{Date1, Time1}</c> is returned.</p>
+ no conversion is done and <c>{<anno>Date1</anno>, <anno>Time1</anno>}</c> is returned.</p>
<pre>
> <input>erlang:localtime_to_universaltime({{1996,11,6},{14,45,17}}).</input>
{{1996,11,6},{13,45,17}}</pre>
@@ -2135,9 +2224,8 @@ os_prompt%</pre>
<name>erlang:localtime_to_universaltime({Date1, Time1}, IsDst) -> {Date2, Time2}</name>
<fsummary>Convert from local to Universal Time Coordinated (UTC) date and time</fsummary>
<type>
- <v>Date1 = Date2 = {Year, Month, Day}</v>
- <v>Time1 = Time2 = {Hour, Minute, Second}</v>
- <v>&nbsp;Year = Month = Day = Hour = Minute = Second = int()</v>
+ <v>Date1 = Date2 = <seealso marker="calendar#type-date">calendar:date()</seealso></v>
+ <v>Time1 = Time2 = <seealso marker="calendar#type-time">calendar:time()</seealso></v>
<v>IsDst = true | false | undefined</v>
</type>
<desc>
@@ -2177,7 +2265,7 @@ os_prompt%</pre>
<name>erlang:make_tuple(Arity, InitialValue) -> tuple()</name>
<fsummary>Create a new tuple of a given arity</fsummary>
<type>
- <v>Arity = int()</v>
+ <v>Arity = arity()</v>
<v>InitialValue = term()</v>
</type>
<desc>
@@ -2192,7 +2280,7 @@ os_prompt%</pre>
<name>erlang:make_tuple(Arity, Default, InitList) -> tuple()</name>
<fsummary>Create a new tuple with given arity and contents</fsummary>
<type>
- <v>Arity = int()</v>
+ <v>Arity = arity()</v>
<v>Default = term()</v>
<v>InitList = [{Position,term()}]</v>
<v>Position = integer()</v>
@@ -2211,14 +2299,11 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>max(Term1, Term2) -> Maximum</name>
+ <name name="max" arity="2"/>
<fsummary>Return the largest of two term</fsummary>
- <type>
- <v>Term1 = Term2 = Maximum = term()</v>
- </type>
<desc>
- <p>Return the largest of <c>Term1</c> and <c>Term2</c>;
- if the terms compares equal, <c>Term1</c> will be returned.</p>
+ <p>Return the largest of <c><anno>Term1</anno></c> and <c><anno>Term2</anno></c>;
+ if the terms compare equal, <c><anno>Term1</anno></c> will be returned.</p>
</desc>
</func>
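A short shell illustration of the tie-breaking rule described above: 1 and 1.0 compare equal, so the first argument is returned.

    > max(1, 1.0).
    1
    > max(1.0, 1).
    1.0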
<func>
@@ -2468,18 +2553,15 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>min(Term1, Term2) -> Minimum</name>
+ <name name="min" arity="2"/>
<fsummary>Return the smallest of two term</fsummary>
- <type>
- <v>Term1 = Term2 = Minimum = term()</v>
- </type>
<desc>
- <p>Return the smallest of <c>Term1</c> and <c>Term2</c>;
- if the terms compare equal, <c>Term1</c> will be returned.</p>
+ <p>Return the smallest of <c><anno>Term1</anno></c> and <c><anno>Term2</anno></c>;
+ if the terms compare equal, <c><anno>Term1</anno></c> will be returned.</p>
</desc>
</func>
<func>
- <name>module_loaded(Module) -> bool()</name>
+ <name>module_loaded(Module) -> boolean()</name>
<fsummary>Check if a module is loaded</fsummary>
<type>
<v>Module = atom()</v>
@@ -2602,7 +2684,7 @@ os_prompt%</pre>
<fsummary>Monitor the status of a node</fsummary>
<type>
<v>Node = node()</v>
- <v>Flag = bool()</v>
+ <v>Flag = boolean()</v>
</type>
<desc>
<p>Monitors the status of the node <c>Node</c>. If <c>Flag</c>
@@ -2628,7 +2710,7 @@ os_prompt%</pre>
<fsummary>Monitor the status of a node</fsummary>
<type>
<v>Node = node()</v>
- <v>Flag = bool()</v>
+ <v>Flag = boolean()</v>
<v>Options = [Option]</v>
<v>Option = allow_passive_connect</v>
</type>
@@ -2711,11 +2793,8 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>nodes() -> Nodes</name>
+ <name name="nodes" arity="0"/>
<fsummary>All visible nodes in the system</fsummary>
- <type>
- <v>Nodes = [node()]</v>
- </type>
<desc>
<p>Returns a list of all visible nodes in the system, excluding
the local node. Same as <c>nodes(visible)</c>.</p>
@@ -2765,11 +2844,12 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>now() -> {MegaSecs, Secs, MicroSecs}</name>
- <fsummary>Elapsed time since 00:00 GMT</fsummary>
+ <name>now() -> timestamp()</name>
<type>
- <v>MegaSecs = Secs = MicroSecs = int()</v>
+ <v>timestamp() = {MegaSecs, Secs, MicroSecs}</v>
+ <v>MegaSecs = Secs = MicroSecs = integer() >= 0</v>
</type>
+ <fsummary>Elapsed time since 00:00 GMT</fsummary>
<desc>
<p>Returns the tuple <c>{MegaSecs, Secs, MicroSecs}</c> which is
the elapsed time since 00:00 GMT, January 1, 1970 (zero hour)
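A minimal sketch of using the returned triple; converting it to a single microsecond count follows directly from the element meanings given here:

    {MegaSecs, Secs, MicroSecs} = erlang:now(),
    TotalMicroSecs = (MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.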
@@ -2792,12 +2872,12 @@ os_prompt%</pre>
<v>PortName = {spawn, Command} | {spawn_driver, Command} | {spawn_executable, FileName} | {fd, In, Out}</v>
<v>&nbsp;Command = string()</v>
<v>&nbsp;FileName = [ FileNameChar ] | binary()</v>
- <v>&nbsp;FileNameChar = int() (1..255 or any Unicode codepoint, see description)</v>
- <v>&nbsp;In = Out = int()</v>
+ <v>&nbsp;FileNameChar = integer() (1..255 or any Unicode codepoint, see description)</v>
+ <v>&nbsp;In = Out = integer()</v>
<v>PortSettings = [Opt]</v>
<v>&nbsp;Opt = {packet, N} | stream | {line, L} | {cd, Dir} | {env, Env} | {args, [ ArgString ]} | {arg0, ArgString} | exit_status | use_stdio | nouse_stdio | stderr_to_stdout | in | out | binary | eof</v>
<v>&nbsp;&nbsp;N = 1 | 2 | 4</v>
- <v>&nbsp;&nbsp;L = int()</v>
+ <v>&nbsp;&nbsp;L = integer()</v>
<v>&nbsp;&nbsp;Dir = string()</v>
<v>&nbsp;&nbsp;ArgString = [ FileNameChar ] | binary()</v>
<v>&nbsp;&nbsp;Env = [{Name, Val}]</v>
@@ -2829,7 +2909,7 @@ os_prompt%</pre>
<p>For external programs, the <c>PATH</c> is searched
(or an equivalent method is used to find programs,
depending on operating system). This is done by invoking
- the shell och certain platforms. The first space
+ the shell on certain platforms. The first space
separated token of the command will be considered as the
name of the executable (or driver). This (among other
things) makes this option unsuitable for running
@@ -2876,11 +2956,11 @@ os_prompt%</pre>
<seealso marker="stdlib:unicode_usage">stdlib users guide
</seealso> for details.</p>
- <note>The characters in the name (if given as a list)
+ <note><p>The characters in the name (if given as a list)
can only be &gt; 255 if the Erlang VM is started in
Unicode file name translation mode, otherwise the name
of the executable is limited to the ISO-latin-1
- character set.</note>
+ character set.</p></note>
<p>If the <c>Command</c> cannot be run, an error
exception, with the posix error code as the reason, is
@@ -2993,11 +3073,11 @@ os_prompt%</pre>
Unicode translation of arguments, they can be supplied as
binaries in whatever encoding is deemed appropriate.</p>
- <note>The characters in the arguments (if given as a
+ <note><p>The characters in the arguments (if given as a
list of characters) can only be &gt; 255 if the Erlang
VM is started in Unicode file name mode,
otherwise the arguments are limited to the
- ISO-latin-1 character set.</note>
+ ISO-latin-1 character set.</p></note>
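A hedged sketch of starting an external program with spawn_executable and an explicit argument list; the path and argument strings below are placeholders, not taken from the text:

    %% "/bin/echo" and the argument strings are illustrative placeholders.
    Port = open_port({spawn_executable, "/bin/echo"},
                     [{args, ["hello", "world"]}, exit_status]).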
<p>If one, for any reason, wants to explicitly set the
program name in the argument vector, the <c>arg0</c>
@@ -3283,7 +3363,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>port_command(Port, Data, OptionList) -> true|false</name>
+ <name>port_command(Port, Data, OptionList) -> boolean()</name>
<fsummary>Send data to a port</fsummary>
<type>
<v>Port = port() | atom()</v>
@@ -3399,7 +3479,7 @@ os_prompt%</pre>
<fsummary>Perform a synchronous control operation on a port</fsummary>
<type>
<v>Port = port() | atom()</v>
- <v>Operation = int()</v>
+ <v>Operation = integer()</v>
<v>Data = Res = iodata()</v>
</type>
<desc>
@@ -3423,7 +3503,7 @@ os_prompt%</pre>
<fsummary>Synchronous call to a port with term data</fsummary>
<type>
<v>Port = port() | atom()</v>
- <v>Operation = int()</v>
+ <v>Operation = integer()</v>
<v>Data = term()</v>
</type>
<desc>
@@ -3682,12 +3762,6 @@ os_prompt%</pre>
<tag><c>process_flag(save_calls, N)</c></tag>
<item>
- <p>When there are runnable processes on priority <c>max</c>
- no processes on priority <c>low</c>, <c>normal</c>, or
- <c>high</c> will be selected for execution. As with the
- <c>high</c> priority, processes on lower priorities might
- execute in parallel with processes on priority <c>max</c>.
- </p>
<p><c>N</c> must be an integer in the interval 0..10000.
If <c>N</c> &gt; 0, call saving is made active for the
process, which means that information about the <c>N</c>
@@ -3858,11 +3932,26 @@ os_prompt%</pre>
catches in this process. This <c>InfoTuple</c> may be
changed or removed without prior notice.</p>
</item>
- <tag><c>{current_function, {Module, Function, Args}}</c></tag>
+ <tag><c>{current_function, {Module, Function, Arity}}</c></tag>
<item>
- <p><c>Module</c>, <c>Function</c>, <c>Args</c> is
+ <p><c>Module</c>, <c>Function</c>, <c>Arity</c> is
the current function call of the process.</p>
</item>
+ <tag><c>{current_location, {Module, Function, Arity, Location}}</c></tag>
+ <item>
+ <p><c>Module</c>, <c>Function</c>, <c>Arity</c> is
+ the current function call of the process.
+ <c>Location</c> is a list of two-tuples that describes the
+ location in the source code.
+ </p>
+ </item>
+ <tag><c>{current_stacktrace, Stack}</c></tag>
+ <item>
+           <p><c>Stack</c> is the current call stack back-trace
+           (<em>stacktrace</em>) of the process, in the same format as returned by
+ <seealso marker="#get_stacktrace/0">erlang:get_stacktrace/0</seealso>.
+ </p>
+ </item>
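A quick way to try the new item from the shell; the stack frames in the result will of course vary with the caller:

    > process_info(self(), current_stacktrace).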
<tag><c>{dictionary, Dictionary}</c></tag>
<item>
<p><c>Dictionary</c> is the dictionary of the process.</p>
@@ -4109,7 +4198,7 @@ os_prompt%</pre>
<v>Reason = term()</v>
<v>Stacktrace = [{Module, Function, Arity | Args} | {Fun, Args}]</v>
<v>&nbsp;Module = Function = atom()</v>
- <v>&nbsp;Arity = int()</v>
+ <v>&nbsp;Arity = arity()</v>
<v>&nbsp;Args = [term()]</v>
<v>&nbsp;Fun = [fun()]</v>
</type>
@@ -4129,11 +4218,14 @@ os_prompt%</pre>
equivalent to <c>erlang:Class(Reason)</c>.
<c>Reason</c> is any term and <c>Stacktrace</c> is a list as
returned from <c>get_stacktrace()</c>, that is a list of
- 3-tuples <c>{Module, Function, Arity | Args}</c> where
- <c>Module</c> and <c>Function</c> are atoms and the third
- element is an integer arity or an argument list. The
- stacktrace may also contain <c>{Fun, Args}</c> tuples where
+ 4-tuples <c>{Module, Function, Arity | Args,
+ Location}</c> where <c>Module</c> and <c>Function</c>
+ are atoms and the third element is an integer arity or an
+ argument list. The stacktrace may also contain <c>{Fun,
+ Args, Location}</c> tuples where
<c>Fun</c> is a local fun and <c>Args</c> is an argument list.</p>
+ <p>The <c>Location</c> element at the end is optional.
+ Omitting it is equivalent to specifying an empty list.</p>
<p>The stacktrace is used as the exception stacktrace for the
calling process; it will be truncated to the current
maximum stacktrace depth.</p>
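A minimal sketch of the common re-raise idiom the text describes, preserving the original stacktrace; risky_operation/0 is a hypothetical function:

    try
        risky_operation()
    catch
        Class:Reason ->
            Stacktrace = erlang:get_stacktrace(),
            %% ... clean up or log here ...
            erlang:raise(Class, Reason, Stacktrace)
    end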
@@ -4146,7 +4238,7 @@ os_prompt%</pre>
</desc>
</func>
<func>
- <name>erlang:read_timer(TimerRef) -> int() | false</name>
+ <name>erlang:read_timer(TimerRef) -> integer() >= 0 | false</name>
<fsummary>Number of milliseconds remaining for a timer</fsummary>
<type>
<v>TimerRef = reference()</v>
@@ -4262,7 +4354,7 @@ true</pre>
</desc>
</func>
<func>
- <name>round(Number) -> int()</name>
+ <name>round(Number) -> integer()</name>
<fsummary>Return an integer by rounding a number</fsummary>
<type>
<v>Number = number()</v>
@@ -4346,7 +4438,7 @@ true</pre>
<name>erlang:send_after(Time, Dest, Msg) -> TimerRef</name>
<fsummary>Start a timer</fsummary>
<type>
- <v>Time = int()</v>
+ <v>Time = integer() >= 0</v>
<v>&nbsp;0 &lt;= Time &lt;= 4294967295</v>
<v>Dest = pid() | RegName </v>
<v>&nbsp;LocalPid = pid() (of a process, alive or dead, on the local node)</v>
@@ -4375,17 +4467,12 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:send_nosuspend(Dest, Msg) -> bool()</name>
+ <name name="send_nosuspend" arity="2"/>
<fsummary>Try to send a message without ever blocking</fsummary>
- <type>
- <v>Dest = pid() | port() | RegName | {RegName, Node}</v>
- <v>&nbsp;RegName = atom()</v>
- <v>&nbsp;Node = node()</v>
- <v>Msg = term()</v>
- </type>
+ <type name="dst"/>
<desc>
<p>The same as
- <seealso marker="#send/3">erlang:send(Dest, Msg, [nosuspend])</seealso>, but returns <c>true</c> if
+ <seealso marker="#send/3">erlang:send(<anno>Dest</anno>, <anno>Msg</anno>, [nosuspend])</seealso>, but returns <c>true</c> if
the message was sent and <c>false</c> if the message was not
sent because the sender would have had to be suspended.</p>
<p>This function is intended for send operations towards an
@@ -4393,7 +4480,7 @@ true</pre>
(Erlang) process. If the connection to the remote node
(usually not a real Erlang node, but a node written in C or
Java) is overloaded, this function <em>will not send the message</em> but return <c>false</c> instead.</p>
- <p>The same happens, if <c>Dest</c> refers to a local port that
+ <p>The same happens, if <c><anno>Dest</anno></c> refers to a local port that
is busy. For all other destinations (allowed for the ordinary
send operator <c>'!'</c>) this function sends the message and
returns <c>true</c>.</p>
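A hedged sketch of the typical use towards a possibly busy port or remote process; Dest and Msg are placeholders:

    case erlang:send_nosuspend(Dest, Msg) of
        true  -> ok;                     % message was sent
        false -> {error, would_suspend}  % sender would have been suspended
    end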
@@ -4426,18 +4513,12 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:send_nosuspend(Dest, Msg, Options) -> bool()</name>
+ <name name="send_nosuspend" arity="3"/>
<fsummary>Try to send a message without ever blocking</fsummary>
- <type>
- <v>Dest = pid() | port() | RegName | {RegName, Node}</v>
- <v>&nbsp;RegName = atom()</v>
- <v>&nbsp;Node = node()</v>
- <v>Msg = term()</v>
- <v>Option = noconnect</v>
- </type>
+ <type name="dst"/>
<desc>
<p>The same as
- <seealso marker="#send/3">erlang:send(Dest, Msg, [nosuspend | Options])</seealso>,
+ <seealso marker="#send/3">erlang:send(<anno>Dest</anno>, <anno>Msg</anno>, [nosuspend | <anno>Options</anno>])</seealso>,
but with boolean return value.</p>
<p>This function behaves like
<seealso marker="#send_nosuspend/2">erlang:send_nosuspend/2)</seealso>,
@@ -4462,17 +4543,13 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:set_cookie(Node, Cookie) -> true</name>
+ <name name="set_cookie" arity="2"/>
<fsummary>Set the magic cookie of a node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Cookie = atom()</v>
- </type>
<desc>
- <p>Sets the magic cookie of <c>Node</c> to the atom
- <c>Cookie</c>. If <c>Node</c> is the local node, the function
+ <p>Sets the magic cookie of <c><anno>Node</anno></c> to the atom
+ <c><anno>Cookie</anno></c>. If <c><anno>Node</anno></c> is the local node, the function
also sets the cookie of all other unknown nodes to
- <c>Cookie</c> (see
+ <c><anno>Cookie</anno></c> (see
<seealso marker="doc/reference_manual:distributed">Distributed Erlang</seealso> in the Erlang Reference Manual).</p>
<p>Failure: <c>function_clause</c> if the local node is not
alive.</p>
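For example, assuming the hypothetical cookie value below, the cookie of the local node can be set from the shell:

    > erlang:set_cookie(node(), 'my_secret_cookie').
    true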
@@ -4497,7 +4574,7 @@ true</pre>
</desc>
</func>
<func>
- <name>size(Item) -> int()</name>
+ <name>size(Item) -> integer() >= 0</name>
<fsummary>Size of a tuple or binary</fsummary>
<type>
<v>Item = tuple() | binary()</v>
@@ -4512,28 +4589,21 @@ true</pre>
</desc>
</func>
<func>
- <name>spawn(Fun) -> pid()</name>
+ <name name="spawn" arity="1"/>
<fsummary>Create a new process with a fun as entry point</fsummary>
- <type>
- <v>Fun = fun()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list <c>[]</c>. Otherwise works
+ of <c><anno>Fun</anno></c> to the empty list <c>[]</c>. Otherwise works
like <seealso marker="#spawn/3">spawn/3</seealso>.</p>
</desc>
</func>
<func>
- <name>spawn(Node, Fun) -> pid()</name>
+ <name name="spawn" arity="2"/>
<fsummary>Create a new process with a fun as entry point on a given node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Fun = fun()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list <c>[]</c> on <c>Node</c>. If
- <c>Node</c> does not exist, a useless pid is returned.
+ of <c><anno>Fun</anno></c> to the empty list <c>[]</c> on <c><anno>Node</anno></c>. If
+ <c><anno>Node</anno></c> does not exist, a useless pid is returned.
Otherwise works like
<seealso marker="#spawn/3">spawn/3</seealso>.</p>
</desc>
@@ -4564,47 +4634,35 @@ true</pre>
</desc>
</func>
<func>
- <name>spawn(Node, Module, Function, Args) -> pid()</name>
+ <name name="spawn" arity="4"/>
<fsummary>Create a new process with a function as entry point on a given node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Module = Function = atom()</v>
- <v>Args = [term()]</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Module:Function</c> to <c>Args</c> on <c>Node</c>. If
- <c>Node</c> does not exists, a useless pid is returned.
+ of <c><anno>Module</anno>:<anno>Function</anno></c> to <c><anno>Args</anno></c> on <c>Node</c>. If
+          <c><anno>Node</anno></c> does not exist, a useless pid is returned.
Otherwise works like
<seealso marker="#spawn/3">spawn/3</seealso>.</p>
</desc>
</func>
<func>
- <name>spawn_link(Fun) -> pid()</name>
+ <name name="spawn_link" arity="1"/>
<fsummary>Create and link to a new process with a fun as entry point</fsummary>
- <type>
- <v>Fun = fun()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list []. A link is created between
+ of <c><anno>Fun</anno></c> to the empty list []. A link is created between
the calling process and the new process, atomically.
Otherwise works like
<seealso marker="#spawn/3">spawn/3</seealso>.</p>
</desc>
</func>
<func>
- <name>spawn_link(Node, Fun) -> pid()</name>
+ <name name="spawn_link" arity="2"/>
<fsummary>Create and link to a new process with a fun as entry point on a specified node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Fun = fun()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list [] on <c>Node</c>. A link is
+ of <c><anno>Fun</anno></c> to the empty list [] on <c><anno>Node</anno></c>. A link is
created between the calling process and the new process,
- atomically. If <c>Node</c> does not exist, a useless pid is
+ atomically. If <c><anno>Node</anno></c> does not exist, a useless pid is
returned (and due to the link, an exit signal with exit
reason <c>noconnection</c> will be received). Otherwise works
like <seealso marker="#spawn/3">spawn/3</seealso>.</p>
@@ -4626,47 +4684,35 @@ true</pre>
</desc>
</func>
<func>
- <name>spawn_link(Node, Module, Function, Args) -> pid()</name>
+ <name name="spawn_link" arity="4"/>
<fsummary>Create and link to a new process with a function as entry point on a given node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Module = Function = atom()</v>
- <v>Args = [term()]</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Module:Function</c> to <c>Args</c> on <c>Node</c>. A
+ of <c><anno>Module</anno>:<anno>Function</anno></c> to <c><anno>Args</anno></c> on <c>Node</c>. A
link is created between the calling process and the new
- process, atomically. If <c>Node</c> does not exist, a useless
+ process, atomically. If <c><anno>Node</anno></c> does not exist, a useless
pid is returned (and due to the link, an exit signal with exit
reason <c>noconnection</c> will be received). Otherwise works
like <seealso marker="#spawn/3">spawn/3</seealso>.</p>
</desc>
</func>
<func>
- <name>spawn_monitor(Fun) -> {pid(),reference()}</name>
+ <name name="spawn_monitor" arity="1"/>
<fsummary>Create and monitor a new process with a fun as entry point</fsummary>
- <type>
- <v>Fun = fun()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list [] and reference for a monitor
+ of <c><anno>Fun</anno></c> to the empty list [] and reference for a monitor
created to the new process.
Otherwise works like
<seealso marker="#spawn/3">spawn/3</seealso>.</p>
</desc>
</func>
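A small sketch of the combined spawn-and-monitor behaviour; the fun body is arbitrary:

    {Pid, MonitorRef} = spawn_monitor(fun() -> exit(normal) end),
    receive
        {'DOWN', MonitorRef, process, Pid, Reason} ->
            Reason    % normal
    end.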
<func>
- <name>spawn_monitor(Module, Function, Args) -> {pid(),reference()}</name>
+ <name name="spawn_monitor" arity="3"/>
<fsummary>Create and monitor a new process with a function as entry point</fsummary>
- <type>
- <v>Module = Function = atom()</v>
- <v>Args = [term()]</v>
- </type>
<desc>
<p>A new process is started by the application
- of <c>Module:Function</c> to <c>Args</c>, and the process is
+ of <c><anno>Module</anno>:<anno>Function</anno></c> to <c><anno>Args</anno></c>, and the process is
monitored at the same time. Returns the pid and a reference
for the monitor.
Otherwise works like
@@ -4674,19 +4720,11 @@ true</pre>
</desc>
</func>
<func>
- <name>spawn_opt(Fun, [Option]) -> pid() | {pid(),reference()}</name>
+ <name name="spawn_opt" arity="2"/>
<fsummary>Create a new process with a fun as entry point</fsummary>
- <type>
- <v>Fun = fun()</v>
- <v>Option = link | monitor | {priority, Level} | {fullsweep_after, Number} | {min_heap_size, Size} | {min_bin_vheap_size, VSize}</v>
- <v>&nbsp;Level = low | normal | high</v>
- <v>&nbsp;Number = int()</v>
- <v>&nbsp;Size = int()</v>
- <v>&nbsp;VSize = int()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list <c>[]</c>. Otherwise
+ of <c><anno>Fun</anno></c> to the empty list <c>[]</c>. Otherwise
works like
<seealso marker="#spawn_opt/4">spawn_opt/4</seealso>.</p>
<p>If the option <c>monitor</c> is given, the newly created
@@ -4695,37 +4733,19 @@ true</pre>
</desc>
</func>
<func>
- <name>spawn_opt(Node, Fun, [Option]) -> pid()</name>
+ <name name="spawn_opt" arity="3"/>
<fsummary>Create a new process with a fun as entry point on a given node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Fun = fun()</v>
- <v>Option = link | {priority, Level} | {fullsweep_after, Number} | {min_heap_size, Size} | {min_bin_vheap_size, VSize}</v>
- <v>&nbsp;Level = low | normal | high</v>
- <v>&nbsp;Number = int()</v>
- <v>&nbsp;Size = int()</v>
- <v>&nbsp;VSize = int()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Fun</c> to the empty list <c>[]</c> on <c>Node</c>. If
- <c>Node</c> does not exist, a useless pid is returned.
+ of <c><anno>Fun</anno></c> to the empty list <c>[]</c> on <c><anno>Node</anno></c>. If
+ <c><anno>Node</anno></c> does not exist, a useless pid is returned.
Otherwise works like
<seealso marker="#spawn_opt/4">spawn_opt/4</seealso>.</p>
</desc>
</func>
<func>
- <name>spawn_opt(Module, Function, Args, [Option]) -> pid() | {pid(),reference()}</name>
+ <name name="spawn_opt" arity="4"/>
<fsummary>Create a new process with a function as entry point</fsummary>
- <type>
- <v>Module = Function = atom()</v>
- <v>Args = [term()]</v>
- <v>Option = link | monitor | {priority, Level} | {fullsweep_after, Number} | {min_heap_size, Size} | {min_bin_vheap_size, VSize}</v>
- <v>&nbsp;Level = low | normal | high</v>
- <v>&nbsp;Number = int()</v>
- <v>&nbsp;Size = int()</v>
- <v>&nbsp;VSize = int()</v>
- </type>
<desc>
<p>Works exactly like
<seealso marker="#spawn/3">spawn/3</seealso>, except that an
@@ -4744,17 +4764,17 @@ true</pre>
<p>Monitor the new process (just like
<seealso marker="#monitor/2">monitor/2</seealso> does).</p>
</item>
- <tag><c>{priority, Level}</c></tag>
+ <tag><c>{priority, <anno>Level</anno>}</c></tag>
<item>
<p>Sets the priority of the new process. Equivalent to
executing
- <seealso marker="#process_flag_priority">process_flag(priority, Level)</seealso> in the start function of the new process,
+ <seealso marker="#process_flag_priority">process_flag(priority, <anno>Level</anno>)</seealso> in the start function of the new process,
except that the priority will be set before the process is
selected for execution for the first time. For more information
on priorities see
<seealso marker="#process_flag_priority">process_flag(priority, Level)</seealso>.</p>
</item>
- <tag><c>{fullsweep_after, Number}</c></tag>
+ <tag><c>{fullsweep_after, <anno>Number</anno>}</c></tag>
<item>
<p>This option is only useful for performance tuning.
In general, you should not use this option unless you
@@ -4776,18 +4796,18 @@ true</pre>
<p>Here are a few cases when it could be useful to change
<c>fullsweep_after</c>. Firstly, if binaries that are no
longer used should be thrown away as soon as possible.
- (Set <c>Number</c> to zero.) Secondly, a process that
+ (Set <c><anno>Number</anno></c> to zero.) Secondly, a process that
mostly have short-lived data will be fullsweeped seldom
or never, meaning that the old heap will contain mostly
garbage. To ensure a fullsweep once in a while, set
- <c>Number</c> to a suitable value such as 10 or 20.
+ <c><anno>Number</anno></c> to a suitable value such as 10 or 20.
Thirdly, in embedded systems with limited amount of RAM
and no virtual memory, one might want to preserve memory
- by setting <c>Number</c> to zero. (The value may be set
+ by setting <c><anno>Number</anno></c> to zero. (The value may be set
globally, see
<seealso marker="#system_flag/2">erlang:system_flag/2</seealso>.)</p>
</item>
- <tag><c>{min_heap_size, Size}</c></tag>
+ <tag><c>{min_heap_size, <anno>Size</anno>}</c></tag>
<item>
<p>This option is only useful for performance tuning.
In general, you should not use this option unless you
@@ -4802,9 +4822,9 @@ true</pre>
slow down the system due to worse data locality.
Therefore, it is recommended to use this option only for
fine-tuning an application and to measure the execution
- time with various <c>Size</c> values.</p>
+ time with various <c><anno>Size</anno></c> values.</p>
</item>
- <tag><c>{min_bin_vheap_size, VSize}</c></tag>
+ <tag><c>{min_bin_vheap_size, <anno>VSize</anno>}</c></tag>
<item>
<p>This option is only useful for performance tuning.
In general, you should not use this option unless you
@@ -4818,29 +4838,19 @@ true</pre>
Setting too high value, however, might waste memory.
Therefore, it is recommended to use this option only for
fine-tuning an application and to measure the execution
- time with various <c>VSize</c> values.</p>
+ time with various <c><anno>VSize</anno></c> values.</p>
</item>
</taglist>
</desc>
</func>
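A hedged sketch combining several of the options listed above; the numeric values are illustrative only:

    Pid = spawn_opt(fun() -> timer:sleep(infinity) end,
                    [link,
                     {priority, high},
                     {fullsweep_after, 20},
                     {min_heap_size, 1000}]).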
<func>
- <name>spawn_opt(Node, Module, Function, Args, [Option]) -> pid()</name>
+ <name name="spawn_opt" arity="5"/>
<fsummary>Create a new process with a function as entry point on a given node</fsummary>
- <type>
- <v>Node = node()</v>
- <v>Module = Function = atom()</v>
- <v>Args = [term()]</v>
- <v>Option = link | {priority, Level} | {fullsweep_after, Number} | {min_heap_size, Size} | {min_bin_vheap_size, VSize}</v>
- <v>&nbsp;Level = low | normal | high</v>
- <v>&nbsp;Number = int()</v>
- <v>&nbsp;Size = int()</v>
- <v>&nbsp;VSize = int()</v>
- </type>
<desc>
<p>Returns the pid of a new process started by the application
- of <c>Module:Function</c> to <c>Args</c> on <c>Node</c>. If
- <c>Node</c> does not exist, a useless pid is returned.
+ of <c><anno>Module</anno>:<anno>Function</anno></c> to <c><anno>Args</anno></c> on <c>Node</c>. If
+ <c><anno>Node</anno></c> does not exist, a useless pid is returned.
Otherwise works like
<seealso marker="#spawn_opt/4">spawn_opt/4</seealso>.</p>
</desc>
@@ -4874,7 +4884,7 @@ true</pre>
<name>erlang:start_timer(Time, Dest, Msg) -> TimerRef</name>
<fsummary>Start a timer</fsummary>
<type>
- <v>Time = int()</v>
+ <v>Time = integer() >= 0</v>
<v>&nbsp;0 &lt;= Time &lt;= 4294967295</v>
<v>Dest = LocalPid | RegName </v>
<v>&nbsp;LocalPid = pid() (of a process, alive or dead, on the local node)</v>
@@ -4911,6 +4921,7 @@ true</pre>
<v>Type, Res -- see below</v>
</type>
<desc>
+ <p>All times are in milliseconds unless otherwise specified.</p>
<p>Returns information about the system as specified by
<c>Type</c>:</p>
<taglist>
@@ -4924,15 +4935,20 @@ true</pre>
<item>
<p>Returns
<c>{Total_Exact_Reductions, Exact_Reductions_Since_Last_Call}</c>.</p>
- <p><em>NOTE:</em><c>statistics(exact_reductions)</c> is
- a more expensive operation than
- <seealso marker="#statistics_reductions">statistics(reductions)</seealso>
- especially on an Erlang machine with SMP support.</p>
+ <note><p><c>statistics(exact_reductions)</c> is
+ a more expensive operation than
+ <seealso marker="#statistics_reductions">statistics(reductions)</seealso>
+ especially on an Erlang machine with SMP support.</p>
+ </note>
</item>
<tag><c>garbage_collection</c></tag>
<item>
<p>Returns <c>{Number_of_GCs, Words_Reclaimed, 0}</c>. This
information may not be valid for all implementations.</p>
+ <pre>
+> <input>statistics(garbage_collection).</input>
+{85,23961,0}
+</pre>
</item>
<tag><c>io</c></tag>
<item>
@@ -4944,12 +4960,18 @@ true</pre>
<tag><marker id="statistics_reductions"><c>reductions</c></marker></tag>
<item>
<p>Returns
- <c>{Total_Reductions, Reductions_Since_Last_Call}</c>.</p>
- <p><em>NOTE:</em> From erts version 5.5 (OTP release R11B)
- this value does not include reductions performed in current
- time slices of currently scheduled processes. If an
- exact value is wanted, use
- <seealso marker="#statistics_exact_reductions">statistics(exact_reductions)</seealso>.</p>
+ <c>{Total_Reductions, Reductions_Since_Last_Call}</c>.</p>
+ <note>
+ <p>From erts version 5.5 (OTP release R11B)
+ this value does not include reductions performed in current
+ time slices of currently scheduled processes. If an
+ exact value is wanted, use
+ <seealso marker="#statistics_exact_reductions">statistics(exact_reductions)</seealso>.</p>
+ </note>
+ <pre>
+> <input>statistics(reductions).</input>
+{2046,11}
+</pre>
</item>
<tag><c>run_queue</c></tag>
<item>
@@ -4962,7 +4984,74 @@ true</pre>
Note that the run-time is the sum of the run-time for all
threads in the Erlang run-time system and may therefore be greater
than the wall-clock time.</p>
+ <pre>
+> <input>statistics(runtime).</input>
+{1690,1620}
+</pre>
+ </item>
+ <tag><marker id="statistics_scheduler_wall_time"><c>scheduler_wall_time</c></marker></tag>
+ <item>
+        <p>Returns a list of <c>{SchedulerId, ActiveTime, TotalTime}</c>
+        tuples, where <c>SchedulerId</c> is the integer id of the scheduler,
+        <c>ActiveTime</c> is the duration the scheduler has been busy, and
+        <c>TotalTime</c> is the total time duration since
+        <seealso marker="#system_flag_scheduler_wall_time">scheduler_wall_time</seealso>
+        activation. The time unit is not defined and may change between
+        releases, operating systems and system restarts.
+        <c>scheduler_wall_time</c> should therefore only be used to calculate
+        relative scheduler-utilization values. <c>ActiveTime</c> can never
+        exceed <c>TotalTime</c>.
+ </p>
+
+        <p>A scheduler is considered busy when it is not idle and not
+        scheduling (selecting) a process or port, that is, when it is
+        executing process code, executing linked-in driver or NIF code,
+        executing built-in functions or any other runtime handling,
+        garbage collecting, or handling any other memory management.
+        Note that a scheduler may also be reported as busy even if the
+        operating system has scheduled out the scheduler thread.
+ </p>
+
+ <p>
+ Returns <c>undefined</c> if the system flag <seealso marker="#system_flag_scheduler_wall_time">
+ scheduler_wall_time</seealso> is turned off.
+ </p>
+
+ <p>The list of scheduler information is unsorted and may appear in different order
+ between calls.
+ </p>
+ <p>Using <c>scheduler_wall_time</c> to calculate scheduler utilization.</p>
+<pre>
+> <input>erlang:system_flag(scheduler_wall_time, true).</input>
+false
+> <input>Ts0 = lists:sort(erlang:statistics(scheduler_wall_time)), ok.</input>
+ok
+</pre>
+ <p>Some time later we will take another snapshot and calculate scheduler-utilization per scheduler.</p>
+<pre>
+> <input>Ts1 = lists:sort(erlang:statistics(scheduler_wall_time)), ok.</input>
+ok
+> <input>lists:map(fun({{I, A0, T0}, {I, A1, T1}}) ->
+ {I, (A1 - A0)/(T1 - T0)} end, lists:zip(Ts0,Ts1)).</input>
+[{1,0.9743474730177548},
+ {2,0.9744843782751444},
+ {3,0.9995902361669045},
+ {4,0.9738012596572161},
+ {5,0.9717956667018103},
+ {6,0.9739235846420741},
+ {7,0.973237033077876},
+ {8,0.9741297293248656}]
+</pre>
+ <p>Using the same snapshots to calculate a total scheduler-utilization.</p>
+<pre>
+> <input>{A, T} = lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {Ai,Ti}) ->
+ {Ai + (A1 - A0), Ti + (T1 - T0)} end, {0, 0}, lists:zip(Ts0,Ts1)), A/T.</input>
+0.9769136803764825
+</pre>
+
+ <note>
+ <p><c>scheduler_wall_time</c> is by default disabled. Use <c>erlang:system_flag(scheduler_wall_time, true)</c> to enable it. </p>
+ </note>
</item>
+
<tag><c>wall_clock</c></tag>
<item>
<p>Returns
@@ -4972,18 +5061,10 @@ true</pre>
opposed to runtime or CPU time.</p>
</item>
</taglist>
- <p>All times are in milliseconds.</p>
- <pre>
-> <input>statistics(runtime).</input>
-{1690,1620}
-> <input>statistics(reductions).</input>
-{2046,11}
-> <input>statistics(garbage_collection).</input>
-{85,23961,0}</pre>
</desc>
</func>
<func>
- <name>erlang:suspend_process(Suspendee, OptList) -> true | false</name>
+ <name>erlang:suspend_process(Suspendee, OptList) -> boolean()</name>
<fsummary>Suspend a process</fsummary>
<type>
<v>Suspendee = pid()</v>
@@ -5083,15 +5164,12 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:suspend_process(Suspendee) -> true</name>
+ <name name="suspend_process" arity="1"/>
<fsummary>Suspend a process</fsummary>
- <type>
- <v>Suspendee = pid()</v>
- </type>
<desc>
- <p>Suspends the process identified by <c>Suspendee</c>. The
+ <p>Suspends the process identified by <c><anno>Suspendee</anno></c>. The
same as calling
- <seealso marker="#suspend_process/2">erlang:suspend_process(Suspendee, [])</seealso>. For more information see the documentation of <seealso marker="#suspend_process/2">erlang:suspend_process/2</seealso>.
+ <seealso marker="#suspend_process/2">erlang:suspend_process(<anno>Suspendee</anno>, [])</seealso>. For more information see the documentation of <seealso marker="#suspend_process/2">erlang:suspend_process/2</seealso>.
</p>
<warning>
<p>This BIF is intended for debugging only.</p>
@@ -5105,6 +5183,14 @@ true</pre>
<v>Flag, Value, OldValue -- see below</v>
</type>
<desc>
+ <warning>
+ <p>The
+ <seealso marker="#system_flag_cpu_topology">cpu_topology</seealso>,
+ and
+ <seealso marker="#system_flag_scheduler_bind_type">scheduler_bind_type</seealso>
+ <c>Flag</c>s are <em>deprecated</em> and have been scheduled for
+ removal in erts-5.10/OTP-R16.</p>
+ </warning>
<p>Sets various system properties of the Erlang node. Returns
the old value of the flag.</p>
<taglist>
@@ -5115,6 +5201,12 @@ true</pre>
</item>
<tag><marker id="system_flag_cpu_topology"><c>erlang:system_flag(cpu_topology, CpuTopology)</c></marker></tag>
<item>
+ <p><em>NOTE:</em> This argument is <em>deprecated</em> and
+ scheduled for removal in erts-5.10/OTP-R16. Instead of using
+ this argument you are advised to use the <c>erl</c> command
+ line argument <seealso marker="erts:erl#+sct">+sct</seealso>.
+ When this argument has been removed a final CPU topology to use
+ will be determined at emulator boot time.</p>
<p>Sets the user defined <c>CpuTopology</c>. The user defined
CPU topology will override any automatically detected
CPU topology. By passing <c>undefined</c> as <c>CpuTopology</c>
@@ -5129,15 +5221,15 @@ true</pre>
to rebind according to the new CPU topology.
</p>
<p>The user defined CPU topology can also be set by passing
- the <seealso marker="erl#+sct">+sct</seealso> command
+ the <seealso marker="erts:erl#+sct">+sct</seealso> command
line argument to <c>erl</c>.
</p>
<p>For information on the <c>CpuTopology</c> type
and more, see the documentation of
<seealso marker="#system_info_cpu_topology">erlang:system_info(cpu_topology)</seealso>,
- the <c>erl</c> <seealso marker="erl#+sct">+sct</seealso>
- emulator flag, and
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>.
+ and the <c>erl</c> <seealso marker="erts:erl#+sct">+sct</seealso>
+ and <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ command line flags.
</p>
</item>
<tag><c>erlang:system_flag(fullsweep_after, Number)</c></tag>
@@ -5213,6 +5305,12 @@ true</pre>
</item>
<tag><marker id="system_flag_scheduler_bind_type"><c>erlang:system_flag(scheduler_bind_type, How)</c></marker></tag>
<item>
+ <p><em>NOTE:</em> This argument is <em>deprecated</em> and
+ scheduled for removal in erts-5.10/OTP-R16. Instead of using
+ this argument you are advised to use the <c>erl</c> command
+ line argument <seealso marker="erts:erl#+sbt">+sbt</seealso>.
+ When this argument has been removed a final scheduler bind type
+ to use will be determined at emulator boot time.</p>
<p>Controls if and how schedulers are bound to logical
processors.</p>
<p>When <c>erlang:system_flag(scheduler_bind_type, How)</c> is
@@ -5234,93 +5332,61 @@ true</pre>
the CPU topology needs to be known. If the runtime system fails
to automatically detect the CPU topology, it can be defined.
For more information on how to define the CPU topology, see
- <seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.
+ the <c>erl</c> <seealso marker="erts:erl#+sct">+sct</seealso> command
+ line flag.
</p>
- <p>The runtime system will by default bind schedulers to logical
- processors using the <c>default_bind</c> bind type if the amount
- of schedulers are at least equal to the amount of logical
- processors configured, binding of schedulers is supported,
- and a CPU topology is available at startup.
+ <p>The runtime system will by default <em>not</em> bind schedulers
+ to logical processors.
</p>
<p><em>NOTE:</em> If the Erlang runtime system is the only
operating system process that binds threads to logical processors,
this improves the performance of the runtime system. However,
if other operating system processes (as for example another Erlang
runtime system) also bind threads to logical processors, there
- might be a performance penalty instead. If this is the case you,
- are are advised to unbind the schedulers using the
- <seealso marker="erl#+sbt">+sbtu</seealso> command line argument,
- or <c>erlang:system_flag(scheduler_bind_type, unbound)</c>.</p>
+ might be a performance penalty instead. In some cases this
+ performance penalty might be severe. If this is the case, you
+ are advised to not bind the schedulers.</p>
<p>Schedulers can be bound in different ways. The <c>How</c>
argument determines how schedulers are bound. <c>How</c> can
currently be one of:</p>
<taglist>
<tag><c>unbound</c></tag>
- <item>
- <p>Schedulers will not be bound to logical processors, i.e.,
- the operating system decides where the scheduler threads
- execute, and when to migrate them. This is the default.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt u</seealso>.
+ </p></item>
<tag><c>no_spread</c></tag>
- <item>
- <p>Schedulers with close scheduler identifiers will be bound
- as close as possible in hardware.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt ns</seealso>.
+ </p></item>
<tag><c>thread_spread</c></tag>
- <item>
- <p>Thread refers to hardware threads (e.g. Intels
- hyper-threads). Schedulers with low scheduler identifiers,
- will be bound to the first hardware thread of each core,
- then schedulers with higher scheduler identifiers will be
- bound to the second hardware thread of each core, etc.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt ts</seealso>.
+ </p></item>
<tag><c>processor_spread</c></tag>
- <item>
- <p>Schedulers will be spread like <c>thread_spread</c>, but
- also over physical processor chips.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt ps</seealso>.
+ </p></item>
<tag><c>spread</c></tag>
- <item>
- <p>Schedulers will be spread as much as possible.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt s</seealso>.
+ </p></item>
<tag><c>no_node_thread_spread</c></tag>
- <item>
- <p>Like <c>thread_spread</c>, but if multiple NUMA
- (Non-Uniform Memory Access) nodes exists,
- schedulers will be spread over one NUMA node at a time,
- i.e., all logical processors of one NUMA node will
- be bound to schedulers in sequence.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt nnts</seealso>.
+ </p></item>
<tag><c>no_node_processor_spread</c></tag>
- <item>
- <p>Like <c>processor_spread</c>, but if multiple NUMA
- nodes exists, schedulers will be spread over one
- NUMA node at a time, i.e., all logical processors of
- one NUMA node will be bound to schedulers in sequence.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt nnps</seealso>.
+ </p></item>
<tag><c>thread_no_node_processor_spread</c></tag>
- <item>
- <p>A combination of <c>thread_spread</c>, and
- <c>no_node_processor_spread</c>. Schedulers will be
- spread over hardware threads across NUMA nodes, but
- schedulers will only be spread over processors internally
- in one NUMA node at a time.</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt tnnps</seealso>.
+ </p></item>
<tag><c>default_bind</c></tag>
- <item>
- <p>Binds schedulers the default way. Currently the default
- is <c>thread_no_node_processor_spread</c> (which might change
- in the future).</p>
- </item>
+ <item><p>Same as the <c>erl</c> command line argument
+ <seealso marker="erts:erl#+sbt">+sbt db</seealso>.
+ </p></item>
</taglist>
- <p>How schedulers are bound matters. For example, in
- situations when there are fewer running processes than
- schedulers online, the runtime system tries to migrate
- processes to schedulers with low scheduler identifiers.
- The more the schedulers are spread over the hardware,
- the more resources will be available to the runtime
- system in such situations.
- </p>
<p>The value returned equals <c>How</c> before the
<c>scheduler_bind_type</c> flag was changed.</p>
<p>Failure:</p>
@@ -5339,17 +5405,25 @@ true</pre>
</item>
</taglist>
<p>The scheduler bind type can also be set by passing
- the <seealso marker="erl#+sbt">+sbt</seealso> command
+ the <seealso marker="erts:erl#+sbt">+sbt</seealso> command
line argument to <c>erl</c>.
</p>
<p>For more information, see
<seealso marker="#system_info_scheduler_bind_type">erlang:system_info(scheduler_bind_type)</seealso>,
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>,
- the <c>erl</c> <seealso marker="erl#+sbt">+sbt</seealso>
- emulator flag, and
- <seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>.
+ the <c>erl</c> <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ and <seealso marker="erts:erl#+sct">+sct</seealso> command line
+ flags.
</p>
</item>
+ <tag><marker id="system_flag_scheduler_wall_time"><c>erlang:system_flag(scheduler_wall_time, Boolean)</c></marker></tag>
+ <item>
+ <p>Turns on/off scheduler wall time measurements. </p>
+            <p>For more information, see
+ <seealso marker="#statistics_scheduler_wall_time">erlang:statistics(scheduler_wall_time)</seealso>.
+ </p>
+ </item>
+
<tag><marker id="system_flag_schedulers_online"><c>erlang:system_flag(schedulers_online, SchedulersOnline)</c></marker></tag>
<item>
<p>Sets the amount of schedulers online. Valid range is
@@ -5361,6 +5435,7 @@ true</pre>
<seealso marker="#system_info_schedulers_online">erlang:system_info(schedulers_online)</seealso>.
</p>
</item>
+
<tag><c>erlang:system_flag(trace_control_word, TCW)</c></tag>
<item>
<p>Sets the value of the node's trace control word to
@@ -5416,7 +5491,7 @@ true</pre>
<p>Types:</p>
<list type="bulleted">
<item><c>Allocator = undefined | glibc</c></item>
- <item><c>Version = [int()]</c></item>
+ <item><c>Version = [integer()]</c></item>
<item><c>Features = [atom()]</c></item>
<item><c>Settings = [{Subsystem, [{Parameter, Value}]}]</c></item>
<item><c>Subsystem = atom()</c></item>
@@ -5548,10 +5623,12 @@ true</pre>
<item>
<p>Returns the <c>CpuTopology</c> which currently is used by the
emulator. The CPU topology is used when binding schedulers
- to logical processors. The CPU topology used is the user defined
- CPU topology if such exist; otherwise, the automatically
- detected CPU topology if such exist. If no CPU topology
- exist <c>undefined</c> is returned.</p>
+ to logical processors. The CPU topology used is the
+ <seealso marker="erlang#system_info_cpu_topology_defined">user
+ defined CPU topology</seealso> if such exists; otherwise, the
+ <seealso marker="erlang#system_info_cpu_topology_detected">automatically
+ detected CPU topology</seealso> if such exists. If no CPU topology
+ exists, <c>undefined</c> is returned.</p>
<p>Types:</p>
<list type="bulleted">
<item><c>CpuTopology = LevelEntryList | undefined</c></item>
@@ -5598,8 +5675,8 @@ true</pre>
<item>
<p>Returns the user defined <c>CpuTopology</c>. For more
information see the documentation of
- <seealso marker="#system_flag_cpu_topology">erlang:system_flag(cpu_topology, CpuTopology)</seealso>
- and the documentation of the
+ the <c>erl</c> <seealso marker="erts:erl#+sct">+sct</seealso> command
+ line flag, and the documentation of the
<seealso marker="#system_info_cpu_topology">cpu_topology</seealso>
argument.
</p>
@@ -5667,6 +5744,29 @@ true</pre>
used by the runtime system. It will be on the form
<seealso marker="erts:erl_driver#version_management">"&lt;major ver&gt;.&lt;minor ver&gt;"</seealso>.</p>
</item>
+ <tag><c>dynamic_trace</c></tag>
+ <item>
+ <p>Returns an atom describing the dynamic trace framework
+ compiled into the virtual machine. It can currently be either
+ <c>dtrace</c>, <c>systemtap</c> or <c>none</c>. For a
+ commercial or standard build, this is always <c>none</c>,
+ the other return values indicate a custom configuration
+ (e.g. <c>./configure --with-dynamic-trace=dtrace</c>). See
+ the <seealso marker="runtime_tools:dyntrace">dyntrace
+ </seealso> manual page and the
+ <c>README.dtrace</c>/<c>README.systemtap</c> files in the
+ Erlang source code top directory for more information
+ about dynamic tracing.</p>
+ </item>
+ <tag><c>dynamic_trace_probes</c></tag>
+ <item>
+ <p>Returns a <c>boolean()</c> indicating if dynamic trace probes
+ (either dtrace or systemtap) are built into the
+ emulator. This can only be <c>true</c> if the virtual
+ machine was built for dynamic tracing
+ (i.e. <c>system_info(dynamic_trace)</c> returns
+ <c>dtrace</c> or <c>systemtap</c>).</p>
+ </item>
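On a standard build, the two items described above would be expected to look like this in the shell:

    > erlang:system_info(dynamic_trace).
    none
    > erlang:system_info(dynamic_trace_probes).
    false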
<tag><c>elib_malloc</c></tag>
<item>
<p>This option will be removed in a future release.
@@ -5677,12 +5777,12 @@ true</pre>
<item>
<p>Returns the value of the distribution buffer busy limit
in bytes. This limit can be set on startup by passing the
- <seealso marker="erl#+zdbbl">+zdbbl</seealso> command line
+ <seealso marker="erts:erl#+zdbbl">+zdbbl</seealso> command line
flag to <c>erl</c>.</p>
</item>
<tag><c>fullsweep_after</c></tag>
<item>
- <p>Returns <c>{fullsweep_after, int()}</c> which is the
+ <p>Returns <c>{fullsweep_after, integer()}</c> which is the
<c>fullsweep_after</c> garbage collection setting used
by default. For more information see
<c>garbage_collection</c> described below.</p>
@@ -5879,14 +5979,13 @@ true</pre>
<p>Returns information on how user has requested
schedulers to be bound or not bound.</p>
<p><em>NOTE:</em> Even though user has requested
- schedulers to be bound via
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>,
- they might have silently failed to bind. In order to
- inspect actual scheduler bindings call
+ schedulers to be bound, they might have silently failed
+ to bind. In order to inspect actual scheduler bindings call
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
<p>For more information, see
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>, and
+ the <c>erl</c> <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ command line argument, and
<seealso marker="#system_info_scheduler_bindings">erlang:system_info(scheduler_bindings)</seealso>.
</p>
</item>
@@ -5909,7 +6008,8 @@ true</pre>
<p>Note that only schedulers online can be bound to logical
processors.</p>
<p>For more information, see
- <seealso marker="#system_flag_scheduler_bind_type">erlang:system_flag(scheduler_bind_type, How)</seealso>,
+ the <c>erl</c> <seealso marker="erts:erl#+sbt">+sbt</seealso>
+ command line argument,
<seealso marker="#system_info_schedulers_online">erlang:system_info(schedulers_online)</seealso>.
</p>
</item>
@@ -6013,7 +6113,7 @@ true</pre>
</item>
<tag><c>wordsize</c></tag>
<item>
- <p>Same as <c>{wordsize, internal}</c></p>
+            <p>Same as <c>{wordsize, internal}</c>.</p>
</item>
<tag><c>{wordsize, internal}</c></tag>
<item>
@@ -6022,7 +6122,7 @@ true</pre>
and on a pure 64-bit architecture 8 is returned. On a
halfword 64-bit emulator, 4 is returned, as the Erlang
terms are stored using a virtual wordsize of half the
- systems wordsize.</p>
+ system's wordsize.</p>
</item>
<tag><c>{wordsize, external}</c></tag>
<item>
@@ -6050,7 +6150,7 @@ true</pre>
<v>&nbsp;MonitorPid = pid()</v>
<v>&nbsp;Options = [Option]</v>
<v>&nbsp;&nbsp;Option = {long_gc, Time} | {large_heap, Size} | busy_port | busy_dist_port</v>
- <v>&nbsp;&nbsp;&nbsp;Time = Size = int()</v>
+ <v>&nbsp;&nbsp;&nbsp;Time = Size = integer()</v>
</type>
<desc>
<p>Returns the current system monitoring settings set by
@@ -6084,7 +6184,7 @@ true</pre>
<type>
<v>MonitorPid = pid()</v>
<v>Option = {long_gc, Time} | {large_heap, Size} | busy_port | busy_dist_port</v>
- <v>&nbsp;Time = Size = int()</v>
+ <v>&nbsp;Time = Size = integer()</v>
<v>MonSettings = {OldMonitorPid, [Option]}</v>
<v>&nbsp;OldMonitorPid = pid()</v>
</type>
@@ -6314,7 +6414,7 @@ true</pre>
<name>time() -> {Hour, Minute, Second}</name>
<fsummary>Current time</fsummary>
<type>
- <v>Hour = Minute = Second = int()</v>
+ <v>Hour = Minute = Second = integer() >= 0</v>
</type>
<desc>
<p>Returns the current time as <c>{Hour, Minute, Second}</c>.</p>
@@ -6342,11 +6442,11 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:trace(PidSpec, How, FlagList) -> int()</name>
+ <name>erlang:trace(PidSpec, How, FlagList) -> integer() >= 0</name>
<fsummary>Set trace flags for a process or processes</fsummary>
<type>
<v>PidSpec = pid() | existing | new | all</v>
- <v>How = bool()</v>
+ <v>How = boolean()</v>
<v>FlagList = [Flag]</v>
<v>&nbsp;Flag -- see below</v>
</type>
@@ -6747,7 +6847,7 @@ true</pre>
<type>
<v>PidOrFunc = pid() | new | {Module, Function, Arity} | on_load</v>
<v>&nbsp;Module = Function = atom()</v>
- <v>&nbsp;Arity = int()</v>
+ <v>&nbsp;Arity = arity()</v>
<v>Item, Res -- see below</v>
</type>
<desc>
@@ -6850,7 +6950,7 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:trace_pattern(MFA, MatchSpec) -> int()</name>
+ <name>erlang:trace_pattern(MFA, MatchSpec) -> integer() >= 0</name>
<fsummary>Set trace patterns for global call tracing</fsummary>
<desc>
<p>The same as
@@ -6859,7 +6959,7 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:trace_pattern(MFA, MatchSpec, FlagList) -> int()</name>
+ <name>erlang:trace_pattern(MFA, MatchSpec, FlagList) -> integer() >= 0</name>
<fsummary>Set trace patterns for tracing of function calls</fsummary>
<type>
<v>MFA, MatchSpec, FlagList -- see below</v>
@@ -7039,7 +7139,7 @@ true</pre>
</desc>
</func>
<func>
- <name>trunc(Number) -> int()</name>
+ <name>trunc(Number) -> integer()</name>
<fsummary>Return an integer by the truncating a number</fsummary>
<type>
<v>Number = number()</v>
@@ -7053,7 +7153,7 @@ true</pre>
</desc>
</func>
<func>
- <name>tuple_size(Tuple) -> int()</name>
+ <name>tuple_size(Tuple) -> integer() >= 0</name>
<fsummary>Return the size of a tuple</fsummary>
<type>
<v>Tuple = tuple()</v>
@@ -7081,12 +7181,10 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:universaltime() -> {Date, Time}</name>
+ <name>erlang:universaltime() -> DateTime</name>
<fsummary>Current date and time according to Universal Time Coordinated (UTC)</fsummary>
<type>
- <v>Date = {Year, Month, Day}</v>
- <v>Time = {Hour, Minute, Second}</v>
- <v>&nbsp;Year = Month = Day = Hour = Minute = Second = int()</v>
+ <v>DateTime = <seealso marker="calendar#type-datetime">calendar:datetime()</seealso></v>
</type>
<desc>
<p>Returns the current date and time according to Universal
@@ -7104,9 +7202,8 @@ true</pre>
<name>erlang:universaltime_to_localtime({Date1, Time1}) -> {Date2, Time2}</name>
<fsummary>Convert from Universal Time Coordinated (UTC) to local date and time</fsummary>
<type>
- <v>Date1 = Date2 = {Year, Month, Day}</v>
- <v>Time1 = Time2 = {Hour, Minute, Second}</v>
- <v>&nbsp;Year = Month = Day = Hour = Minute = Second = int()</v>
+ <v>Date1 = Date2 = <seealso marker="calendar#type-date">calendar:date()</seealso></v>
+ <v>Time1 = Time2 = <seealso marker="calendar#type-time">calendar:time()</seealso></v>
</type>
<desc>
<p>Converts Universal Time Coordinated (UTC) date and time to
@@ -7136,7 +7233,7 @@ true</pre>
<c>Id</c> has no effect on the caller in the future (unless
the link is setup again). If caller is trapping exits, an
<c>{'EXIT', Id, _}</c> message due to the link might have
- been placed in the callers message queue prior to the call,
+ been placed in the caller's message queue prior to the call,
though. Note, the <c>{'EXIT', Id, _}</c> message can be the
result of the link, but can also be the result of <c>Id</c>
calling <c>exit/2</c>. Therefore, it <em>may</em> be
@@ -7193,7 +7290,7 @@ true</pre>
</desc>
</func>
<func>
- <name>erlang:yield() -> true</name>
+ <name name="yield" arity="0"/>
<fsummary>Let other processes get a chance to execute</fsummary>
<desc>
<p>Voluntarily let other processes (if any) get a chance to
diff --git a/erts/doc/src/erlsrv.xml b/erts/doc/src/erlsrv.xml
index 0dfad2a112..c1ecbc7b77 100644
--- a/erts/doc/src/erlsrv.xml
+++ b/erts/doc/src/erlsrv.xml
@@ -4,7 +4,7 @@
<comref>
<header>
<copyright>
- <year>1998</year><year>2010</year>
+ <year>1998</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -273,7 +273,7 @@
</desc>
</func>
<func>
- <name>erlsrv {start | stop | disable | enable} &lt;service-name></name>
+ <name>erlsrv {start | start_disabled | stop | disable | enable} &lt;service-name></name>
<fsummary>Manipulate the current service status.</fsummary>
<desc>
<p>These commands are only added for convenience, the normal
@@ -287,6 +287,21 @@
service actually is stopped. Enabling a service sets it in
automatic mode, that is started at boot. This command cannot
set the service to manual. </p>
+
+          <p>The <c>start_disabled</c> command operates on a service
+          regardless of whether it is enabled/disabled or
+          started/stopped. It does this by first enabling it
+          (regardless of whether it is already enabled), then starting
+          it (if it is not already started) and then disabling it. The
+          result is a disabled but started service, regardless of its
+          earlier state. This is useful for starting services
+          temporarily during a release upgrade. The difference between
+          using <c>start_disabled</c> and the sequence <c>enable</c>,
+          <c>start</c> and <c>disable</c> is that all other
+          <c>erlsrv</c> commands are locked out during the sequence of
+          operations in <c>start_disabled</c>, making the operation
+          atomic from an <c>erlsrv</c> user's point of view.</p>
+
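A minimal sketch of the two approaches (the service name MyService is a hypothetical example):

    erlsrv start_disabled MyService

    rem Roughly equivalent, but other erlsrv commands are not locked out in between:
    erlsrv enable MyService
    erlsrv start MyService
    erlsrv disable MyService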
</desc>
</func>
<func>
diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml
index 51a4a2bca0..ec5e7d9b74 100644
--- a/erts/doc/src/erts_alloc.xml
+++ b/erts/doc/src/erts_alloc.xml
@@ -4,7 +4,7 @@
<cref>
<header>
<copyright>
- <year>2002</year><year>2010</year>
+ <year>2002</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -58,11 +58,8 @@
<item>Allocator used for memory blocks that are expected to be
long-lived, for example Erlang code.</item>
<tag><c>fix_alloc</c></tag>
- <item>A very fast allocator used for some fix-sized
- data. <c>fix_alloc</c> manages a set of memory pools from
- which memory blocks are handed out. <c>fix_alloc</c>
- allocates memory pools from <c>ll_alloc</c>. Memory pools
- that have been allocated are never deallocated.</item>
+ <item>A fast allocator used for some frequently used
+ fixed size data types.</item>
<tag><c>std_alloc</c></tag>
<item>Allocator used for most memory blocks not allocated via any of
the other allocators described above.</item>
@@ -78,14 +75,20 @@
segments are allocated, cached segments are used if possible
instead of creating new segments. This in order to reduce
the number of system calls made.</item>
+ <tag><c>sbmbc_alloc</c></tag>
+ <item>Allocator used by other allocators for allocation of carriers
+ where only small blocks are placed. Currently this allocator is
+ disabled by default.</item>
</taglist>
- <p><c>sys_alloc</c> and <c>fix_alloc</c> are always enabled and
+ <p><c>sys_alloc</c> is always enabled and
cannot be disabled. <c>mseg_alloc</c> is always enabled if it is
available and an allocator that uses it is enabled. All other
allocators can be <seealso marker="#M_e">enabled or disabled</seealso>.
By default all allocators are enabled.
- When an allocator is disabled, <c>sys_alloc</c>
- is used instead of the disabled allocator.</p>
+ When an allocator is disabled, <c>sys_alloc</c> is used instead of
+ the disabled allocator. <c>sbmbc_alloc</c> is an exception. If
+ <c>sbmbc_alloc</c> is disabled, other allocators will not handle
+ small blocks in separate carriers.</p>
<p>The main idea with the <c>erts_alloc</c> library is to separate
memory blocks that are used differently into different memory
areas, and by this achieving less memory fragmentation. By
@@ -98,26 +101,30 @@
<marker id="alloc_util"></marker>
<title>The alloc_util framework</title>
<p>Internally a framework called <c>alloc_util</c> is used for
- implementing allocators. <c>sys_alloc</c>, <c>fix_alloc</c>, and
+      implementing allocators. <c>sys_alloc</c> and
<c>mseg_alloc</c> do not use this framework; hence, the
following does <em>not</em> apply to them.</p>
<p>An allocator manages multiple areas, called carriers, in which
memory blocks are placed. A carrier is either placed in a
- separate memory segment (allocated via <c>mseg_alloc</c>) or in
- the heap segment (allocated via <c>sys_alloc</c>). Multiblock
+ separate memory segment (allocated via <c>mseg_alloc</c>), in
+ the heap segment (allocated via <c>sys_alloc</c>), or inside
+ another carrier (in case it is a carrier created by
+ <c>sbmbc_alloc</c>). Multiblock
carriers are used for storage of several blocks. Singleblock
carriers are used for storage of one block. Blocks that are
larger than the value of the singleblock carrier threshold
(<seealso marker="#M_sbct">sbct</seealso>) parameter are placed
- in singleblock carriers. Blocks smaller than the value of the
- <c>sbct</c> parameter are placed in multiblock
- carriers. Normally an allocator creates a "main multiblock
+ in singleblock carriers. Blocks that are smaller than the value
+ of the <c>sbct</c> parameter are placed in multiblock
+ carriers. Blocks that are smaller than the small block multiblock
+ carrier threshold (<seealso marker="#M_sbmbct">sbmbct</seealso>)
+ will be placed in multiblock carriers only used for small blocks.
+ Normally an allocator creates a "main multiblock
carrier". Main multiblock carriers are never deallocated. The
size of the main multiblock carrier is determined by the value
of the <seealso marker="#M_mmbcs">mmbcs</seealso> parameter.</p>
- <p> <marker id="mseg_mbc_sizes"></marker>
-
- Sizes of multiblock carriers allocated via <c>mseg_alloc</c> are
+ <p><marker id="mseg_mbc_sizes"></marker>Sizes of multiblock carriers
+ allocated via <c>mseg_alloc</c> are
decided based on the values of the largest multiblock carrier
size (<seealso marker="#M_lmbcs">lmbcs</seealso>), the smallest
multiblock carrier size (<seealso marker="#M_smbcs">smbcs</seealso>),
@@ -133,8 +140,11 @@
<c>sbct</c> parameter should be larger than the value of the
<c>lmbcs</c> parameter, the allocator may have to create
multiblock carriers that are larger than the value of the
- <c>lmbcs</c> parameter, though. Singleblock carriers allocated
- via <c>mseg_alloc</c> are sized to whole pages.</p>
+ <c>lmbcs</c> parameter, though. The size of multiblock carriers
+ for small blocks is determined by the small block multiblock
+ carrier size (<seealso marker="#M_sbmbcs">sbmbcs</seealso>).
+ Singleblock carriers allocated via <c>mseg_alloc</c> are sized
+ to whole pages.</p>
<p>Sizes of carriers allocated via <c>sys_alloc</c> are
decided based on the value of the <c>sys_alloc</c> carrier size
(<seealso marker="#Muycs">ycs</seealso>) parameter. The size of
@@ -143,9 +153,8 @@
<p>Coalescing of free blocks are always performed immediately.
Boundary tags (headers and footers) in free blocks are used
which makes the time complexity for coalescing constant.</p>
- <p> <marker id="strategy"></marker>
-
- The memory allocation strategy used for multiblock carriers by an
+ <p><marker id="strategy"></marker>The memory allocation strategy
+ used for multiblock carriers by an
allocator is configurable via the <seealso marker="#M_as">as</seealso>
parameter. Currently the following strategies are available:</p>
<taglist>
@@ -166,6 +175,14 @@
used. The time complexity is proportional to log N, where
N is the number of free blocks.</p>
</item>
+ <tag>Address order first fit</tag>
+ <item>
+ <p>Strategy: Find the block with the lowest address that satisfies the
+ requested block size.</p>
+ <p>Implementation: A balanced binary search tree is
+ used. The time complexity is proportional to log N, where
+ N is the number of free blocks.</p>
+ </item>
<tag>Good fit</tag>
<item>
<p>Strategy: Try to find the best fit, but settle for the best fit
@@ -192,8 +209,21 @@
This since it will only cause problems for other allocators.</p>
</item>
</taglist>
+ <p>Apart from the ordinary allocators described above a number of
+ pre-allocators are used for some specific data types. These
+ pre-allocators pre-allocate a fixed amount of memory for certain data
+ types when the run-time system starts. As long as pre-allocated memory
+ is available, it will be used. When no pre-allocated memory is
+ available, memory will be allocated in ordinary allocators. These
+ pre-allocators are typically much faster than the ordinary allocators,
+    but can only satisfy a limited number of requests.</p>
</section>
+ <note><p>
+ Currently only allocators using the best fit and the address order
+ best fit strategies are able to use "small block multi block carriers".
+ </p></note>
+
<section>
<marker id="flags"></marker>
<title>System Flags Effecting erts_alloc</title>
@@ -215,6 +245,7 @@
the currently present allocators:</p>
<list type="bulleted">
<item><c>B: binary_alloc</c></item>
+ <item><c>C: sbmbc_alloc</c></item>
<item><c>D: std_alloc</c></item>
<item><c>E: ets_alloc</c></item>
<item><c>F: fix_alloc</c></item>
@@ -246,18 +277,6 @@
Max cached segments. The maximum number of memory segments
stored in the memory segment cache. Valid range is
0-30. Default value is 5.</item>
- <tag><marker id="MMcci"><c><![CDATA[+MMcci <time>]]></c></marker></tag>
- <item>
- Cache check interval (in milliseconds). The memory segment
- cache is checked for segments to destroy at an interval
- determined by this parameter. Default value is 1000.</item>
- </taglist>
- <p>The following flags are available for configuration of
- <c>fix_alloc</c>:</p>
- <taglist>
- <tag><marker id="MFe"><c>+MFe true</c></marker></tag>
- <item>
- Enable <c>fix_alloc</c>. Note: <c>fix_alloc</c> cannot be disabled.</item>
</taglist>
<p>The following flags are available for configuration of
<c>sys_alloc</c>:</p>
@@ -296,15 +315,15 @@
based on <c>alloc_util</c>. If <c>u</c> is used as subsystem
identifier (i.e., <c><![CDATA[<S> = u]]></c>) all allocators based on
<c>alloc_util</c> will be effected. If <c>B</c>, <c>D</c>, <c>E</c>,
- <c>H</c>, <c>L</c>, <c>R</c>, <c>S</c>, or <c>T</c> is used as
+ <c>F</c>, <c>H</c>, <c>L</c>, <c>R</c>, <c>S</c>, or <c>T</c> is used as
subsystem identifier, only the specific allocator identified will be
effected:</p>
<taglist>
- <tag><marker id="M_as"><c><![CDATA[+M<S>as bf|aobf|gf|af]]></c></marker></tag>
+ <tag><marker id="M_as"><c><![CDATA[+M<S>as bf|aobf|aoff|gf|af]]></c></marker></tag>
<item>
Allocation strategy. Valid strategies are <c>bf</c> (best fit),
- <c>aobf</c> (address order best fit), <c>gf</c> (good fit),
- and <c>af</c> (a fit). See
+ <c>aobf</c> (address order best fit), <c>aoff</c> (address order first fit),
+ <c>gf</c> (good fit), and <c>af</c> (a fit). See
<seealso marker="#strategy">the description of allocation strategies</seealso> in "the <c>alloc_util</c> framework" section.</item>
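As a sketch (allocator and strategy chosen arbitrarily), address order first fit could be selected for binary_alloc, whose subsystem identifier is B, with:

    erl +MBas aoff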
<tag><marker id="M_asbcst"><c><![CDATA[+M<S>asbcst <size>]]></c></marker></tag>
<item>
@@ -395,32 +414,43 @@
threshold will be placed in singleblock carriers. Blocks
smaller than this threshold will be placed in multiblock
carriers.</item>
+ <tag><marker id="M_sbmbcs"><c><![CDATA[+M<S>sbmbcs <size>]]></c></marker></tag>
+ <item>
+ Small block multiblock carrier size (in bytes). Memory blocks smaller
+ than the small block multiblock carrier threshold
+ (<seealso marker="#M_sbmbct">sbmbct</seealso>) will be placed in
+ multiblock carriers used for small blocks only. This parameter
+ determines the size of such carriers.
+ </item>
+ <tag><marker id="M_sbmbct"><c><![CDATA[+M<S>sbmbct <size>]]></c></marker></tag>
+ <item>
+ Small block multiblock carrier threshold (in bytes). Memory blocks
+ smaller than this threshold will be placed in multiblock carriers
+ used for small blocks only.
+ </item>
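A hedged sketch of combining the two flags (the sizes are arbitrary byte values; u applies the setting to all alloc_util based allocators):

    erl +Musbmbcs 131072 +Musbmbct 256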
<tag><marker id="M_smbcs"><c><![CDATA[+M<S>smbcs <size>]]></c></marker></tag>
<item>
Smallest (<c>mseg_alloc</c>) multiblock carrier size (in
kilobytes). See <seealso marker="#mseg_mbc_sizes">the description
on how sizes for mseg_alloc multiblock carriers are decided</seealso>
in "the <c>alloc_util</c> framework" section.</item>
- <tag><marker id="M_t"><c><![CDATA[+M<S>t true|false|<amount>]]></c></marker></tag>
+ <tag><marker id="M_t"><c><![CDATA[+M<S>t true|false]]></c></marker></tag>
<item>
- Multiple, thread specific instances of the allocator.
+ <p>Multiple, thread specific instances of the allocator.
This option will only have any effect on the runtime system
with SMP support. Default behaviour on the runtime system with
- SMP support (<c>N</c> equals the number of scheduler threads):
+ SMP support:</p>
<taglist>
- <tag><c>temp_alloc</c></tag>
- <item><c>N + 1</c> instances.</item>
<tag><c>ll_alloc</c></tag>
<item><c>1</c> instance.</item>
<tag>Other allocators</tag>
- <item><c>N</c> instances when <c>N</c> is less than or equal to
- <c>16</c>. <c>16</c> instances when <c>N</c> is greater than
- <c>16</c>.</item>
+ <item><c>NoSchedulers+1</c> instances. Each scheduler will use
+ a lock-free instance of its own and other threads will use
+ a common instance.</item>
</taglist>
- <c>temp_alloc</c> will always use <c>N + 1</c> instances when
- this option has been enabled regardless of the amount passed.
- Other allocators will use the same amount of instances as the
- amount passed as long as it isn't greater than <c>N</c>.
+ <p>It was previously (before ERTS version 5.9) possible to configure
+      a smaller number of thread specific instances than schedulers.
+ This is, however, not possible any more.</p>
</item>
</taglist>
<p>Currently the following flags are available for configuration of
diff --git a/erts/doc/src/init.xml b/erts/doc/src/init.xml
index b0d0cda4fa..d5c43f6e57 100644
--- a/erts/doc/src/init.xml
+++ b/erts/doc/src/init.xml
@@ -47,15 +47,12 @@
</description>
<funcs>
<func>
- <name>boot(BootArgs) -> void()</name>
+ <name name="boot" arity="1"/>
<fsummary>Start the Erlang runtime system</fsummary>
- <type>
- <v>BootArgs = [binary()]</v>
- </type>
<desc>
<p>Starts the Erlang runtime system. This function is called
when the emulator is started and coordinates system start-up.</p>
- <p><c>BootArgs</c> are all command line arguments except
+ <p><c><anno>BootArgs</anno></c> are all command line arguments except
the emulator flags, that is, flags and plain arguments. See
<seealso marker="erts:erl">erl(1)</seealso>.</p>
<p><c>init</c> itself interprets some of the flags, see
@@ -67,17 +64,12 @@
</desc>
</func>
<func>
- <name>get_argument(Flag) -> {ok, Arg} | error</name>
+ <name name="get_argument" arity="1"/>
<fsummary>Get the values associated with a command line user flag</fsummary>
- <type>
- <v>Flag = atom()</v>
- <v>Arg = [Values]</v>
- <v>&nbsp;Values = [string()]</v>
- </type>
<desc>
<p>Returns all values associated with the command line user flag
- <c>Flag</c>. If <c>Flag</c> is provided several times, each
- <c>Values</c> is returned in preserved order.</p>
+ <c><anno>Flag</anno></c>. If <c><anno>Flag</anno></c> is provided several times, each
+ <c><anno>Values</anno></c> is returned in preserved order.</p>
<pre>
% <input>erl -a b c -a d</input>
...
@@ -113,48 +105,37 @@
</desc>
</func>
<func>
- <name>get_arguments() -> Flags</name>
+ <name name="get_arguments" arity="0"/>
<fsummary>Get all command line user flags</fsummary>
- <type>
- <v>Flags = [{Flag, Values}]</v>
- <v>&nbsp;Flag = atom()</v>
- <v>&nbsp;Values = [string()]</v>
- </type>
<desc>
<p>Returns all command line flags, as well as the system
defined flags, see <c>get_argument/1</c>.</p>
</desc>
</func>
<func>
- <name>get_plain_arguments() -> [Arg]</name>
+ <name name="get_plain_arguments" arity="0"/>
<fsummary>Get all non-flag command line arguments</fsummary>
- <type>
- <v>Arg = string()</v>
- </type>
<desc>
<p>Returns any plain command line arguments as a list of strings
(possibly empty).</p>
</desc>
</func>
<func>
- <name>get_status() -> {InternalStatus, ProvidedStatus}</name>
+ <name name="get_status" arity="0"/>
<fsummary>Get system status information</fsummary>
- <type>
- <v>InternalStatus = starting | started | stopping</v>
- <v>ProvidedStatus = term()</v>
- </type>
+ <type name="internal_status"/>
<desc>
<p>The current status of the <c>init</c> process can be
inspected. During system startup (initialization),
- <c>InternalStatus</c> is <c>starting</c>, and
- <c>ProvidedStatus</c> indicates how far the boot script has
+ <c><anno>InternalStatus</anno></c> is <c>starting</c>, and
+ <c><anno>ProvidedStatus</anno></c> indicates how far the boot script has
been interpreted. Each <c>{progress, Info}</c> term
- interpreted in the boot script affects <c>ProvidedStatus</c>,
- that is, <c>ProvidedStatus</c> gets the value of <c>Info</c>.</p>
+ interpreted in the boot script affects <c><anno>ProvidedStatus</anno></c>,
+ that is, <c><anno>ProvidedStatus</anno></c> gets the value of <c>Info</c>.</p>
</desc>
</func>
<func>
- <name>reboot() -> void()</name>
+ <name name="reboot" arity="0"/>
<fsummary>Take down and restart an Erlang node smoothly</fsummary>
<desc>
<p>All applications are taken down smoothly, all code is
@@ -168,7 +149,7 @@
</desc>
</func>
<func>
- <name>restart() -> void()</name>
+ <name name="restart" arity="0"/>
<fsummary>Restart the running Erlang node</fsummary>
<desc>
<p>The system is restarted <em>inside</em> the running Erlang
@@ -183,20 +164,17 @@
</desc>
</func>
<func>
- <name>script_id() -> Id</name>
+ <name name="script_id" arity="0"/>
<fsummary>Get the identity of the used boot script</fsummary>
- <type>
- <v>Id = term()</v>
- </type>
<desc>
<p>Get the identity of the boot script used to boot the system.
- <c>Id</c> can be any Erlang term. In the delivered boot
- scripts, <c>Id</c> is <c>{Name, Vsn}</c>. <c>Name</c> and
+ <c><anno>Id</anno></c> can be any Erlang term. In the delivered boot
+ scripts, <c><anno>Id</anno></c> is <c>{Name, Vsn}</c>. <c>Name</c> and
<c>Vsn</c> are strings.</p>
</desc>
</func>
<func>
- <name>stop() -> void()</name>
+ <name name="stop" arity="0"/>
<fsummary>Take down an Erlang node smoothly</fsummary>
<desc>
<p>All applications are taken down smoothly, all code is
@@ -210,15 +188,12 @@
</desc>
</func>
<func>
- <name>stop(Status) -> void()</name>
+ <name name="stop" arity="1"/>
<fsummary>Take down an Erlang node smoothly</fsummary>
- <type>
- <v>Status = int()>=0 | string()</v>
- </type>
<desc>
<p>All applications are taken down smoothly, all code is
unloaded, and all ports are closed before the system
- terminates by calling <c>halt(Status)</c>. If the
+ terminates by calling <c>halt(<anno>Status</anno>)</c>. If the
<c>-heart</c> command line flag was given, the <c>heart</c>
program is terminated before the Erlang node
terminates. Refer to <c>heart(3)</c> for more
diff --git a/erts/doc/src/make.dep b/erts/doc/src/make.dep
deleted file mode 100644
index 98bac78235..0000000000
--- a/erts/doc/src/make.dep
+++ /dev/null
@@ -1,32 +0,0 @@
-# ----------------------------------------------------
-# >>>> Do not edit this file <<<<
-# This file was automaticly generated by
-# /home/gandalf/otp/bin/docdepend
-# ----------------------------------------------------
-
-
-# ----------------------------------------------------
-# TeX files that the DVI file depend on
-# ----------------------------------------------------
-
-book.dvi: absform.tex alt_dist.tex book.tex crash_dump.tex \
- driver.tex driver_entry.tex epmd.tex erl.tex \
- erl_dist_protocol.tex erl_driver.tex erl_ext_dist.tex \
- erl_prim_loader.tex erl_set_memory_block.tex \
- erlang.tex erlc.tex erlsrv.tex erts_alloc.tex \
- escript.tex inet_cfg.tex init.tex match_spec.tex \
- part.tex ref_man.tex run_erl.tex start.tex \
- start_erl.tex tty.tex werl.tex zlib.tex
-
-# ----------------------------------------------------
-# Source inlined when transforming from source to LaTeX
-# ----------------------------------------------------
-
-book.tex: ref_man.xml
-
-# ----------------------------------------------------
-# Pictures that the DVI file depend on
-# ----------------------------------------------------
-
-book.dvi: erl_ext_fig.ps
-
diff --git a/erts/doc/src/match_spec.xml b/erts/doc/src/match_spec.xml
index f0390c9db8..bdcf9c3816 100644
--- a/erts/doc/src/match_spec.xml
+++ b/erts/doc/src/match_spec.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>1999</year><year>2010</year>
+ <year>1999</year><year>2012</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -75,7 +75,7 @@
<item>MatchCondition ::= { GuardFunction } |
{ GuardFunction, ConditionExpression, ... }
</item>
- <item>BoolFunction ::= <c><![CDATA[is_atom]]></c> | <c><![CDATA[is_constant]]></c> |
+ <item>BoolFunction ::= <c><![CDATA[is_atom]]></c> |
<c><![CDATA[is_float]]></c> | <c><![CDATA[is_integer]]></c> | <c><![CDATA[is_list]]></c> |
<c><![CDATA[is_number]]></c> | <c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> | <c><![CDATA[is_binary]]></c> |
@@ -133,7 +133,7 @@
<item>MatchCondition ::= { GuardFunction } |
{ GuardFunction, ConditionExpression, ... }
</item>
- <item>BoolFunction ::= <c><![CDATA[is_atom]]></c> | <c><![CDATA[is_constant]]></c> |
+ <item>BoolFunction ::= <c><![CDATA[is_atom]]></c> |
<c><![CDATA[is_float]]></c> | <c><![CDATA[is_integer]]></c> | <c><![CDATA[is_list]]></c> |
<c><![CDATA[is_number]]></c> | <c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> | <c><![CDATA[is_binary]]></c> |
@@ -172,7 +172,7 @@
<title>Functions allowed in all types of match specifications</title>
<p>The different functions allowed in <c><![CDATA[match_spec]]></c> work like this:
</p>
- <p><em>is_atom, is_constant, is_float, is_integer, is_list, is_number, is_pid, is_port, is_reference, is_tuple, is_binary, is_function: </em> Like the corresponding guard tests in
+ <p><em>is_atom, is_float, is_integer, is_list, is_number, is_pid, is_port, is_reference, is_tuple, is_binary, is_function: </em> Like the corresponding guard tests in
Erlang, return <c><![CDATA[true]]></c> or <c><![CDATA[false]]></c>.
</p>
<p><em>is_record: </em>Takes an additional parameter, which SHALL
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 3733fb2db9..028a2bbf3d 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -30,6 +30,1033 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 5.9.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+          <p><c>erlang:system_profile</c> erroneously profiled the
+ profiler process when observing runnable processes. This
+ has been corrected. </p>
+ <p>
+ Own Id: OTP-9849</p>
+ </item>
+ <item>
+ <p>Calling trace_info/2 asking for information about a
+	    function that had native code could crash the run-time
+ system.</p>
+ <p>
+ Own Id: OTP-9886</p>
+ </item>
+ <item>
+ <p>
+ reduce smp locking time range in erts_garbage_collect
+ (thanks to Jovi Zhang)</p>
+ <p>
+ Own Id: OTP-9912</p>
+ </item>
+ <item>
+ <p>
+ Fix typo in supervisor behaviour doc (Thanks to Ricardo
+	    Catalinas Jiménez)</p>
+ <p>
+ Own Id: OTP-9924</p>
+ </item>
+ <item>
+ <p>
+ Correct spelling of registered (Thanks to Richard
+ Carlsson)</p>
+ <p>
+ Own Id: OTP-9925</p>
+ </item>
+ <item>
+ <p>
+ erts: Remove unused variable (Thanks to Jovi Zhang)</p>
+ <p>
+ Own Id: OTP-9926</p>
+ </item>
+ <item>
+ <p>
+ Fix bug in ETS with <c>compressed</c> option and
+ insertion of term containing large integers (>2G) on
+ 64-bit machines. Seen to cause emulator crash. (Thanks to
+ Diego Llarrull for excellent bug report)</p>
+ <p>
+ Own Id: OTP-9932</p>
+ </item>
+ <item>
+ <p>
+ Handle Linux OS where /sys/devices/system/node is only
+ readable by root. Fallback to /sys/devices/system/cpu for
+ topology info.</p>
+ <p>
+ Own Id: OTP-9978</p>
+ </item>
+ <item>
+          <p>When an escript ends, all printout to standard
+	    output and standard error now reaches the terminal. This
+	    bug has been corrected by changing the behaviour of
+	    erlang:halt/0,1, which should fix the same problem for
+	    other escript-like applications, i.e. that data stored in
+	    the output port driver buffers got lost when printing on
+	    a TTY and exiting through erlang:halt/0,1.</p>
+	    <p>The BIFs erlang:halt/0,1 have gotten improved
+	    semantics, and there is a new BIF erlang:halt/2 to
+	    accomplish something like the old semantics. See the
+	    documentation.</p>
+	    <p>Now erlang:halt/0 and erlang:halt/1 with an integer
+	    argument will close all ports and allow all pending async
+	    thread operations to finish before exiting the emulator.
+	    Previously erlang:halt/0 and erlang:halt(0) would just
+	    wait for pending async thread operations but not close
+	    ports, and erlang:halt/1 with a non-zero integer argument
+	    would not even wait for pending async thread operations.
+	    </p>
+	    <p>To get roughly the old behaviour, i.e. not waiting for
+	    ports and async thread operations when exiting the
+	    emulator, use erlang:halt/2 with an integer first argument
+	    and an option list containing {flush,false} as the second
+	    argument. Note that flushing no longer depends on the
+	    exit code, and it is no longer possible to flush only
+	    async thread operations, which we deemed a strange
+	    behaviour anyway.
+	    </p>
+ <p>Also, erlang:halt/1,2 has gotten a new feature: If the
+ first argument is the atom 'abort' the emulator is
+ aborted producing a core dump, if the operating system so
+ allows. </p>
+ <p>
+ Own Id: OTP-9985</p>
+ </item>
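A minimal sketch of the new call described in this item (exit status 1 is an arbitrary choice):

    %% Exit with status 1 without flushing port buffers or waiting for
    %% pending async thread operations (roughly the old halt/1 behaviour):
    erlang:halt(1, [{flush, false}]).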
+ <item>
+ <p>
+ Added check to inet driver to avoid building on operating
+ systems that do not yet have IPv6 compatible socket API.
+ (Thanks to Peer Stritzinger)</p>
+ <p>
+ Own Id: OTP-9996</p>
+ </item>
+ <item>
+ <p>
+ Fix bug when the number of CPUs actually found is lower
+ than the configured value. (Thanks to Benjamin
+ Herrenschmidt)</p>
+ <p>
+ Own Id: OTP-10004</p>
+ </item>
+ <item>
+ <p>
+ The runtime system without SMP support and without thread
+ support erroneously busy waited when no work was present.
+ This bug first appeared in <c>erts-5.9</c>.</p>
+ <p>
+ Own Id: OTP-10019</p>
+ </item>
+ <item>
+ <p>
+ Various typographical errors corrected in documentation
+ for common_test, driver, erl_driver and windows
+ installation instructions. (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-10037</p>
+ </item>
+ <item>
+ <p>
+ Fix memory leak caused by race on exiting process</p>
+ <p>
+ Own Id: OTP-10041</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>Add <c>erlang:statistics(scheduler_wall_time)</c> to
+ ensure correct determination of scheduler utilization.
+ Measuring scheduler utilization is strongly preferred
+ over CPU utilization, since CPU utilization gives very
+ poor indications of actual scheduler/vm usage.</p>
+ <p>
+ Own Id: OTP-9858</p>
+ </item>
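A short sketch of reading this statistic; switching the measurement on first via erlang:system_flag/2 is an assumption about how the feature is enabled:

    erlang:system_flag(scheduler_wall_time, true),
    %% Each element is assumed to be {SchedulerId, ActiveTime, TotalTime}:
    SchedulerTimes = erlang:statistics(scheduler_wall_time).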
+ <item>
+ <p>
+	    ERTS internal API improvements. In some cases the number
+	    of atomic read operations needed has been reduced due to
+	    this.</p>
+ <p>
+ Own Id: OTP-9922</p>
+ </item>
+ <item>
+ <p>
+ The DTrace source patch from Scott Lystig Fritchie is
+ integrated in the source tree. Using an emulator with
+ dtrace probe is still not supported for production use,
+	    dtrace probes is still not supported for production use,
+ --with-dynamic-trace=dtrace (or
+ --with-dynamic-trace=systemtap) to create a build with
+ dtrace probes enabled. See runtime_tools for
+ documentation and examples.</p>
+ <p>
+ Own Id: OTP-10017</p>
+ </item>
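For example, a build with DTrace probes enabled could be configured as sketched below (a standard configure and make invocation is assumed):

    ./configure --with-dynamic-trace=dtrace
    make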
+ </list>
+ </section>
+
+
+ <section><title>Known Bugs and Problems</title>
+ <list>
+ <item>
+ <p>
+ enif_make_copy may invalidate enif_inspect_binary.</p>
+ <p>
+ Own Id: OTP-9828</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 5.9.0.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A feature test for the <c>lwsync</c> instruction
+ performed on PowerPC hardware at runtime system startup
+ got into an eternal loop if the instruction was not
+ supported. This bug was introduced in erts-5.9/OTP-R15B.</p>
+ <p>
+ Own Id: OTP-9843</p>
+ </item>
+ <item>
+ <p>
+ I/O events could potentially be delayed for ever when
+ enabling kernel-poll on a non-SMP runtime system
+ executing on Solaris. When also combined with
+ async-threads the runtime system hung before completing
+ the boot phase. This bug was introduced in
+ erts-5.9/OTP-R15B.</p>
+ <p>
+ Own Id: OTP-9844</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 5.9</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Honor option <c>packet_size</c> for http packet parsing
+ by both TCP socket and <c>erlang:decode_packet</c>. This
+ gives the ability to accept HTTP headers larger than the
+ default setting, but also avoid DoS attacks by accepting
+ lines only up to whatever length you wish to allow. For
+ consistency, packet type <c>line</c> also honor option
+ <c>packet_size</c>. (Thanks to Steve Vinoski)</p>
+ <p>
+ Own Id: OTP-9389</p>
+ </item>
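A hedged sketch of honouring packet_size when parsing an HTTP request with erlang:decode_packet/3 (the 16384-byte limit and the variable names are arbitrary; Data is assumed to be a binary holding part of an HTTP request):

    case erlang:decode_packet(http, Data, [{packet_size, 16384}]) of
        {ok, Request, Rest} -> {Request, Rest};
        {more, _Length}     -> need_more_data;
        {error, Reason}     -> {error, Reason}
    end.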
+ <item>
+ <p> A few contracts in the <c>lists</c> module have been
+ corrected. </p>
+ <p>
+ Own Id: OTP-9616</p>
+ </item>
+ <item>
+ <p>
+	    The Unicode noncharacter code points 16#FFFE and 16#FFFF
+ were not allowed to be encoded or decoded using the
+ <c>unicode</c> module or bit syntax. That was
+ inconsistent with the other noncharacters 16#FDD0 to
+ 16#FDEF that could be encoded/decoded. To resolve the
+	    inconsistency, 16#FFFE and 16#FFFF can now be encoded and
+ decoded. (Thanks to Alisdair Sullivan.)</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9624</p>
+ </item>
+ <item>
+ <p>
+	    Make epp search the directory of the current file first when
+	    including another file. This completes a partial fix in
+ R11 that only worked for include_lib(). (Thanks to
+ Richard Carlsson)</p>
+ <p>
+ Own Id: OTP-9645</p>
+ </item>
+ <item>
+ <p>
+ Fixed memory leak in
+ <c>enif_inspect_io_list_as_binary</c> when applied on a
+ process independent environment.</p>
+ <p>
+ Own Id: OTP-9668</p>
+ </item>
+ <item>
+          <p>The number of beam catches allowed in code is no
+	    longer statically defined and will grow according to
+	    need.</p>
+ <p>
+ Own Id: OTP-9692</p>
+ </item>
+ <item>
+ <p>
+ Add missing parenthesis in heart doc.</p>
+ <p>
+ Add missing spaces in the Reference Manual distributed
+ section.</p>
+ <p>
+ In the HTML version of the doc those spaces are necessary
+ to separate those words.</p>
+ <p>
+ Own Id: OTP-9693</p>
+ </item>
+ <item>
+ <p>
+ Fixes module erlang doc style: option description (Thanks
+	    to Ricardo Catalinas Jiménez)</p>
+ <p>
+ Own Id: OTP-9697</p>
+ </item>
+ <item>
+ <p>
+ Specifying a scope to binary:match/3 when using multiple
+	    search strings resulted in faulty return values. This is
+ now corrected.</p>
+ <p>
+ Own Id: OTP-9701</p>
+ </item>
+ <item>
+ <p>
+ The runtime system crashed if more than one thread tried
+ to exit the runtime system at the same time.</p>
+ <p>
+ Own Id: OTP-9705</p>
+ </item>
+ <item>
+ <p>
+ Fix documentation for erlang:process_flag/2</p>
+ <p>
+ For the subsection about process_flag(save_calls, N)
+ there's an unrelated paragraph about process priorities
+ which was copied from the preceeding subsection regarding
+	    which was copied from the preceding subsection regarding
+ Manana)</p>
+ <p>
+ Own Id: OTP-9714</p>
+ </item>
+ <item>
+ <p>
+ Calls to <c>erlang:system_flag(schedulers_online, N)</c>
+ and/or <c>erlang:system_flag(multi_scheduling,
+ block|unblock)</c> could cause internal data used by this
+ functionality to get into an inconsistent state. When
+ this happened various problems occurred. This bug was
+ quite hard to trigger, so hopefully no-one has been
+	    affected by it.</p>
+ <p>
+ A spinlock used by the run-queue management sometimes got
+ heavily contended. This code has now been rewritten, and
+ the spinlock has been removed.</p>
+ <p>
+ Own Id: OTP-9727</p>
+ </item>
+ <item>
+ <p>
+ Use libdlpi to get physical address (Thanks to Trond
+ Norbye)</p>
+ <p>
+ Own Id: OTP-9818</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p> An option list argument can now be passed to
+ <c>file:read_file_info/2, file:read_link_info/2</c> and
+ <c>file:write_file_info/3</c> and set time type
+ information in the call. Valid options are <c>{time,
+ local}, {time, universal}</c> and <c>{time, posix}</c>.
+ In the case of <c>posix</c> time no conversions are made
+ which makes the operation a bit faster. </p>
+ <p>
+ Own Id: OTP-7687</p>
+ </item>
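A minimal sketch of the new option list argument (the file name is hypothetical):

    {ok, PosixInfo}     = file:read_file_info("some_file.txt", [{time, posix}]),
    {ok, UniversalInfo} = file:read_file_info("some_file.txt", [{time, universal}]).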
+ <item>
+ <p>A number of memory allocation optimizations have been
+ implemented. Most optimizations reduce contention caused
+ by synchronization between threads during allocation and
+ deallocation of memory. Most notably:</p> <list> <item>
+ Synchronization of memory management in scheduler
+ specific allocator instances has been rewritten to use
+ lock-free synchronization. </item> <item> Synchronization
+ of memory management in scheduler specific pre-allocators
+ has been rewritten to use lock-free synchronization.
+ </item> <item> The 'mseg_alloc' memory segment allocator
+	    now uses scheduler specific instances instead of one
+ instance. Apart from reducing contention this also
+ ensures that memory allocators always create memory
+ segments on the local NUMA node on a NUMA system. </item>
+ </list>
+ <p>
+ Own Id: OTP-7775</p>
+ </item>
+ <item>
+ <p>
+ The ethread atomic memory operations API used by the
+ runtime system has been extended and improved.</p>
+ <p>
+ The ethread library now also performs runtime tests for
+ presence of hardware features, such as for example SSE2
+ instructions, instead of requiring this to be determined
+ at compile time.</p>
+ <p>
+ All uses of the old deprecated atomic API in the runtime
+ system have been replaced with the use of the new atomic
+	    API. In a lot of places this change implies a relaxation of
+ memory barriers used.</p>
+ <p>
+ Own Id: OTP-9014</p>
+ </item>
+ <item>
+ <p>gen_sctp:open/0-2 may now return
+	    {error,eprotonosupport} if SCTP is not supported.</p>
+	    <p>gen_sctp:peeloff/1 has been implemented and creates a
+	    one-to-one socket, which is also supported now.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9239</p>
+ </item>
+ <item>
+ <p>
+ Sendfile has been added to the file module's API.
+ sendfile/2 is used to read data from a file and send it
+ to a tcp socket using a zero copying mechanism if
+ available on that OS.</p>
+ <p>
+	    Thanks to Tuncer Ayaz and Steve Vinoski for original
+ implementation</p>
+ <p>
+ Own Id: OTP-9240</p>
+ </item>
+ <item>
+ <p>
+ enif_get_reverse_list function added to nif API. This
+ function should be used to reverse small lists which are
+	    deep within other structures, making it impractical to do
+ the reverse in Erlang.</p>
+ <p>
+ Own Id: OTP-9392</p>
+ </item>
+ <item>
+ <p>
+ The deprecated concat_binary/1 BIF has been removed. Use
+ <c>list_to_binary</c> or <c>iolist_to_binary/1</c>
+ instead.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9421</p>
+ </item>
+ <item>
+ <p>Erlang/OTP can now be built using parallel make if you
+ limit the number of jobs, for instance using '<c>make
+ -j6</c>' or '<c>make -j10</c>'. '<c>make -j</c>' does not
+ work at the moment because of some missing
+ dependencies.</p>
+ <p>
+ Own Id: OTP-9451</p>
+ </item>
+ <item>
+ <p>Line number and filename information are now included
+ in exception backtraces as a fourth element in the MFA
+ tuple. The information will be pretty-printed by the
+ shell and used by <c>common_test</c> to provide better
+	    indication of where a test case failed.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9468</p>
+ </item>
+ <item>
+ <p>All binary constants used to be handled as heap
+ binaries (i.e. the entire binary would be copied when
+ sent to another process). Binary constants larger than 64
+ bytes are now refc binaries (i.e. the actual data in the
+ binary will not be copied when sent to another
+ process).</p>
+ <p>
+ Own Id: OTP-9486</p>
+ </item>
+ <item>
+ <p>
+	    If a float and an integer are compared, the integer is
+ only converted to a float if the float datatype can
+ contain it. Otherwise the float is converted to an
+ integer.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9497</p>
+ </item>
+ <item>
+ <p>
+ Add NIF function enif_is_number</p>
+ <p>
+	    represents a number or not (integer, float, small or
+	    big). (Thanks to Filipe David Manana)</p>
+ big).(Thanks to Filipe David Manana)</p>
+ <p>
+ Own Id: OTP-9629</p>
+ </item>
+ <item>
+ <p>
+ The ERTS internal system block functionality has been
+ replaced by new functionality for blocking the system.
+ The old system block functionality had contention issues
+ and complexity issues. The new functionality piggy-backs
+ on thread progress tracking functionality needed by newly
+ introduced lock-free synchronization in the runtime
+ system. When the functionality for blocking the system
+ isn't used, there is more or less no overhead at all.
+ This since the functionality for tracking thread progress
+ is there and needed anyway.</p>
+ <p>
+ Own Id: OTP-9631</p>
+ </item>
+ <item>
+ <p>
+ An ERTS internal, generic, many to one, lock-free queue
+ for communication between threads has been introduced.
+ The many to one scenario is very common in ERTS, so it
+ can be used in a lot of places in the future. Currently
+ it is used by scheduling of certain jobs, and the async
+ thread pool, but more uses are planned for the future.</p>
+ <p>
+ Drivers using the driver_async functionality are not
+ automatically locked to the system anymore, and can be
+ unloaded as any dynamically linked in driver.</p>
+ <p>
+ Scheduling of ready async jobs is now also interleaved in
+ between other jobs. Previously all ready async jobs were
+ performed at once.</p>
+ <p>
+ Own Id: OTP-9632</p>
+ </item>
+ <item>
+ <p>
+ Tuple funs (a two-element tuple with a module name and a
+ function) are now officially deprecated and will be
+ removed in R16. Use '<c>fun M:F/A</c>' instead. To make
+ you aware that your system uses tuple funs, the very
+ first time a tuple fun is applied, a warning will be sent
+ to the error logger.</p>
+ <p>
+ Own Id: OTP-9649</p>
+ </item>
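A small sketch of the replacement syntax (lists:reverse/1 is an arbitrary example):

    %% Deprecated tuple fun:
    F1 = {lists, reverse},
    %% Preferred explicit fun syntax:
    F2 = fun lists:reverse/1,
    [3,2,1] = F2([1,2,3]).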
+ <item>
+ <p>
+	    Changed the internal BIF calling convention. This will
+	    make for simpler, faster code and allow BIFs with an
+	    arbitrary arity.</p>
+ <p>
+ Own Id: OTP-9662</p>
+ </item>
+ <item>
+ <p>
+ Windows native critical sections are now used internally
+ in the runtime system on Windows as mutex implementation.
+ This since they perform better under extreme contention
+ than our own implementation.</p>
+ <p>
+ Own Id: OTP-9671</p>
+ </item>
+ <item>
+ <p>
+ Convert some erl_nif macros into inline functions. Allow
+ for better compile time type checking. (Thanks to Tuncer
+ Ayaz)</p>
+ <p>
+ Own Id: OTP-9675</p>
+ </item>
+ <item>
+ <p>
+ The <c>+scl</c> command line flag has been added. It can
+ be used for disabling compaction of scheduler load. For
+ more information see the <c>erl(1)</c> documentation.</p>
+ <p>
+ Own Id: OTP-9695</p>
+ </item>
+ <item>
+ <p>The build system has been updated so that Erlang/OTP
+ can be built on Mac OS X Lion systems without a GCC
+ compiler. The INSTALL guide has been updated with
+ instructions on how to install a GCC compiler and build
+ Erlang/OTP with it, in order to get a run-time system
+ with better performance.</p>
+ <p>
+ Own Id: OTP-9712</p>
+ </item>
+ <item>
+ <p>
+	    When loading a module, the system used to run on a single
+ scheduler during the entire loading process. This has
+ been changed to only take down the system just before
+ inserting the loaded code into the system tables,
+ resulting in a much shorter disruption if a module is
+ loaded in a busy system. (Suggested by Bob Ippolito.)</p>
+ <p>
+ Own Id: OTP-9720</p>
+ </item>
+ <item>
+ <p>
+ Possible to run HiPE without floating point exceptions
+ (FPE). Useful on platforms that lack reliable FPE. Slower
+ float operations compared to HiPE with FPE.</p>
+ <p>
+ Own Id: OTP-9724</p>
+ </item>
+ <item>
+ <p>
+ As of ERTS version 5.9 (OTP-R15B) the runtime system will
+ by default <em>not</em> bind schedulers to logical
+ processors.</p>
+ <p>
+ If the Erlang runtime system is the only operating system
+ process that binds threads to logical processors, this
+ improves the performance of the runtime system. However,
+ if other operating system processes (as for example
+ another Erlang runtime system) also bind threads to
+ logical processors, there might be a performance penalty
+ instead. In some cases this performance penalty might be
+ severe. Due to this, we change the default so that the
+ user must make an active decision in order to bind
+ schedulers.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9726</p>
+ </item>
+ <item>
+ <p>
+ The use of <c>erlang:system_flag(scheduler_bind_type,
+ _)</c> and <c>erlang:system_flag(cpu_topology, _)</c>
+ have been deprecated and scheduled for removal in
+ erts-5.10/OTP-R16. For more information see the
+ documentation of <c>erlang:system_flag/2</c>.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9749</p>
+ </item>
+ <item>
+ <p>
+ An ancient workaround for a Windows bug was removed from
+	    the open_port code; open_port({spawn,...}...) is now
+ faster. Thanks to Daniel Goertzen.</p>
+ <p>
+ Own Id: OTP-9766</p>
+ </item>
+ <item>
+ <p>
+ The use of deprecated 32bit time_t on 32bit Windows is
+ removed.</p>
+ <p>
+ Own Id: OTP-9767</p>
+ </item>
+ <item>
+ <p>
+ The NIF <c>reload</c> mechanism is deprecated. Do not use
+ it as an upgrade method for live production systems. It
+ might be removed in future releases. It can still serve
+ as a development feature but a warning message will be
+ logged each time it is used.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9771</p>
+ </item>
+ <item>
+ <p>The driver interface has been changed to enable 64-bit
+ aware drivers. Most importantly the return types for
+	    ErlDrvEntry callbacks 'call' and 'control' have been
+	    enlarged, which requires drivers to be changed or they will
+ cause emulator crashes. See <seealso
+ marker="erl_driver#rewrites_for_64_bits"> Rewrites for
+ 64-bit driver interface </seealso> in the driver manual.
+ </p>
+ <p>Due to this driver <seealso
+ marker="erl_driver#version_management">version
+ management</seealso> is now mandatory. A driver that is
+ not written with version management or a driver that was
+	    compiled with the wrong major version will not be
+ loaded by the emulator.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-9795</p>
+ </item>
+ <item>
+ <p>
+ Eliminate use of deprecated regexp module</p>
+ <p>
+ Own Id: OTP-9810</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 5.8.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Several bugs concerning constant binary constructions
+ such as &lt;&lt;0:4294967295&gt;&gt; have been corrected.
+ Depending on the actual size of the binary and the type
+	    of run-time system (32-bit, halfword, 64-bit), such an
+ expression could either crash the run-time system or make
+ the loader refuse loading of the module.</p>
+ <p>
+ Own Id: OTP-9284</p>
+ </item>
+ <item>
+ <p>
+ The Erlsrv utility failed to stop the erlang machine if
+ no StopAction was defined when the service was stopped.
+ This is now corrected.</p>
+ <p>
+ Own Id: OTP-9344</p>
+ </item>
+ <item>
+ <p>
+ Due to a bug in glibc the runtime system could abort
+ while trying to destroy a mutex. A fix for this was
+ introduced in R14B02. This fix did, however, not solve
+ the problem. The runtime system will now issue a warning
+ instead of aborting.</p>
+ <p>
+ Own Id: OTP-9373 Aux Id: OTP-9009 </p>
+ </item>
+ <item>
+ <p>
+ Replace atom in DRV macro in prim_file with string</p>
+ <p>
+ An experimental version of Dialyzer discovered that the
+ atom that replaced the DRV macro in prim_file ends up in
+ calls to erlang:open_port({spawn, Driver}, Portopts) as
+ the Driver argument. The documentation states that this
+ call requires a string there.</p>
+ <p>
+ This change is also consistent with the one introduced in
+ commit 0f03b1e9d2bef3bc830c31a369261af4c5234727 by Kostis
+ Sagonas.</p>
+ <p>
+ Own Id: OTP-9377</p>
+ </item>
+ <item>
+ <p>
+ Fix typos in the epmd documentation (Thanks to Holger
+	    Weiß)</p>
+ <p>
+ Own Id: OTP-9387</p>
+ </item>
+ <item>
+ <p>
+ Fix faulty integer terms created by NIF API from 64-bit
+ integers on halfword emulator. (Thanks to Paolo Negri and
+ Paul Davis)</p>
+ <p>
+ Own Id: OTP-9394</p>
+ </item>
+ <item>
+ <p>
+ Fix <c>epmd</c> crash on vxworks caused by faulty
+ argument to select() system call.</p>
+ <p>
+ Own Id: OTP-9427 Aux Id: seq11855 </p>
+ </item>
+ <item>
+ <p>
+ The ets:test_ms function could in rare cases truncate the
+ error messages. This is now corrected.</p>
+ <p>
+ Own Id: OTP-9435</p>
+ </item>
+ <item>
+ <p>
+ Fix bug related to hibernate and HiPE (clear
+ F_HIBERNATE_SCHED flag)</p>
+ <p>
+ F_HIBERNATE_SCHED flag that was introduced in
+ b7ecdcd1ae9e11b8f75e must be cleared in hipe_mode_switch
+ as well. Otherwise, processes running HiPE code that
+ hibernate, wake up and then trap into a BIF will not be
+	    rescheduled. (Thanks to Paul Guyot)</p>
+ <p>
+ Own Id: OTP-9452</p>
+ </item>
+ <item>
+ <p>
+ Fix bug in FreeBSD topology detection code (Thanks to
+ Paul Guyot)</p>
+ <p>
+ Own Id: OTP-9453</p>
+ </item>
+ <item>
+ <p>
+ Fix use of logical operator &amp;&amp; with constant
+ operand instead of bitwise &amp; (Thanks to Cristian
+ Greco)</p>
+ <p>
+ Own Id: OTP-9454</p>
+ </item>
+ <item>
+ <p>
+ inet: error if fd does not match socket domain</p>
+ <p>
+ If an IPv4 fd is opened as an IPv6 socket, unexpected
+ behaviour can occur. For example, if an IPv4 UDP socket
+ is opened and passed into Erlang as an IPv6 socket, the
+ first 3 bytes (corresponding to 1 byte representing the
+ protocol family, 2 bytes set to the port) are stripped
+ from the payload. The cause of the UDP payload truncation
+ happens in inet_drv.c:packet_inet_input when a call to
+ inet_get_address fails silently because the family is set
+ to PF_INET6 but the buffer len is the size of an IPv4
+ struct sockaddr_in.</p>
+ <p>
+ (Thanks to Andrew Tunnell-Jones for finding the bug and
+ the test case!)</p>
+ <p>
+ Own Id: OTP-9455</p>
+ </item>
+ <item>
+ <p>
+ erts: use a union to avoid strict aliasing issues</p>
+ <p>
+ Use a union for pointer type conversion to avoid compiler
+ warnings about strict-aliasing violations with gcc-4.1.
+ gcc >= 4.2 does not emit the warning. erts: adapt
+ matrix_nif to R14 erl_nif API changes (Thanks To Tuncer
+ Ayaz)</p>
+ <p>
+ Own Id: OTP-9487</p>
+ </item>
+ <item>
+ <p>
+ fix 64-bit issues in the garbage collection (Thanks to
+ Richard Carlsson)</p>
+ <p>
+ Own Id: OTP-9488</p>
+ </item>
+ <item>
+ <p>
+ epmd: fix compiler warnings</p>
+ <p>
+ Suppress compiler warnings about ignored return values.
+ (Thanks to Michael Santos )</p>
+ <p>
+ Own Id: OTP-9500</p>
+ </item>
+ <item>
+ <p>
+ Fix non-existing function (erlang:disconnect/1) in
+	    distributed reference manual (Thanks to Fabian Król)</p>
+ <p>
+ Own Id: OTP-9504</p>
+ </item>
+ <item>
+ <p>
+ Document fdatasync -lrt requirement (SunOS &lt;= 5.10)
+ (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-9512</p>
+ </item>
+ <item>
+ <p>
+ Let epmd ignore empty ERL_EPMD_ADDRESS</p>
+ <p>
+ If the environment variable ERL_EPMD_ADDRESS is set to
+	    the empty string, epmd now behaves like it does by
+ default when ERL_EPMD_ADDRESS is unset. That is, in this
+ case, epmd now listens on all available interfaces
+ instead of using only the loopback interface, which
+ happened because epmd added the loopback address to the
+ (in this case empty) list of addresses specified via
+ ERL_EPMD_ADDRESS.</p>
+ <p>
+ Also, epmd now ignores ERL_EPMD_ADDRESS if it contains
+ only separator characters (comma and space).</p>
+ <p>
+	    The same applies to epmd's -address option. (Thanks to
+	    Holger Weiß)</p>
+ <p>
+ Own Id: OTP-9525</p>
+ </item>
+ <item>
+ <p>
+ Remove dead code in erl_compile (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-9527</p>
+ </item>
+ <item>
+ <p>
+ Add erlang:external_size/2 BIF</p>
+ <p>
+ This BIF's second parameter is a list of options.
+ Currently the only allowed option is {minor_version,
+	    Version} where Version is either 0 (default) or 1.
+	    (Thanks to Filipe David Manana)</p>
+ <p>
+ Own Id: OTP-9528</p>
+ </item>
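A brief sketch of the new BIF as described in this item (the term is arbitrary):

    Term = {foo, [1, 2, 3]},
    SizeV0 = erlang:external_size(Term, [{minor_version, 0}]),
    SizeV1 = erlang:external_size(Term, [{minor_version, 1}]).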
+ <item>
+ <p>
+ Fix enif_compare on 64bits machines</p>
+ <p>
+	    On 64-bit machines the Sint type has a size of 8 bytes,
+	    while on 32-bit machines it has a size of 4 bytes.
+ enif_compare was ignoring this and therefore returning
+ incorrect values when the result of the CMP function
+ (which returns a Sint value) doesn't fit in 4 bytes.
+ (Thanks to Filipe David Manana)</p>
+ <p>
+ Own Id: OTP-9533</p>
+ </item>
+ <item>
+ <p>
+ Implement or fix -Werror option</p>
+ <p>
+ If -Werror is enabled and there are warnings no output
+ file is written. Also make sure that error/warning
+ reporting is consistent. (Thanks to Tuncer Ayaz)</p>
+ <p>
+ Own Id: OTP-9536</p>
+ </item>
+ <item>
+ <p>In some rare cases we did not have a run queue when
+ scheduling misc ops. This is now fixed.</p>
+ <p>
+ Own Id: OTP-9537</p>
+ </item>
+ <item>
+ <p>Remove misc. compiler warnings</p>
+ <p>
+ Own Id: OTP-9542</p>
+ </item>
+ <item>
+ <p>
+	    Two bugs in gen_sctp have been corrected: getopts/setopts
+	    (and hence also send) could only be called by the socket
+	    owner, and the options 'linger', 'rcvbuf' and 'sndbuf'
+	    were read from the wrong protocol layer and hence returned
+	    wrong values from getopts.</p>
+ <p>
+ Own Id: OTP-9544</p>
+ </item>
+ <item>
+ <p>
+ Erlang/OTP can now be built on MacOS X Lion.</p>
+ <p>
+ Own Id: OTP-9547</p>
+ </item>
+ <item>
+ <p> XML files have been corrected. </p>
+ <p>
+ Own Id: OTP-9550 Aux Id: OTP-9541 </p>
+ </item>
+ <item>
+ <p>
+ Fix potential errors inspired by running cppcheck(1)
+ (Thanks to Christian von Roques)</p>
+ <p>
+ Own Id: OTP-9557</p>
+ </item>
+ <item>
+ <p>When auxiliary work was enqueued on a scheduler, the
+ wakeup of the scheduler in order to handle this work
+ could be lost. Wakeups in order to handle ordinary work
+	    were not affected by this bug. The bug only affected
+ runtime systems with SMP support as follows:</p> <list>
+ <item>Deallocation of some ETS data structures could be
+ delayed.</item> <item>On Linux systems not using the NPTL
+ thread library (typically ancient systems with kernel
+ versions prior to 2.6) and Windows systems, the <c>{Port,
+ {exit_status, Status}}</c> message from a terminating
+	    port program could be delayed. That is, it only affected
+ port programs which had been started by passing
+ <c>exit_status</c> as an option to
+ <c>open_port/2</c>.</item> </list>
+ <p>
+ Own Id: OTP-9567</p>
+ </item>
+ <item>
+ <p>
+ Handle rare race in the crypto key server functionality</p>
+ <p>
+ Own Id: OTP-9586</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p> Types and specifications have been added. </p>
+ <p>
+ Own Id: OTP-9356</p>
+ </item>
+ <item>
+ <p>
+ New allocator strategy "address order first fit". May
+	    ease the emptying of memory carriers and thereby actual
+ release of memory back to the OS.</p>
+ <p>
+ Own Id: OTP-9424</p>
+ </item>
+ <item>
+ <p>
+ The new <c>erlang:check_old_code/1</c> BIF checks whether
+ a module has old code.</p>
+ <p>
+ Own Id: OTP-9495</p>
+ </item>
+ <item>
+ <p> Update documentation and specifications of some of
+ the zlib functions. </p>
+ <p>
+ Own Id: OTP-9506</p>
+ </item>
+ <item>
+ <p>
+ Detect the available CPUs on IRIX</p>
+ <p>
+ Add support for querying the number of configured and
+	    online processors on SGI systems running IRIX. (Thanks to
+	    Holger Weiß)</p>
+ <p>
+ Own Id: OTP-9531</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 5.8.4</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -1949,6 +2976,24 @@
</section>
+<section><title>Erts 5.7.5.2</title>
+
+ <section><title>Known Bugs and Problems</title>
+ <list>
+ <item>
+ <p>
+	    Two bugs in gen_sctp have been corrected: getopts/setopts
+	    (and hence also send) could only be called by the socket
+	    owner, and the options 'linger', 'rcvbuf' and 'sndbuf'
+	    were read from the wrong protocol layer and hence returned
+	    wrong values from getopts.</p>
+ <p>
+ Own Id: OTP-9544</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 5.7.5.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -4534,7 +5579,7 @@
The race occurred when a process removed a table during
termination simultaneously as another process removed the
same table via <c>ets:delete/1</c> and a third process
- created a table that accidentaly got the same internal
+ created a table that accidentally got the same internal
table index as the table being removed.</p>
<p>
Own Id: OTP-7349</p>
diff --git a/erts/doc/src/specs.xml b/erts/doc/src/specs.xml
new file mode 100644
index 0000000000..e5c2f4783f
--- /dev/null
+++ b/erts/doc/src/specs.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="latin1" ?>
+<specs xmlns:xi="http://www.w3.org/2001/XInclude">
+ <xi:include href="../specs/specs_erl_prim_loader.xml"/>
+ <xi:include href="../specs/specs_erlang.xml"/>
+ <xi:include href="../specs/specs_init.xml"/>
+ <xi:include href="../specs/specs_zlib.xml"/>
+</specs>
diff --git a/erts/doc/src/start_erl.xml b/erts/doc/src/start_erl.xml
index 21cc901f52..92d87b095a 100644
--- a/erts/doc/src/start_erl.xml
+++ b/erts/doc/src/start_erl.xml
@@ -4,7 +4,7 @@
<comref>
<header>
<copyright>
- <year>1998</year><year>2009</year>
+ <year>1998</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -69,12 +69,29 @@
<c><![CDATA[erl]]></c> program. Everything <em>after</em><c><![CDATA[++]]></c> is
interpreted as options to <c><![CDATA[start_erl]]></c> itself.</item>
<tag>-reldir &lt;release root&gt;</tag>
- <item>Mandatory if the environment variable <c><![CDATA[RELDIR]]></c> is not
- specified. Tells start_erl where the root of the
- release tree is placed in the file-system
- (like &lt;Erlang root&gt;\\releases). The
- <c><![CDATA[start_erl.data]]></c> file is expected to be placed in
- this directory (if not otherwise specified).</item>
+
+ <item>Mandatory if the environment variable
+ <c><![CDATA[RELDIR]]></c> is not specified and no
+ <c>-rootdir</c> option is given. Tells start_erl where the
+ root of the release tree is placed in the file-system (typically
+ &lt;Erlang root&gt;\\releases). The
+ <c><![CDATA[start_erl.data]]></c> file is expected to be
+ placed in this directory (if not otherwise specified). If
+ only the <c>-rootdir</c> option is given, the directory is
+ assumed to be &lt;Erlang root&gt;\\releases.</item>
+
+ <tag>-rootdir &lt;Erlang root directory&gt;</tag>
+
+ <item>Mandatory if <c>-reldir</c> is not given and there is
+ no <c><![CDATA[RELDIR]]></c> in the environment. This
+ specifies the Erlang installation root directory (under
+ which the <c>lib</c>, <c>releases</c> and
+ <c>erts-&lt;Version&gt;</c> directories are placed). If only
+ <c>-reldir</c> (or the environment variable
+ <c><![CDATA[RELDIR]]></c>) is given, the Erlang root is assumed to
+ be the directory exactly one level above the release
+ directory.</item>
+
<tag>-data &lt;data file name&gt;</tag>
<item>Optional, specifies another data file than start_erl.data
in the &lt;release root&gt;. It is specified relative to the
diff --git a/erts/doc/src/zlib.xml b/erts/doc/src/zlib.xml
index b1e768bce9..8917ab5c3a 100644
--- a/erts/doc/src/zlib.xml
+++ b/erts/doc/src/zlib.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2005</year><year>2010</year>
+ <year>2005</year><year>2011</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -76,96 +76,92 @@ list_to_binary([Compressed|Last])</pre>
</taglist>
</description>
- <section>
- <title>DATA TYPES</title>
- <code type="none">
-iodata = iolist() | binary()
-
-iolist = [char() | binary() | iolist()]
- a binary is allowed as the tail of the list
-
-zstream = a zlib stream, see open/0</code>
- </section>
+ <datatypes>
+ <datatype>
+ <name name="zstream"/>
+ <desc>
+ <p>A zlib stream, see <seealso marker="#open/0">open/0</seealso>.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="zlevel"/>
+ </datatype>
+ <datatype>
+ <name name="zmemlevel"/>
+ </datatype>
+ <datatype>
+ <name name="zmethod"/>
+ </datatype>
+ <datatype>
+ <name name="zstrategy"/>
+ </datatype>
+ <datatype>
+ <name name="zwindowbits"/>
+ <desc>
+ <p>Normally in the range <c>-15..-9 | 9..15</c>.</p>
+ </desc>
+ </datatype>
+ </datatypes>
<funcs>
<func>
- <name>open() -> Z </name>
+ <name name="open" arity="0"/>
<fsummary>Open a stream and return a stream reference</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
<p>Open a zlib stream.</p>
</desc>
</func>
<func>
- <name>close(Z) -> ok</name>
+ <name name="close" arity="1"/>
<fsummary>Close a stream</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
- <p>Closes the stream referenced by <c>Z</c>.</p>
+ <p>Closes the stream referenced by <c><anno>Z</anno></c>.</p>
</desc>
</func>
<func>
- <name>deflateInit(Z) -> ok</name>
+ <name name="deflateInit" arity="1"/>
<fsummary>Initialize a session for compression</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
- <p>Same as <c>zlib:deflateInit(Z, default)</c>.</p>
+ <p>Same as <c>zlib:deflateInit(<anno>Z</anno>, default)</c>.</p>
</desc>
</func>
<func>
- <name>deflateInit(Z, Level) -> ok</name>
+ <name name="deflateInit" arity="2"/>
<fsummary>Initialize a session for compression</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Level = none | default | best_speed | best_compression | 0..9</v>
- </type>
<desc>
<p>Initialize a zlib stream for compression.</p>
- <p><c>Level</c> decides the compression level to be used, 0
+	  <p><c><anno>Level</anno></c> decides the compression level to be used: 0
	  (<c>none</c>) gives no compression at all, 1
(<c>best_speed</c>) gives best speed and 9
(<c>best_compression</c>) gives best compression.</p>
</desc>
</func>
<func>
- <name>deflateInit(Z, Level, Method, WindowBits, MemLevel, Strategy) -> ok</name>
+ <name name="deflateInit" arity="6"/>
<fsummary>Initialize a session for compression</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Level = none | default | best_speed | best_compression | 0..9</v>
- <v>Method = deflated</v>
- <v>WindowBits = 9..15|-9..-15</v>
- <v>MemLevel = 1..9</v>
- <v>Strategy = default|filtered|huffman_only</v>
- </type>
<desc>
<p>Initiates a zlib stream for compression.</p>
- <p>The <c>Level</c> parameter decides the compression level to be
+ <p>The <c><anno>Level</anno></c> parameter decides the compression level to be
	  used: 0 (<c>none</c>) gives no compression at all, 1
(<c>best_speed</c>) gives best speed and 9
(<c>best_compression</c>) gives best compression.</p>
- <p>The <c>Method</c> parameter decides which compression method to use,
+ <p>The <c><anno>Method</anno></c> parameter decides which compression method to use,
currently the only supported method is <c>deflated</c>.</p>
- <p>The <c>WindowBits</c> parameter is the base two logarithm
+ <p>The <c><anno>WindowBits</anno></c> parameter is the base two logarithm
of the window size (the size of the history buffer). It
should be in the range 9 through 15. Larger values
of this parameter result in better compression at the
expense of memory usage. The default value is 15 if
- <c>deflateInit/2</c>. A negative <c>WindowBits</c>
+	  <c>deflateInit/2</c> is used. A negative <c><anno>WindowBits</anno></c>
value suppresses the zlib header (and checksum) from the
	  stream. Note that the zlib source mentions this only as an
undocumented feature.</p>
- <p>The <c>MemLevel</c> parameter specifies how much memory
+ <p>The <c><anno>MemLevel</anno></c> parameter specifies how much memory
should be allocated for the internal compression
- state. <c>MemLevel</c>=1 uses minimum memory but is slow and
- reduces compression ratio; <c>MemLevel</c>=9 uses maximum
+ state. <c><anno>MemLevel</anno></c>=1 uses minimum memory but is slow and
+ reduces compression ratio; <c><anno>MemLevel</anno></c>=9 uses maximum
memory for optimal speed. The default value is 8.</p>
- <p>The <c>Strategy</c> parameter is used to tune the
+ <p>The <c><anno>Strategy</anno></c> parameter is used to tune the
compression algorithm. Use the value <c>default</c> for
normal data, <c>filtered</c> for data produced by a filter
(or predictor), or <c>huffman_only</c> to force Huffman
@@ -175,54 +171,43 @@ zstream = a zlib stream, see open/0</code>
tuned to compress them better. The effect of
	  <c>filtered</c> is to force more Huffman coding and less
string matching; it is somewhat intermediate between
- <c>default</c> and <c>huffman_only</c>. The <c>Strategy</c>
+ <c>default</c> and <c>huffman_only</c>. The <c><anno>Strategy</anno></c>
parameter only affects the compression ratio but not the
correctness of the compressed output even if it is not set
appropriately.</p>
</desc>
</func>
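For orientation, a minimal usage sketch of the six-argument form with explicit values; the particular choices below are illustrative only, and Data stands for any iodata:

    Z = zlib:open(),
    ok = zlib:deflateInit(Z, best_compression, deflated, 15, 9, filtered),
    %% 15 keeps the zlib header, MemLevel 9 trades memory for speed,
    %% filtered suits data produced by a predictor
    Compressed = zlib:deflate(Z, Data, finish),
    ok = zlib:deflateEnd(Z),
    ok = zlib:close(Z).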
<func>
- <name>deflate(Z, Data) -> Compressed</name>
+ <name name="deflate" arity="2"/>
<fsummary>Compress data</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Data = iodata()</v>
- <v>Compressed = iolist()</v>
- </type>
<desc>
- <p>Same as <c>deflate(Z, Data, none)</c>.</p>
+ <p>Same as <c>deflate(<anno>Z</anno>, <anno>Data</anno>, none)</c>.</p>
</desc>
</func>
<func>
- <name>deflate(Z, Data, Flush) -> </name>
+ <name name="deflate" arity="3"/>
<fsummary>Compress data</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Data = iodata()</v>
- <v>Flush = none | sync | full | finish</v>
- <v>Compressed = iolist()</v>
- </type>
<desc>
<p><c>deflate/3</c> compresses as much data as possible, and
stops when the input buffer becomes empty. It may introduce
some output latency (reading input without producing any
output) except when forced to flush.</p>
- <p>If the parameter <c>Flush</c> is set to <c>sync</c>, all
+ <p>If the parameter <c><anno>Flush</anno></c> is set to <c>sync</c>, all
pending output is flushed to the output buffer and the
output is aligned on a byte boundary, so that the
decompressor can get all input data available so far.
Flushing may degrade compression for some compression algorithms and so
it should be used only when necessary.</p>
- <p>If <c>Flush</c> is set to <c>full</c>, all output is flushed as with
+ <p>If <c><anno>Flush</anno></c> is set to <c>full</c>, all output is flushed as with
<c>sync</c>, and the compression state is reset so that decompression can
restart from this point if previous compressed data has been damaged or if
random access is desired. Using <c>full</c> too often can seriously degrade
the compression.</p>
- <p>If the parameter <c>Flush</c> is set to <c>finish</c>,
+ <p>If the parameter <c><anno>Flush</anno></c> is set to <c>finish</c>,
pending input is processed, pending output is flushed and
<c>deflate/3</c> returns. Afterwards the only possible
operations on the stream are <c>deflateReset/1</c> or <c>deflateEnd/1</c>.</p>
- <p><c>Flush</c> can be set to <c>finish</c> immediately after
+ <p><c><anno>Flush</anno></c> can be set to <c>finish</c> immediately after
<c>deflateInit</c> if all compression is to be done in one step.</p>
<pre>
@@ -234,13 +219,8 @@ list_to_binary([B1,B2])</pre>
</desc>
</func>
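A small sketch of incremental compression using the flush values discussed above; Chunk1, Chunk2 and Chunk3 are placeholders for iodata:

    Z = zlib:open(),
    ok = zlib:deflateInit(Z),
    P1 = zlib:deflate(Z, Chunk1),            % flush defaults to none
    P2 = zlib:deflate(Z, Chunk2, sync),      % byte-align the output produced so far
    P3 = zlib:deflate(Z, Chunk3, finish),    % process and flush everything that remains
    ok = zlib:deflateEnd(Z),
    Compressed = list_to_binary([P1, P2, P3]).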
<func>
- <name>deflateSetDictionary(Z, Dictionary) -> Adler32</name>
+ <name name="deflateSetDictionary" arity="2"/>
<fsummary>Initialize the compression dictionary</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Dictionary = binary()</v>
- <v>Adler32 = integer()</v>
- </type>
<desc>
<p>Initializes the compression dictionary from the given byte
sequence without producing any compressed output. This
@@ -253,11 +233,8 @@ list_to_binary([B1,B2])</pre>
</desc>
</func>
<func>
- <name>deflateReset(Z) -> ok</name>
+ <name name="deflateReset" arity="1"/>
<fsummary>Reset the deflate session</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
<p>This function is equivalent to <c>deflateEnd/1</c>
followed by <c>deflateInit/[1|2|6]</c>, but does not free
@@ -267,34 +244,26 @@ list_to_binary([B1,B2])</pre>
</desc>
</func>
<func>
- <name>deflateParams(Z, Level, Strategy) -> ok </name>
+ <name name="deflateParams" arity="3"/>
      <fsummary>Dynamically update deflate parameters</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Level = none | default | best_speed | best_compression | 0..9</v>
- <v>Strategy = default|filtered|huffman_only</v>
- </type>
<desc>
<p>Dynamically update the compression level and compression
- strategy. The interpretation of <c>Level</c> and
- <c>Strategy</c> is as in <c>deflateInit/6</c>. This can be
+ strategy. The interpretation of <c><anno>Level</anno></c> and
+ <c><anno>Strategy</anno></c> is as in <c>deflateInit/6</c>. This can be
used to switch between compression and straight copy of the
input data, or to switch to a different kind of input data
requiring a different strategy. If the compression level is
changed, the input available so far is compressed with the
old level (and may be flushed); the new level will take
effect only at the next call of <c>deflate/3</c>.</p>
- <p>Before the call of deflateParams, the stream state must be set as for
+ <p>Before the call of <c>deflateParams</c>, the stream state must be set as for
a call of <c>deflate/3</c>, since the currently available input may have to
be compressed and flushed.</p>
</desc>
</func>
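A sketch of switching parameters in the middle of a stream, along the lines described above; Text and MoreText are placeholders:

    Z = zlib:open(),
    ok = zlib:deflateInit(Z, best_speed),
    B1 = zlib:deflate(Z, Text),                       % compressed with best_speed
    ok = zlib:deflateParams(Z, best_compression, default),
    B2 = zlib:deflate(Z, MoreText, finish),           % new level applies from here on
    ok = zlib:deflateEnd(Z),
    Compressed = list_to_binary([B1, B2]).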
<func>
- <name>deflateEnd(Z) -> ok</name>
+ <name name="deflateEnd" arity="1"/>
<fsummary>End deflate session</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
        <p>End the deflate session and clean all data used.
        Note that this function will throw a <c>data_error</c>
@@ -304,43 +273,31 @@ list_to_binary([B1,B2])</pre>
</desc>
</func>
<func>
- <name>inflateInit(Z) -> ok </name>
+ <name name="inflateInit" arity="1"/>
<fsummary>Initialize a session for decompression</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
<p>Initialize a zlib stream for decompression.</p>
</desc>
</func>
<func>
- <name>inflateInit(Z, WindowBits) -> ok </name>
+ <name name="inflateInit" arity="2"/>
<fsummary>Initialize a session for decompression</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>WindowBits = 9..15|-9..-15</v>
- </type>
<desc>
<p>Initialize decompression session on zlib stream.</p>
- <p>The <c>WindowBits</c> parameter is the base two logarithm
+ <p>The <c><anno>WindowBits</anno></c> parameter is the base two logarithm
of the maximum window size (the size of the history buffer).
It should be in the range 9 through 15.
The default value is 15 if <c>inflateInit/1</c> is used.
If a compressed stream with a larger window size is
given as input, inflate() will throw the <c>data_error</c>
- exception. A negative <c>WindowBits</c> value makes zlib ignore the
+ exception. A negative <c><anno>WindowBits</anno></c> value makes zlib ignore the
zlib header (and checksum) from the stream. Note that the zlib
	  source mentions this only as an undocumented feature.</p>
</desc>
</func>
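As an illustration of the negative WindowBits case, a hedged round-trip sketch; Data is a placeholder, and -15 is just one valid value, which must be the same on both sides:

    Z1 = zlib:open(),
    ok = zlib:deflateInit(Z1, default, deflated, -15, 8, default),
    Raw = zlib:deflate(Z1, Data, finish),     % raw deflate stream, no zlib header
    ok = zlib:deflateEnd(Z1),
    ok = zlib:close(Z1),

    Z2 = zlib:open(),
    ok = zlib:inflateInit(Z2, -15),           % matches the WindowBits used when compressing
    Plain = zlib:inflate(Z2, Raw),
    ok = zlib:inflateEnd(Z2),
    ok = zlib:close(Z2).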
<func>
- <name>inflate(Z, Data) -> DeCompressed </name>
+ <name name="inflate" arity="2"/>
<fsummary>Decompress data</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Data = iodata()</v>
- <v>DeCompressed = iolist()</v>
- </type>
<desc>
<p><c>inflate/2</c> decompresses as much data as possible.
        It may introduce some output latency (reading
@@ -353,12 +310,8 @@ list_to_binary([B1,B2])</pre>
</desc>
</func>
<func>
- <name>inflateSetDictionary(Z, Dictionary) -> ok</name>
+ <name name="inflateSetDictionary" arity="2"/>
<fsummary>Initialize the decompression dictionary</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Dictionary = binary()</v>
- </type>
<desc>
<p>Initializes the decompression dictionary from the given
uncompressed byte sequence. This function must be called
@@ -381,11 +334,8 @@ unpack(Z, Compressed, Dict) ->
</desc>
</func>
<func>
- <name>inflateReset(Z) -> ok</name>
+ <name name="inflateReset" arity="1"/>
      <fsummary>Reset the inflate session</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
<p>This function is equivalent to <c>inflateEnd/1</c> followed
by <c>inflateInit/1</c>, but does not free and reallocate all
@@ -394,11 +344,8 @@ unpack(Z, Compressed, Dict) ->
</desc>
</func>
<func>
- <name>inflateEnd(Z) -> ok</name>
+ <name name="inflateEnd" arity="1"/>
<fsummary>End inflate session</fsummary>
- <type>
- <v>Z = zstream()</v>
- </type>
<desc>
        <p>End the inflate session and clean all data used. Note
that this function will throw a <c>data_error</c> exception
@@ -407,198 +354,132 @@ unpack(Z, Compressed, Dict) ->
</desc>
</func>
<func>
- <name>setBufSize(Z, Size) -> ok</name>
+ <name name="setBufSize" arity="2"/>
<fsummary>Set buffer size</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Size = integer()</v>
- </type>
<desc>
<p>Sets the intermediate buffer size.</p>
</desc>
</func>
<func>
- <name>getBufSize(Z) -> Size</name>
+ <name name="getBufSize" arity="1"/>
<fsummary>Get buffer size</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Size = integer()</v>
- </type>
<desc>
        <p>Get the size of the intermediate buffer.</p>
</desc>
</func>
<func>
- <name>crc32(Z) -> CRC</name>
+ <name name="crc32" arity="1"/>
<fsummary>Get current CRC</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>CRC = integer()</v>
- </type>
<desc>
<p>Get the current calculated CRC checksum.</p>
</desc>
</func>
<func>
- <name>crc32(Z, Binary) -> CRC</name>
+ <name name="crc32" arity="2"/>
<fsummary>Calculate CRC</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Binary = binary()</v>
- <v>CRC = integer()</v>
- </type>
<desc>
- <p>Calculate the CRC checksum for <c>Binary</c>.</p>
+ <p>Calculate the CRC checksum for <c><anno>Data</anno></c>.</p>
</desc>
</func>
<func>
- <name>crc32(Z, PrevCRC, Binary) -> CRC </name>
+ <name name="crc32" arity="3"/>
<fsummary>Calculate CRC</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>PrevCRC = integer()</v>
- <v>Binary = binary()</v>
- <v>CRC = integer()</v>
- </type>
- <desc>
- <p>Update a running CRC checksum for <c>Binary</c>.
- If <c>Binary</c> is the empty binary, this function returns
+ <desc>
+ <p>Update a running CRC checksum for <c><anno>Data</anno></c>.
+ If <c><anno>Data</anno></c> is the empty binary or the empty iolist, this function returns
the required initial value for the crc.</p>
<pre>
-Crc = lists:foldl(fun(Bin,Crc0) ->
- zlib:crc32(Z, Crc0, Bin),
- end, zlib:crc32(Z,&lt;&lt; &gt;&gt;), Bins)</pre>
+Crc = lists:foldl(fun(Data,Crc0) ->
+                      zlib:crc32(Z, Crc0, Data)
+                  end, zlib:crc32(Z,&lt;&lt; &gt;&gt;), Datas)</pre>
</desc>
</func>
<func>
- <name>crc32_combine(Z, CRC1, CRC2, Size2) -> CRC </name>
+ <name name="crc32_combine" arity="4"/>
      <fsummary>Combine two CRCs</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>CRC = integer()</v>
- <v>CRC1 = integer()</v>
- <v>CRC2 = integer()</v>
- <v>Size2 = integer()</v>
- </type>
- <desc>
- <p>Combine two CRC checksums into one. For two binaries,
- <c>Bin1</c> and <c>Bin2</c> with sizes of <c>Size1</c> and
- <c>Size2</c>, with CRC checksums <c>CRC1</c> and
- <c>CRC2</c>. <c>crc32_combine/4</c> returns the <c>CRC</c>
- checksum of <c>&lt;&lt;Bin1/binary,Bin2/binary&gt;&gt;</c>, requiring
- only <c>CRC1</c>, <c>CRC2</c>, and <c>Size2</c>.
+ <desc>
+	  <p>Combine two CRC checksums into one. Given two binaries or iolists,
+	  <c>Data1</c> and <c>Data2</c> with sizes <c>Size1</c> and
+	  <c><anno>Size2</anno></c>, and CRC checksums <c><anno>CRC1</anno></c> and
+	  <c><anno>CRC2</anno></c>, <c>crc32_combine/4</c> returns the <c><anno>CRC</anno></c>
+	  checksum of <c>[Data1,Data2]</c>, requiring
+	  only <c><anno>CRC1</anno></c>, <c><anno>CRC2</anno></c>, and <c><anno>Size2</anno></c>.
</p>
</desc>
</func>
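A brief sketch of how the combine operation can be used; Data1 and Data2 are placeholders for iodata:

    Z = zlib:open(),
    Crc1 = zlib:crc32(Z, Data1),
    Crc2 = zlib:crc32(Z, Data2),
    %% Crc equals the value of zlib:crc32(Z, [Data1, Data2])
    Crc = zlib:crc32_combine(Z, Crc1, Crc2, iolist_size(Data2)),
    ok = zlib:close(Z).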
<func>
- <name>adler32(Z, Binary) -> Checksum</name>
+ <name name="adler32" arity="2"/>
<fsummary>Calculate the adler checksum</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Binary = binary()</v>
- <v>Checksum = integer()</v>
- </type>
<desc>
- <p>Calculate the Adler-32 checksum for <c>Binary</c>.</p>
+ <p>Calculate the Adler-32 checksum for <c><anno>Data</anno></c>.</p>
</desc>
</func>
<func>
- <name>adler32(Z, PrevAdler, Binary) -> Checksum</name>
+ <name name="adler32" arity="3"/>
<fsummary>Calculate the adler checksum</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>PrevAdler = integer()</v>
- <v>Binary = binary()</v>
- <v>Checksum = integer()</v>
- </type>
- <desc>
- <p>Update a running Adler-32 checksum for <c>Binary</c>.
- If <c>Binary</c> is the empty binary, this function returns
+ <desc>
+ <p>Update a running Adler-32 checksum for <c><anno>Data</anno></c>.
+ If <c><anno>Data</anno></c> is the empty binary or the empty iolist, this function returns
the required initial value for the checksum.</p>
<pre>
-Crc = lists:foldl(fun(Bin,Crc0) ->
- zlib:adler32(Z, Crc0, Bin),
- end, zlib:adler32(Z,&lt;&lt; &gt;&gt;), Bins)</pre>
+Crc = lists:foldl(fun(Data,Crc0) ->
+                      zlib:adler32(Z, Crc0, Data)
+                  end, zlib:adler32(Z,&lt;&lt; &gt;&gt;), Datas)</pre>
</desc>
</func>
<func>
- <name>adler32_combine(Z, Adler1, Adler2, Size2) -> Adler </name>
+ <name name="adler32_combine" arity="4"/>
<fsummary>Combine two Adler-32 checksums</fsummary>
- <type>
- <v>Z = zstream()</v>
- <v>Adler = integer()</v>
- <v>Adler1 = integer()</v>
- <v>Adler2 = integer()</v>
- <v>Size2 = integer()</v>
- </type>
- <desc>
- <p>Combine two Adler-32 checksums into one. For two binaries,
- <c>Bin1</c> and <c>Bin2</c> with sizes of <c>Size1</c> and
- <c>Size2</c>, with Adler-32 checksums <c>Adler1</c> and
- <c>Adler2</c>. <c>adler32_combine/4</c> returns the <c>Adler</c>
- checksum of <c>&lt;&lt;Bin1/binary,Bin2/binary&gt;&gt;</c>, requiring
- only <c>Adler1</c>, <c>Adler2</c>, and <c>Size2</c>.
+ <desc>
+	  <p>Combine two Adler-32 checksums into one. Given two binaries or iolists,
+	  <c>Data1</c> and <c>Data2</c> with sizes <c>Size1</c> and
+	  <c><anno>Size2</anno></c>, and Adler-32 checksums <c><anno>Adler1</anno></c> and
+	  <c><anno>Adler2</anno></c>, <c>adler32_combine/4</c> returns the <c><anno>Adler</anno></c>
+	  checksum of <c>[Data1,Data2]</c>, requiring
+	  only <c><anno>Adler1</anno></c>, <c><anno>Adler2</anno></c>, and <c><anno>Size2</anno></c>.
</p>
</desc>
</func>
<func>
- <name>compress(Binary) -> Compressed </name>
- <fsummary>Compress a binary with standard zlib functionality</fsummary>
- <type>
- <v>Binary = Compressed = binary()</v>
- </type>
+ <name name="compress" arity="1"/>
+ <fsummary>Compress data with standard zlib functionality</fsummary>
<desc>
- <p>Compress a binary (with zlib headers and checksum).</p>
+ <p>Compress data (with zlib headers and checksum).</p>
</desc>
</func>
<func>
- <name>uncompress(Binary) -> Decompressed</name>
- <fsummary>Uncompress a binary with standard zlib functionality</fsummary>
- <type>
- <v>Binary = Decompressed = binary()</v>
- </type>
+ <name name="uncompress" arity="1"/>
+ <fsummary>Uncompress data with standard zlib functionality</fsummary>
<desc>
- <p>Uncompress a binary (with zlib headers and checksum).</p>
+ <p>Uncompress data (with zlib headers and checksum).</p>
</desc>
</func>
<func>
- <name>zip(Binary) -> Compressed</name>
- <fsummary>Compress a binary without the zlib headers</fsummary>
- <type>
- <v>Binary = Compressed = binary()</v>
- </type>
+ <name name="zip" arity="1"/>
+ <fsummary>Compress data without the zlib headers</fsummary>
<desc>
- <p>Compress a binary (without zlib headers and checksum).</p>
+ <p>Compress data (without zlib headers and checksum).</p>
</desc>
</func>
<func>
- <name>unzip(Binary) -> Decompressed</name>
- <fsummary>Uncompress a binary without the zlib headers</fsummary>
- <type>
- <v>Binary = Decompressed = binary()</v>
- </type>
+ <name name="unzip" arity="1"/>
+ <fsummary>Uncompress data without the zlib headers</fsummary>
<desc>
- <p>Uncompress a binary (without zlib headers and checksum).</p>
+ <p>Uncompress data (without zlib headers and checksum).</p>
</desc>
</func>
<func>
- <name>gzip(Data) -> Compressed</name>
- <fsummary>Compress a binary with gz header</fsummary>
- <type>
- <v>Binary = Compressed = binary()</v>
- </type>
+ <name name="gzip" arity="1"/>
+ <fsummary>Compress data with gz header</fsummary>
<desc>
- <p>Compress a binary (with gz headers and checksum).</p>
+ <p>Compress data (with gz headers and checksum).</p>
</desc>
</func>
<func>
- <name>gunzip(Bin) -> Decompressed</name>
- <fsummary>Uncompress a binary with gz header</fsummary>
- <type>
- <v>Binary = Decompressed = binary()</v>
- </type>
+ <name name="gunzip" arity="1"/>
+ <fsummary>Uncompress data with gz header</fsummary>
<desc>
- <p>Uncompress a binary (with gz headers and checksum).</p>
+ <p>Uncompress data (with gz headers and checksum).</p>
</desc>
</func>
</funcs>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index d9362a2a8f..2efbe2d57e 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2011. All Rights Reserved.
+# Copyright Ericsson AB 1996-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -23,26 +23,34 @@ include $(ERL_TOP)/make/$(TARGET)/otp.mk
ENABLE_ALLOC_TYPE_VARS = @ENABLE_ALLOC_TYPE_VARS@
HIPE_ENABLED=@HIPE_ENABLED@
+DTRACE_ENABLED=@DTRACE_ENABLED@
+DTRACE_ENABLED_2STEP=@DTRACE_ENABLED_2STEP@
+USE_VM_PROBES=@USE_VM_PROBES@
LIBS = @LIBS@
Z_LIB=@Z_LIB@
NO_INLINE_FUNCTIONS=false
OPCODE_TABLES = $(ERL_TOP)/lib/compiler/src/genop.tab beam/ops.tab
+DEBUG_CFLAGS = @DEBUG_CFLAGS@
+CONFIGURE_CFLAGS = @CFLAGS@
+
#
# Run this make file with TYPE set to the type of emulator you want.
# Different versions of the emulator for different uses. The default
# is "debug". For a normal version use "opt".
#
+DEFS=@DEFS@
THR_DEFS=@EMU_THR_DEFS@
M4FLAGS=
CREATE_DIRS=
LDFLAGS=@LDFLAGS@
+ARFLAGS=rc
ifeq ($(TYPE),debug)
PURIFY =
TYPEMARKER = .debug
-TYPE_FLAGS = @DEBUG_CFLAGS@ -DDEBUG
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DDEBUG
ENABLE_ALLOC_TYPE_VARS += debug
ifeq ($(TARGET),win32)
TYPE_FLAGS += -DNO_JUMP_TABLE
@@ -53,7 +61,7 @@ else
ifeq ($(TYPE),purify)
PURIFY = purify $(PURIFY_BUILD_OPTIONS)
TYPEMARKER = .purify
-TYPE_FLAGS = @DEBUG_CFLAGS@ -DPURIFY -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
ENABLE_ALLOC_TYPE_VARS += purify
else
@@ -67,14 +75,14 @@ else
ifeq ($(TYPE),purecov)
PURIFY = purecov --follow-child-processes=yes $(PURECOV_BUILD_OPTIONS)
TYPEMARKER = .purecov
-TYPE_FLAGS = @DEBUG_CFLAGS@ -DPURECOV -DNO_JUMP_TABLE
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURECOV -DNO_JUMP_TABLE
ENABLE_ALLOC_TYPE_VARS += purecov
else
ifeq ($(TYPE),gcov)
PURIFY =
TYPEMARKER = .gcov
-TYPE_FLAGS = @DEBUG_CFLAGS@ -DERTS_GCOV -DNO_JUMP_TABLE -fprofile-arcs -ftest-coverage -O0 -DERTS_CAN_INLINE=0 -DERTS_INLINE=
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DERTS_GCOV -DNO_JUMP_TABLE -fprofile-arcs -ftest-coverage -O0 -DERTS_CAN_INLINE=0 -DERTS_INLINE=
ifneq ($(findstring solaris,$(TARGET)),solaris)
LIBS += -lgcov
endif
@@ -84,7 +92,7 @@ else
ifeq ($(TYPE),valgrind)
PURIFY =
TYPEMARKER = .valgrind
-TYPE_FLAGS = @DEBUG_CFLAGS@ -DVALGRIND -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
ENABLE_ALLOC_TYPE_VARS += valgrind
else
@@ -147,6 +155,15 @@ endif
TF_MARKER=$(TYPEMARKER)$(FLAVOR_MARKER)
+ifeq ($(FLAVOR)-@ERTS_BUILD_SMP_EMU@,smp-no)
+VOID_EMULATOR = '*** SMP emulator disabled by configure'
+else
+ifeq ($(TYPE)-@HAVE_VALGRIND@,valgrind-no)
+VOID_EMULATOR = '*** valgrind emulator disabled by configure'
+else
+VOID_EMULATOR =
+endif
+endif
OPSYS=@OPSYS@
sol2CFLAGS=
@@ -187,7 +204,7 @@ else
EMU_CC = @EMU_CC@
endif
WFLAGS = @WFLAGS@
-CFLAGS = @STATIC_CFLAGS@ $(TYPE_FLAGS) $(FLAVOR_FLAGS) @DEFS@ $(WFLAGS) $(THR_DEFS) $(ARCHCFLAGS)
+CFLAGS = @STATIC_CFLAGS@ $(TYPE_FLAGS) $(FLAVOR_FLAGS) $(DEFS) $(WFLAGS) $(THR_DEFS) $(ARCHCFLAGS)
HCC = @HCC@
LD = @LD@
DEXPORT = @DEXPORT@
@@ -198,6 +215,7 @@ RM = @RM@
MKDIR = @MKDIR@
USING_MINGW=@MIXED_CYGWIN_MINGW@
+MIXED_MSYS=@MIXED_MSYS@
ifeq ($(TARGET),win32)
LIB_PREFIX=
@@ -237,9 +255,12 @@ ifeq ($(TARGET), win32)
GEN_OPT_FLGS = $(OPT_LEVEL)
UNROLL_FLG =
RC=rc.sh
+ifeq ($(MIXED_MSYS), yes)
+MAKE_PRELOAD_EXTRA = -msys
+endif
ifeq ($(USING_MINGW), yes)
-RES_EXT=@OBJEXT@
-MAKE_PRELOAD_EXTRA=-windres
+RES_EXT = @OBJEXT@
+MAKE_PRELOAD_EXTRA += " -windres"
else
RES_EXT=res
endif
@@ -259,30 +280,29 @@ CS_PURIFY =
CS_TYPE_FLAGS = $(subst QUANTIFY,FAKE_QUANTIFY, \
$(subst PURIFY,FAKE_PURIFY, $(TYPE_FLAGS)))
endif
-CS_CFLAGS_ = $(CS_TYPE_FLAGS) @DEFS@ $(WFLAGS)
+CS_CFLAGS_ = $(CS_TYPE_FLAGS) $(DEFS) $(WFLAGS)
ifeq ($(GCC),yes)
CS_CFLAGS = $(subst -O2, $(GEN_OPT_FLGS) $(UNROLL_FLG), $(CS_CFLAGS_))
else
CS_CFLAGS = $(CS_CFLAGS_)
endif
CS_LDFLAGS = $(LDFLAGS)
-CS_LIBS = -L../lib/internal/$(TARGET) -lerts_internal @ERTS_INTERNAL_X_LIBS@
+CS_LIBS = -L../lib/internal/$(TARGET) -lerts_internal$(TYPEMARKER) @ERTS_INTERNAL_X_LIBS@
LIBS += @TERMCAP_LIB@ -L../lib/internal/$(TARGET) @ERTS_INTERNAL_X_LIBS@
ifdef Z_LIB
# Use shared zlib library
LIBS += $(Z_LIB)
+DEPLIBS =
else
+DEPLIBS=$(ZLIB_LIBRARY)
ifeq ($(TARGET),win32)
-LIBS += -L$(ERL_TOP)/erts/emulator/zlib/obj/$(TARGET)/$(TYPE) -lz
-DEPLIBS = $(ERL_TOP)/erts/emulator/zlib/obj/$(TARGET)/$(TYPE)/z.lib
+LIBS += -L$(ZLIB_OBJDIR) -lz
else
# Build on darwin fails if -lz is used
-LIBS += $(ERL_TOP)/erts/emulator/zlib/obj/$(TARGET)/$(TYPE)/libz.a
-DEPLIBS = $(ERL_TOP)/erts/emulator/zlib/obj/$(TARGET)/$(TYPE)/libz.a
+LIBS += $(ZLIB_LIBRARY)
endif
-
endif
ifeq ($(TARGET),win32)
@@ -291,7 +311,8 @@ else
LIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
endif
-DEPLIBS += $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
+EPCRE_LIB = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)/$(LIB_PREFIX)epcre$(LIB_SUFFIX)
+DEPLIBS += $(EPCRE_LIB)
PERFCTR_PATH=@PERFCTR_PATH@
USE_PERFCTR=@USE_PERFCTR@
@@ -308,12 +329,9 @@ LIBSCTP = @LIBSCTP@
ORG_THR_LIBS=@EMU_THR_LIBS@
THR_LIB_NAME=@EMU_THR_LIB_NAME@
-ifneq ($(strip $(THR_LIB_NAME)),)
-DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal_r$(TYPEMARKER)$(LIB_SUFFIX) \
- $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)ethread$(TYPEMARKER)$(LIB_SUFFIX)
-else
-DEPLIBS += $(ERL_TOP)/erts/lib/internal/$(TARGET)/$(LIB_PREFIX)erts_internal$(TYPEMARKER)$(LIB_SUFFIX)
-endif
+ERTS_LIB_DIR = $(ERL_TOP)/erts/lib_src
+ERTS_LIB = $(ERTS_LIB_DIR)/obj/$(TARGET)/$(TYPE)/MADE
+DEPLIBS += $(ERTS_LIB)
THR_LIBS=$(subst -l$(THR_LIB_NAME),-l$(THR_LIB_NAME)$(TYPEMARKER), \
$(subst -lerts_internal_r,-lerts_internal_r$(TYPEMARKER),$(ORG_THR_LIBS)))
@@ -349,8 +367,7 @@ OBJDIR = obj/$(TTF_DIR)
CREATE_DIRS += $(OBJDIR) \
pcre/obj/$(TARGET)/$(TYPE) \
- zlib/obj/$(TARGET)/$(TYPE)
-
+ $(ZLIB_OBJDIR)
BINDIR = $(ERL_TOP)/bin/$(TARGET)
@@ -358,7 +375,6 @@ ERLANG_OSTYPE = @ERLANG_OSTYPE@
ENABLE_ALLOC_TYPE_VARS += @ERLANG_OSTYPE@
-EMULATOR_EXECUTABLE_ELIB = beam.elib$(TF_MARKER)
ifeq ($(TARGET), win32)
EMULATOR_EXECUTABLE = beam$(TF_MARKER).dll
else
@@ -374,15 +390,12 @@ else
UNIX_ONLY_BUILDS =
endif
-ifeq ($(TYPE)-@HAVE_VALGRIND@,valgrind-no)
-all:
- @echo '*** valgrind not found by configure'
-else
-ifeq ($(FLAVOR)-@ERTS_BUILD_SMP_EMU@,smp-no)
+.PHONY: all
+ifdef VOID_EMULATOR
all:
- @echo '*** Omitted build of emulator with smp support'
+ @echo $(VOID_EMULATOR)' - omitted target all'
else
-all: generate erts_lib zlib pcre $(BINDIR)/$(EMULATOR_EXECUTABLE) $(UNIX_ONLY_BUILDS)
+all: $(BINDIR)/$(EMULATOR_EXECUTABLE) $(UNIX_ONLY_BUILDS)
ifeq ($(OMIT_OMIT_FP),yes)
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@@ -393,37 +406,25 @@ ifeq ($(OMIT_OMIT_FP),yes)
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
endif
endif
-endif
-ifdef Z_LIB
-zlib:
- @echo 'Skip zlib directory, use shared library'
-else
-zlib:
- @set -e ; cd zlib && $(MAKE) TYPE=$(TYPE) $(TYPE)
-endif
+include zlib/zlib.mk
+include pcre/pcre.mk
-pcre:
- @set -e ; cd pcre && $(MAKE) TYPE=$(TYPE) $(TYPE)
-
-erts_lib:
- cd $(ERL_TOP)/erts/lib_src && $(MAKE) $(TYPE)
+$(ERTS_LIB):
+ cd $(ERTS_LIB_DIR) && $(MAKE) $(TYPE)
+.PHONY: clean
clean:
-ifeq ($(TARGET),win32)
- $(RM) -f $(TARGET)/beams.rc
-endif
- $(RM) -f $(TARGET)/*.c $(TARGET)/*.h $(TARGET)/depend.mk
- $(RM) -f $(TARGET)/*/*/*.c $(TARGET)/*/*/*.h $(TARGET)/*/*/*.S
- $(RM) -f $(ERL_TOP)/erts/emulator/obj/$(TARGET)/*/*/*.o
- $(RM) -f $(BINDIR)/beam $(BINDIR)/beam.*
- $(RM) -rf $(BINDIR)/child_setup $(BINDIR)/child_setup.*
- $(RM) -f $(BINDIR)/hipe_mkliterals $(BINDIR)/hipe_mkliterals.*
- @set -e ; cd zlib && $(MAKE) clean
- @set -e ; cd pcre && $(MAKE) clean
-
-.PHONY: all zlib pcre clean
-
+ $(RM) -f $(GENERATE)
+ $(RM) -rf $(TARGET)/*.c $(TARGET)/*.h $(TARGET)/*-GENERATED
+ $(RM) -rf $(TARGET)/*/*
+ $(RM) -rf obj/$(TARGET)
+ $(RM) -rf pcre/obj/$(TARGET) $(PCRE_GENINC)
+ $(RM) -rf zlib/obj/$(TARGET)
+ $(RM) -rf bin/$(TARGET)
+ cd $(ERTS_LIB_DIR) && $(MAKE) clean
+
+.PHONY: docs
docs:
# ----------------------------------------------------------------------
@@ -436,13 +437,11 @@ ifeq ($(TARGET),win32)
RELEASE_INCLUDES += sys/$(ERLANG_OSTYPE)/erl_win_dyn_driver.h
endif
-ifeq ($(TYPE)-@HAVE_VALGRIND@,valgrind-no)
-release_spec:
- @echo '*** valgrind not found by configure'
-else
-ifeq ($(FLAVOR)-@ERTS_BUILD_SMP_EMU@,smp-no)
+
+.PHONY: release_spec
+ifdef VOID_EMULATOR
release_spec:
- @echo '*** No emulator with smp support to install'
+ @echo $(VOID_EMULATOR)' - omitted target release_spec (install)'
else
release_spec: all
$(INSTALL_DIR) $(RELSYSDIR)
@@ -459,57 +458,60 @@ ifeq ($(ERLANG_OSTYPE), unix)
$(INSTALL_PROGRAM) $(BINDIR)/$(CS_EXECUTABLE) $(RELSYSDIR)/bin
endif
endif
-endif
+.PHONY: release_docs_spec
release_docs_spec:
# ----------------------------------------------------------------------
# Generated source code. Put in $(TARGET) directory
#
-.PHONY : generate
+_create_dirs := $(shell mkdir -p $(CREATE_DIRS))
-GENERATE= $(CREATE_DIRS) \
- $(TTF_DIR)/beam_opcodes.h \
- $(TARGET)/erl_bif_table.c \
- $(TARGET)/erl_version.h \
- $(TTF_DIR)/driver_tab.c \
- $(TTF_DIR)/erl_alloc_types.h
-
-ifeq ($(TARGET),win32)
-GENERATE += $(TARGET)/beams.rc
-else
-GENERATE += $(TARGET)/preload.c
-endif
+GENERATE =
+HIPE_ASM =
ifeq ($(findstring vxworks,$(TARGET)),vxworks)
else
ifdef HIPE_ENABLED
-GENERATE += $(TTF_DIR)/hipe_x86_asm.h \
+HIPE_ASM += $(TTF_DIR)/hipe_x86_asm.h \
$(TTF_DIR)/hipe_amd64_asm.h \
$(TTF_DIR)/hipe_sparc_asm.h \
$(TTF_DIR)/hipe_ppc_asm.h \
- $(TTF_DIR)/hipe_arm_asm.h \
+ $(TTF_DIR)/hipe_arm_asm.h
+
+GENERATE += $(HIPE_ASM) \
$(TTF_DIR)/hipe_literals.h \
$(BINDIR)/hipe_mkliterals$(TF_MARKER)
endif
endif
-ifeq ($(FLAVOR)-@ERTS_BUILD_SMP_EMU@,smp-no)
-GENERATE=
-endif
-
+ifdef DTRACE_ENABLED
+# global.h causes problems by including dtrace-wrapper.h which includes
+# the autogenerated erlang_dtrace.h ... so make erlang_dtrace.h very early.
+generate: $(TARGET)/erlang_dtrace.h $(GENERATE)
+else
generate: $(GENERATE)
+endif
ifdef HIPE_ENABLED
OPCODE_TABLES += hipe/hipe_ops.tab
endif
-$(TTF_DIR)/beam_opcodes.h $(TTF_DIR)/beam_opcodes.c: $(OPCODE_TABLES) utils/beam_makeops
+$(TTF_DIR)/beam_cold.h \
+$(TTF_DIR)/beam_hot.h \
+$(TTF_DIR)/beam_opcodes.c \
+$(TTF_DIR)/beam_opcodes.h \
+$(TTF_DIR)/beam_pred_funcs.h \
+$(TTF_DIR)/beam_tr_funcs.h \
+ : $(TTF_DIR)/OPCODES-GENERATED
+$(TTF_DIR)/OPCODES-GENERATED: $(OPCODE_TABLES) utils/beam_makeops
LANG=C $(PERL) utils/beam_makeops \
-wordsize @EXTERNAL_WORD_SIZE@ \
-outdir $(TTF_DIR) \
- -emulator $(OPCODE_TABLES)
+ -DUSE_VM_PROBES=$(if $(USE_VM_PROBES),1,0) \
+ -emulator $(OPCODE_TABLES) && echo $? >$(TTF_DIR)/OPCODES-GENERATED
+GENERATE += $(TTF_DIR)/OPCODES-GENERATED
# bif and atom table
ATOMS= beam/atom.names
@@ -529,32 +531,44 @@ BIFS += hipe/hipe_perfctr.tab
endif
endif
-TABLES= $(TARGET)/erl_bif_table.c $(TARGET)/erl_bif_table.h \
- $(TARGET)/erl_bif_wrap.c $(TARGET)/erl_bif_list.h \
- $(TARGET)/erl_atom_table.c $(TARGET)/erl_atom_table.h \
- $(TARGET)/erl_pbifs.c
-
-$(TABLES): $(ATOMS) $(BIFS) utils/make_tables
+$(TARGET)/erl_bif_table.c \
+$(TARGET)/erl_bif_table.h \
+$(TARGET)/erl_bif_wrap.c \
+$(TARGET)/erl_bif_list.h \
+$(TARGET)/erl_atom_table.c \
+$(TARGET)/erl_atom_table.h \
+$(TARGET)/erl_pbifs.c \
+ : $(TARGET)/TABLES-GENERATED
+$(TARGET)/TABLES-GENERATED: $(ATOMS) $(BIFS) utils/make_tables
LANG=C $(PERL) utils/make_tables -src $(TARGET) -include $(TARGET)\
- $(ATOMS) $(BIFS)
+ $(ATOMS) $(BIFS) && echo $? >$(TARGET)/TABLES-GENERATED
+GENERATE += $(TARGET)/TABLES-GENERATED
$(TTF_DIR)/erl_alloc_types.h: beam/erl_alloc.types utils/make_alloc_types
LANG=C $(PERL) utils/make_alloc_types -src $< -dst $@ $(ENABLE_ALLOC_TYPE_VARS)
+GENERATE += $(TTF_DIR)/erl_alloc_types.h
# version include file
$(TARGET)/erl_version.h: ../vsn.mk
LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(VSN)$(SERIALNO) $(TARGET)
+GENERATE += $(TARGET)/erl_version.h
# driver table
$(TTF_DIR)/driver_tab.c: Makefile.in
LANG=C $(PERL) utils/make_driver_tab -o $@ $(DRV_OBJS)
+GENERATE += $(TTF_DIR)/driver_tab.c
+
+
# Preloaded code.
#
# This list must be consistent with PRE_LOADED_MODULES in
# lib/kernel/src/Makefile.
ifeq ($(TARGET),win32)
-$(TARGET)/beams.rc: $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
+# On windows the preloaded objects are in a resource object.
+PRELOAD_OBJ = $(OBJDIR)/beams.$(RES_EXT)
+PRELOAD_SRC = $(TARGET)/beams.rc
+$(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/init.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \
@@ -564,7 +578,9 @@ $(TARGET)/beams.rc: $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/erlang.beam
LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@
else
-$(TARGET)/preload.c: $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
+PRELOAD_OBJ = $(OBJDIR)/preload.o
+PRELOAD_SRC = $(TARGET)/preload.c
+$(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/init.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \
@@ -575,6 +591,22 @@ $(TARGET)/preload.c: $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
LANG=C $(PERL) utils/make_preload -old $^ > $@
endif
+.PHONY : generate
+ifdef VOID_EMULATOR
+generate:
+ @echo $(VOID_EMULATOR)' - omitted target generate'
+else
+generate: $(TTF_DIR)/GENERATED $(PRELOAD_SRC)
+
+$(TTF_DIR)/GENERATED: $(GENERATE)
+ echo $? >$(TTF_DIR)/GENERATED
+endif
+
+$(TARGET)/erlang_dtrace.h: beam/erlang_dtrace.d
+ dtrace -h -C -Ibeam -s $< -o ./erlang_dtrace.tmp
+ sed -e '/^#define[ ]*ERLANG_[A-Z0-9_]*(.*)/y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/' ./erlang_dtrace.tmp > $@
+ rm ./erlang_dtrace.tmp
+
# ----------------------------------------------------------------------
# Pattern rules
#
@@ -598,11 +630,6 @@ INCLUDES += -I$(ERL_TOP)/erts/etc/vxworks
endif
ifeq ($(TARGET),win32)
-# Usually the same as the default rule, but certain platforms (i.e. win32) mix
-# different compilers
-$(OBJDIR)/beam_emu.o: beam/beam_emu.c
- $(EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
-
$(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c
$(CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@
@@ -616,9 +643,13 @@ $(OBJDIR)/beam_emu.o: beam/beam_emu.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) \
-OPT:Olimit=0 -WOPT:lpre=off:spre=off:epre=off \
$(INCLUDES) -c $< -o $@
+else
+# Usually the same as the default rule, but certain platforms (e.g. win32) mix
+# different compilers
+$(OBJDIR)/beam_emu.o: beam/beam_emu.c
+ $(EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
endif
-
$(OBJDIR)/%.o: beam/%.c
$(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@
@@ -651,7 +682,7 @@ endif
#
CS_SRC = sys/$(ERLANG_OSTYPE)/erl_child_setup.c
-$(BINDIR)/$(CS_EXECUTABLE): $(CS_SRC)
+$(BINDIR)/$(CS_EXECUTABLE): $(TTF_DIR)/GENERATED $(PRELOAD_SRC) $(CS_SRC) $(ERTS_LIB)
$(CS_PURIFY) $(CC) $(CS_LDFLAGS) -o $(BINDIR)/$(CS_EXECUTABLE) \
$(CS_CFLAGS) $(COMMON_INCLUDES) $(CS_SRC) $(CS_LIBS)
@@ -672,30 +703,15 @@ endif
# rebuilding (is this a good idea?) add a dummy dependency to this target.
#
-ifeq ($(findstring clearmake,$(MAKE)),clearmake)
-BEAMFILE_MAKEFLAG=-T
-else
-BEAMFILE_MAKEFLAG=
-endif
-
$(ERL_TOP)/lib/%.beam:
- cd $(@D)/../src && $(MAKE) $(BEAMFILE_MAKEFLAG) ../ebin/$(@F)
+ cd $(@D)/../src && $(MAKE) ../ebin/$(@F)
# ----------------------------------------------------------------------
# Object files
#
-# On windows the preloaded objects are in a resource object.
-
-ifeq ($(TARGET),win32)
-PRELOAD = $(OBJDIR)/beams.$(RES_EXT)
-else
-PRELOAD = $(OBJDIR)/preload.o
-endif
-
-
-INIT_OBJS = $(OBJDIR)/erl_main.o $(PRELOAD)
+INIT_OBJS = $(OBJDIR)/erl_main.o $(PRELOAD_OBJ)
EMU_OBJS = \
$(OBJDIR)/beam_emu.o $(OBJDIR)/beam_opcodes.o \
@@ -725,7 +741,7 @@ RUN_OBJS = \
$(OBJDIR)/external.o $(OBJDIR)/dist.o \
$(OBJDIR)/binary.o $(OBJDIR)/erl_db.o \
$(OBJDIR)/erl_db_util.o $(OBJDIR)/erl_db_hash.o \
- $(OBJDIR)/erl_db_tree.o $(OBJDIR)/fix_alloc.o \
+ $(OBJDIR)/erl_db_tree.o $(OBJDIR)/erl_thr_progress.o \
$(OBJDIR)/big.o $(OBJDIR)/hash.o \
$(OBJDIR)/index.o $(OBJDIR)/atom.o \
$(OBJDIR)/module.o $(OBJDIR)/export.o \
@@ -742,7 +758,8 @@ RUN_OBJS = \
$(OBJDIR)/erl_bif_re.o $(OBJDIR)/erl_unicode.o \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
$(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \
- $(OBJDIR)/erl_bif_binary.o
+ $(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \
+ $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
@@ -832,7 +849,21 @@ endif
BASE_OBJS = $(RUN_OBJS) $(EMU_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS)
-OBJS = $(BASE_OBJS) $(DRV_OBJS)
+before_DTrace_OBJS = $(BASE_OBJS) $(DRV_OBJS)
+
+DTRACE_OBJS =
+ifdef DTRACE_ENABLED_2STEP
+DTRACE_OBJS = $(OBJDIR)/erlang_dtrace.o
+$(OBJDIR)/erlang_dtrace.o: $(before_DTrace_OBJS) $(TARGET)/erlang_dtrace.h
+ dtrace -G -C -Ibeam \
+ -s beam/erlang_dtrace.d \
+ -o $@ $(before_DTrace_OBJS)
+endif
+
+OBJS = $(before_DTrace_OBJS) $(DTRACE_OBJS)
+
+$(INIT_OBJS): $(TTF_DIR)/GENERATED
+$(OBJS): $(TTF_DIR)/GENERATED
########################################
# HiPE section
@@ -857,30 +888,49 @@ $(OBJDIR)/%.o: hipe/%.c
$(BINDIR)/hipe_mkliterals$(TF_MARKER): $(OBJDIR)/hipe_mkliterals.o
$(CC) $(CFLAGS) $(INCLUDES) -o $@ $<
-$(OBJDIR)/hipe_mkliterals.o: $(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_ppc_asm.h
+$(OBJDIR)/hipe_mkliterals.o: $(HIPE_ASM) $(TTF_DIR)/erl_alloc_types.h \
+ $(TTF_DIR)/OPCODES-GENERATED $(TARGET)/TABLES-GENERATED
$(TTF_DIR)/hipe_literals.h: $(BINDIR)/hipe_mkliterals$(TF_MARKER)
$(BINDIR)/hipe_mkliterals$(TF_MARKER) -c > $@
-$(OBJDIR)/hipe_x86_glue.o: hipe/hipe_x86_glue.S $(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_literals.h hipe/hipe_mode_switch.h
-$(TTF_DIR)/hipe_x86_bifs.S: hipe/hipe_x86_bifs.m4 hipe/hipe_x86_asm.m4 hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
-$(OBJDIR)/hipe_x86_bifs.o: $(TTF_DIR)/hipe_x86_bifs.S $(TTF_DIR)/hipe_literals.h
-
-$(OBJDIR)/hipe_amd64_glue.o: hipe/hipe_amd64_glue.S $(TTF_DIR)/hipe_amd64_asm.h $(TTF_DIR)/hipe_literals.h hipe/hipe_mode_switch.h
-$(TTF_DIR)/hipe_amd64_bifs.S: hipe/hipe_amd64_bifs.m4 hipe/hipe_amd64_asm.m4 hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
-$(OBJDIR)/hipe_amd64_bifs.o: $(TTF_DIR)/hipe_amd64_bifs.S $(TTF_DIR)/hipe_literals.h
-
-$(OBJDIR)/hipe_sparc_glue.o: hipe/hipe_sparc_glue.S $(TTF_DIR)/hipe_sparc_asm.h hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
-$(TTF_DIR)/hipe_sparc_bifs.S: hipe/hipe_sparc_bifs.m4 hipe/hipe_sparc_asm.m4 hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
-$(OBJDIR)/hipe_sparc_bifs.o: $(TTF_DIR)/hipe_sparc_bifs.S $(TTF_DIR)/hipe_literals.h
-
-$(OBJDIR)/hipe_ppc_glue.o: hipe/hipe_ppc_glue.S $(TTF_DIR)/hipe_ppc_asm.h hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
-$(TTF_DIR)/hipe_ppc_bifs.S: hipe/hipe_ppc_bifs.m4 hipe/hipe_ppc_asm.m4 hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
-$(OBJDIR)/hipe_ppc_bifs.o: $(TTF_DIR)/hipe_ppc_bifs.S $(TTF_DIR)/hipe_literals.h
-
-$(OBJDIR)/hipe_arm_glue.o: hipe/hipe_arm_glue.S $(TTF_DIR)/hipe_arm_asm.h hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
-$(TTF_DIR)/hipe_arm_bifs.S: hipe/hipe_arm_bifs.m4 hipe/hipe_arm_asm.m4 hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
-$(OBJDIR)/hipe_arm_bifs.o: $(TTF_DIR)/hipe_arm_bifs.S $(TTF_DIR)/hipe_literals.h
+$(OBJDIR)/hipe_x86_glue.o: hipe/hipe_x86_glue.S \
+ $(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_literals.h \
+ hipe/hipe_mode_switch.h
+$(TTF_DIR)/hipe_x86_bifs.S: hipe/hipe_x86_bifs.m4 hipe/hipe_x86_asm.m4 \
+ hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+$(OBJDIR)/hipe_x86_bifs.o: $(TTF_DIR)/hipe_x86_bifs.S \
+ $(TTF_DIR)/hipe_literals.h
+
+$(OBJDIR)/hipe_amd64_glue.o: hipe/hipe_amd64_glue.S \
+ $(TTF_DIR)/hipe_amd64_asm.h $(TTF_DIR)/hipe_literals.h \
+ hipe/hipe_mode_switch.h
+$(TTF_DIR)/hipe_amd64_bifs.S: hipe/hipe_amd64_bifs.m4 hipe/hipe_amd64_asm.m4 \
+ hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+$(OBJDIR)/hipe_amd64_bifs.o: $(TTF_DIR)/hipe_amd64_bifs.S \
+ $(TTF_DIR)/hipe_literals.h
+
+$(OBJDIR)/hipe_sparc_glue.o: hipe/hipe_sparc_glue.S \
+ $(TTF_DIR)/hipe_sparc_asm.h hipe/hipe_mode_switch.h \
+ $(TTF_DIR)/hipe_literals.h
+$(TTF_DIR)/hipe_sparc_bifs.S: hipe/hipe_sparc_bifs.m4 hipe/hipe_sparc_asm.m4 \
+ hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+$(OBJDIR)/hipe_sparc_bifs.o: $(TTF_DIR)/hipe_sparc_bifs.S \
+ $(TTF_DIR)/hipe_literals.h
+
+$(OBJDIR)/hipe_ppc_glue.o: hipe/hipe_ppc_glue.S $(TTF_DIR)/hipe_ppc_asm.h \
+ hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
+$(TTF_DIR)/hipe_ppc_bifs.S: hipe/hipe_ppc_bifs.m4 hipe/hipe_ppc_asm.m4 \
+ hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+$(OBJDIR)/hipe_ppc_bifs.o: $(TTF_DIR)/hipe_ppc_bifs.S \
+ $(TTF_DIR)/hipe_literals.h
+
+$(OBJDIR)/hipe_arm_glue.o: hipe/hipe_arm_glue.S $(TTF_DIR)/hipe_arm_asm.h \
+ hipe/hipe_mode_switch.h $(TTF_DIR)/hipe_literals.h
+$(TTF_DIR)/hipe_arm_bifs.S: hipe/hipe_arm_bifs.m4 hipe/hipe_arm_asm.m4 \
+ hipe/hipe_bif_list.m4 $(TARGET)/erl_bif_list.h hipe/hipe_gbif_list.h
+$(OBJDIR)/hipe_arm_bifs.o: $(TTF_DIR)/hipe_arm_bifs.S \
+ $(TTF_DIR)/hipe_literals.h
# end of HiPE section
########################################
@@ -927,13 +977,6 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS)
endif
-#
-# Create directories
-#
-
-$(CREATE_DIRS):
- $(MKDIR) -p $@
-
# ----------------------------------------------------------------------
# Dependencies
#
@@ -945,7 +988,7 @@ $(TARGET)/Makefile: Makefile.in
#SED_REPL_WIN_DRIVE=s|\([ ]\)\([A-Za-z]\):|\1/cygdrive/\2|g;s|^\([A-Za-z]\):|/cygdrive/\1|g
SED_REPL_O=s|^\([^:]*:\)|$$(OBJDIR)/\1|g
-SED_REPL_ELIB_O=s|^\([^:]*\).o[ ]*:|$$(OBJDIR)/\1.elib.o:|g
+SED_REPL_O_ZLIB=s|^\([^:]*:\)|$$(ZLIB_OBJDIR)/\1|g
SED_REPL_TTF_DIR=s|$(TTF_DIR)/|$$(TTF_DIR)/|g
SED_REPL_ERL_TOP=s|\([ ]\)$(ERL_TOP)/|\1$$(ERL_TOP)/|g;s|^$(ERL_TOP)/|$$(ERL_TOP)/|g
SED_REPL_POLL=s|$$(OBJDIR)/erl_poll.o|$$(OBJDIR)/erl_poll.kp.o $$(OBJDIR)/erl_poll.nkp.o|g
@@ -965,7 +1008,7 @@ SED_SUFFIX=
endif
SED_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_O);$(SED_REPL_TTF_DIR);$(SED_REPL_ERL_TOP)$(SED_SUFFIX)'
-SED_ELIB_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_ELIB_O);$(SED_REPL_TTF_DIR);$(SED_REPL_ERL_TOP)$(SED_SUFFIX)'
+SED_DEPEND_ZLIB=sed '$(SED_PREFIX)$(SED_REPL_O_ZLIB)'
ifdef HIPE_ENABLED
HIPE_SRC=$(wildcard hipe/*.c)
@@ -974,7 +1017,8 @@ HIPE_SRC=
endif
BEAM_SRC=$(wildcard beam/*.c)
-DRV_SRC=$(wildcard drivers/common/*.c) $(wildcard drivers/$(ERLANG_OSTYPE)/*.c)
+DRV_COMMON_SRC=$(wildcard drivers/common/*.c)
+DRV_OSTYPE_SRC=$(wildcard drivers/$(ERLANG_OSTYPE)/*.c)
ALL_SYS_SRC=$(wildcard sys/$(ERLANG_OSTYPE)/*.c) $(wildcard sys/common/*.c)
TARGET_SRC=$(wildcard $(TARGET)/*.c) $(wildcard $(TTF_DIR)/*.c)
@@ -985,7 +1029,7 @@ ifeq ($(TARGET),win32)
#DEP_CC=$(EMU_CC)
DEP_CC=$(CC)
-DEP_FLAGS=-MM $(subst -O2,,$(CFLAGS)) $(INCLUDES) -I../etc/win32 -Idrivers/common
+DEP_FLAGS=-MM $(subst -O2,,$(CFLAGS)) $(INCLUDES) -I../etc/win32 -Idrivers/common -Idrivers/$(ERLANG_OSTYPE)
# ifeq (@MIXED_CYGWIN_VC@,yes)
# VC++ used for compiling. If __GNUC__ is defined we will include
# other headers then when compiling which will result in faulty
@@ -1005,28 +1049,40 @@ MG_FLAG=-MG
endif
DEP_CC=$(CC)
-DEP_FLAGS=-MM $(MG_FLAG) $(CFLAGS) $(INCLUDES) -Idrivers/common
+DEP_FLAGS=-MM $(MG_FLAG) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE)
SYS_SRC=$(ALL_SYS_SRC)
endif
+.PHONY: depend
+ifdef VOID_EMULATOR
depend:
+ @echo $(VOID_EMULATOR)' - omitted target depend'
+else
+depend: $(TTF_DIR)/depend.mk
+$(TTF_DIR)/depend.mk: $(TTF_DIR)/GENERATED $(PRELOAD_SRC)
$(DEP_CC) $(DEP_FLAGS) $(BEAM_SRC) \
- | $(SED_DEPEND) > $(TARGET)/depend.mk
- $(DEP_CC) $(DEP_FLAGS) $(DRV_SRC) \
- | $(SED_DEPEND) >> $(TARGET)/depend.mk
+ | $(SED_DEPEND) > $(TTF_DIR)/depend.mk
+ $(DEP_CC) $(DEP_FLAGS) -DLIBSCTP=$(LIBSCTP) $(DRV_COMMON_SRC) \
+ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
+ $(DEP_CC) $(DEP_FLAGS) -I../etc/$(ERLANG_OSTYPE) $(DRV_OSTYPE_SRC) \
+ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
$(DEP_CC) $(DEP_FLAGS) $(SYS_SRC) \
- | $(SED_DEPEND) >> $(TARGET)/depend.mk
+ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
$(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \
- | $(SED_DEPEND) >> $(TARGET)/depend.mk
-ifneq ($(TARGET),win32)
- $(DEP_CC) $(DEP_FLAGS) $(ELIB_C_FILES) \
- | $(SED_ELIB_DEPEND) >> $(TARGET)/depend.mk
-endif
+ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
+ $(DEP_CC) $(DEP_FLAGS) $(ZLIB_SRC) \
+ | $(SED_DEPEND_ZLIB) >> $(TTF_DIR)/depend.mk
ifdef HIPE_ENABLED
$(DEP_CC) $(DEP_FLAGS) $(HIPE_SRC) \
- | $(SED_DEPEND) >> $(TARGET)/depend.mk
+ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
+endif
+ cd $(ERTS_LIB_DIR) && $(MAKE) depend
endif
--include $(TARGET)/depend.mk
-
-
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),generate)
+ifndef VOID_EMULATOR
+-include $(TTF_DIR)/depend.mk
+endif
+endif
+endif
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index b97705ed96..d7c7f117cf 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -75,7 +75,7 @@ void atom_info(int to, void *to_arg)
index_info(to, to_arg, &erts_atom_table);
#ifdef ERTS_ATOM_PUT_OPS_STAT
erts_print(to, to_arg, "atom_put_ops: %ld\n",
- erts_smp_atomic_read(&atom_put_ops));
+ erts_smp_atomic_read_nob(&atom_put_ops));
#endif
if (lock)
@@ -213,7 +213,7 @@ am_atom_put(const char* name, int len)
len = MAX_ATOM_LENGTH;
}
#ifdef ERTS_ATOM_PUT_OPS_STAT
- erts_smp_atomic_inc(&atom_put_ops);
+ erts_smp_atomic_inc_nob(&atom_put_ops);
#endif
a.len = len;
a.name = (byte*)name;
@@ -309,7 +309,7 @@ init_atom_table(void)
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
#ifdef ERTS_ATOM_PUT_OPS_STAT
- erts_smp_atomic_init(&atom_put_ops, 0);
+ erts_smp_atomic_init_nob(&atom_put_ops, 0);
#endif
erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h
index cb245a87b1..fd9c04d3d0 100644
--- a/erts/emulator/beam/atom.h
+++ b/erts/emulator/beam/atom.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -34,7 +34,7 @@
/* Internal atom cache needs MAX_ATOM_TABLE_SIZE to be less than an
unsigned 32 bit integer. See external.c(erts_encode_ext_dist_header_setup)
for more details. */
-#define MAX_ATOM_TABLE_SIZE ((MAX_ATOM_INDEX + 1 < (1UL << 32)) ? MAX_ATOM_INDEX + 1 : (1UL << 32))
+#define MAX_ATOM_TABLE_SIZE ((MAX_ATOM_INDEX + 1 < (UWORD_CONSTANT(1) << 32)) ? MAX_ATOM_INDEX + 1 : (UWORD_CONSTANT(1) << 32))
#else
#define MAX_ATOM_TABLE_SIZE (MAX_ATOM_INDEX + 1)
#endif
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 68d64fb7b0..02735d4b68 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2011. All Rights Reserved.
+# Copyright Ericsson AB 1996-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -69,6 +69,8 @@ atom ac
atom active
atom all
atom all_but_first
+atom alloc_info
+atom alloc_sizes
atom allocated
atom allocated_areas
atom allocator
@@ -93,6 +95,7 @@ atom atom
atom atom_used
atom attributes
atom await_proc_exit
+atom await_sched_wall_time_modifications
atom awaiting_load
atom awaiting_unload
atom backtrace backtrace_depth
@@ -156,6 +159,8 @@ atom cr
atom crlf
atom creation
atom current_function
+atom current_location
+atom current_stacktrace
atom data
atom debug_flags
atom delay_trap
@@ -235,6 +240,7 @@ atom generational
atom get_seq_token
atom get_tcw
atom getenv
+atom gather_sched_wall_time_result
atom getting_linked
atom getting_unlinked
atom global
@@ -242,6 +248,7 @@ atom global_heaps_size
atom Gt='>'
atom grun
atom group_leader
+atom have_dt_utag
atom heap_block_size
atom heap_size
atom heap_sizes
@@ -550,8 +557,10 @@ atom waiting
atom wall_clock
atom warning
atom warning_msg
+atom scheduler_wall_time
atom wordsize
atom write_concurrency
atom xor
+atom x86
atom yes
atom yield
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index d76a7d8e9f..78a9d76a20 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -33,12 +33,14 @@
#include "beam_catches.h"
#include "erl_binary.h"
#include "erl_nif.h"
+#include "erl_thr_progress.h"
static void set_default_trace_pattern(Eterm module);
static Eterm check_process_code(Process* rp, Module* modp);
static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp);
static void delete_export_references(Eterm module);
static int purge_module(int module);
+static void decrement_refc(BeamInstr* code);
static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
@@ -49,11 +51,11 @@ load_module_2(BIF_ALIST_2)
{
Eterm reason;
Eterm* hp;
- int i;
int sz;
byte* code;
Eterm res;
byte* temp_alloc = NULL;
+ struct LoaderState* stp;
if (is_not_atom(BIF_ARG_1)) {
error:
@@ -63,49 +65,37 @@ load_module_2(BIF_ALIST_2)
if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
-
- erts_export_consolidate();
-
hp = HAlloc(BIF_P, 3);
+
+ /*
+ * Read the BEAM file and prepare the module for loading.
+ */
+ stp = erts_alloc_loader_state();
sz = binary_size(BIF_ARG_2);
- if ((i = erts_load_module(BIF_P, 0,
- BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) {
- switch (i) {
- case -1: reason = am_badfile; break;
- case -2: reason = am_nofile; break;
- case -3: reason = am_not_purged; break;
- case -4:
- reason = am_atom_put("native_code", sizeof("native_code")-1);
- break;
- case -5:
- {
- /*
- * The module contains an on_load function. The loader
- * has loaded the module as usual, except that the
- * export entries does not point into the module, so it
- * is not possible to call any code in the module.
- */
-
- ERTS_DECL_AM(on_load);
- reason = AM_on_load;
- break;
- }
- default: reason = am_badfile; break;
- }
+ reason = erts_prepare_loading(stp, BIF_P, BIF_P->group_leader,
+ &BIF_ARG_1, code, sz);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ if (reason != NIL) {
res = TUPLE2(hp, am_error, reason);
- goto done;
+ BIF_RET(res);
}
- set_default_trace_pattern(BIF_ARG_1);
- res = TUPLE2(hp, am_module, BIF_ARG_1);
+ /*
+ * Stop all other processes and finish the loading of the module.
+ */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ reason = erts_finish_loading(stp, BIF_P, 0, &BIF_ARG_1);
+ if (reason != NIL) {
+ res = TUPLE2(hp, am_error, reason);
+ } else {
+ set_default_trace_pattern(BIF_ARG_1);
+ res = TUPLE2(hp, am_module, BIF_ARG_1);
+ }
- done:
- erts_free_aligned_binary_bytes(temp_alloc);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
BIF_RET(res);
}
@@ -118,12 +108,12 @@ BIF_RETTYPE purge_module_1(BIF_ALIST_1)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_export_consolidate();
purge_res = purge_module(atom_val(BIF_ARG_1));
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (purge_res < 0) {
@@ -152,16 +142,33 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
Eterm res;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_export_consolidate();
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
return res;
}
+BIF_RETTYPE
+check_old_code_1(BIF_ALIST_1)
+{
+ Module* modp;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ modp = erts_get_module(BIF_ARG_1);
+ if (modp == NULL) { /* Doesn't exist. */
+ BIF_RET(am_false);
+ } else if (modp->old_code == NULL) { /* No old code. */
+ BIF_RET(am_false);
+ }
+ BIF_RET(am_true);
+}
+
Eterm
check_process_code_2(BIF_ALIST_2)
{
@@ -175,6 +182,13 @@ check_process_code_2(BIF_ALIST_2)
Eterm res;
if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
goto error;
+ modp = erts_get_module(BIF_ARG_2);
+ if (modp == NULL) { /* Doesn't exist. */
+ return am_false;
+ } else if (modp->old_code == NULL) { /* No old code. */
+ return am_false;
+ }
+
#ifdef ERTS_SMP
rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
@@ -188,7 +202,6 @@ check_process_code_2(BIF_ALIST_2)
ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
BIF_ARG_1, BIF_ARG_2);
}
- modp = erts_get_module(BIF_ARG_2);
res = check_process_code(rp, modp);
#ifdef ERTS_SMP
if (BIF_P != rp) {
@@ -216,7 +229,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
goto badarg;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
{
Module *modp = erts_get_module(BIF_ARG_1);
@@ -237,7 +250,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (res == am_badarg) {
@@ -329,7 +342,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (BIF_ARG_2 == am_true) {
int i;
@@ -368,7 +381,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
modp->catches = BEAM_CATCHES_NIL;
remove_from_address_table(code);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
}
@@ -412,11 +425,6 @@ check_process_code(Process* rp, Module* modp)
#endif
#define INSIDE(a) (start <= (a) && (a) < end)
- if (modp == NULL) { /* Doesn't exist. */
- return am_false;
- } else if (modp->old_code == NULL) { /* No old code. */
- return am_false;
- }
/*
* Pick up limits for the module.
@@ -546,6 +554,7 @@ check_process_code(Process* rp, Module* modp)
} else {
Eterm* literals;
Uint lit_size;
+ struct erl_off_heap_header* oh;
/*
	 * Try to get rid of constants by garbage collecting.
@@ -559,7 +568,9 @@ check_process_code(Process* rp, Module* modp)
(void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
literals = (Eterm *) modp->old_code[MI_LITERALS_START];
lit_size = (Eterm *) modp->old_code[MI_LITERALS_END] - literals;
- erts_garbage_collect_literals(rp, literals, lit_size);
+ oh = (struct erl_off_heap_header *)
+ modp->old_code[MI_LITERALS_OFF_HEAP];
+ erts_garbage_collect_literals(rp, literals, lit_size, oh);
}
}
return am_false;
@@ -567,7 +578,7 @@ check_process_code(Process* rp, Module* modp)
}
#define in_area(ptr,start,nbytes) \
- ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
static int
any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
@@ -637,9 +648,6 @@ purge_module(int module)
* Any code to purge?
*/
if (modp->old_code == 0) {
- if (display_loads) {
- erts_printf("No code to purge for %T\n", make_atom(module));
- }
return -1;
}
@@ -660,6 +668,7 @@ purge_module(int module)
end = (BeamInstr *)((char *)code + modp->old_code_length);
erts_cleanup_funs_on_purge(code, end);
beam_catches_delmod(modp->old_catches, code, modp->old_code_length);
+ decrement_refc(code);
erts_free(ERTS_ALC_T_CODE, (void *) code);
modp->old_code = NULL;
modp->old_code_length = 0;
@@ -669,6 +678,23 @@ purge_module(int module)
}
static void
+decrement_refc(BeamInstr* code)
+{
+ struct erl_off_heap_header* oh =
+ (struct erl_off_heap_header *) code[MI_LITERALS_OFF_HEAP];
+
+ while (oh) {
+ Binary* bptr;
+ ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
+ bptr = ((ProcBin*)oh)->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ oh = oh->next;
+ }
+}
+
+static void
remove_from_address_table(BeamInstr* code)
{
int i;
@@ -710,10 +736,10 @@ delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp)
if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) {
if (c_p && c_p_locks)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_clear_module_break(modp);
modp->code[MI_NUM_BREAKPOINTS] = 0;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
if (c_p && c_p_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
@@ -755,7 +781,7 @@ delete_export_references(Eterm module)
}
-int
+Eterm
beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
{
Module* modp = erts_put_module(module);
@@ -766,15 +792,12 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
*/
if (modp->code != NULL && modp->old_code != NULL) {
- return -3;
+ return am_not_purged;
} else if (modp->old_code == NULL) { /* Make the current version old. */
- if (display_loads) {
- erts_printf("saving old code\n");
- }
delete_code(c_p, c_p_locks, modp);
delete_export_references(module);
}
- return 0;
+ return NIL;
}
static int
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 31910888d1..d772bea02f 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -167,7 +167,7 @@ erts_bp_init(void) {
int
erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, match_spec,
(BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
}
@@ -175,7 +175,7 @@ erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
int
erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, match_spec,
(BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
@@ -184,7 +184,7 @@ erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
void
erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
@@ -198,35 +198,35 @@ void erts_clear_time_trace_bif(BeamInstr *pc) {
int
erts_set_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, NULL,
(BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
}
int
erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, NULL,
(BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
}
int
erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return set_break(mfa, specified, NULL,
(BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
}
int
erts_clear_trace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_trace_breakpoint));
}
int
erts_clear_mtrace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_mtrace_breakpoint));
}
@@ -238,41 +238,41 @@ erts_clear_mtrace_bif(BeamInstr *pc) {
int
erts_clear_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_debug_breakpoint));
}
int
erts_clear_count_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_count_breakpoint));
}
int
erts_clear_time_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified,
(BeamInstr) BeamOp(op_i_time_breakpoint));
}
int
erts_clear_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
return clear_break(mfa, specified, 0);
}
int
erts_clear_module_break(Module *modp) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
return clear_module_break(modp, NULL, 0, 0);
}
int
erts_clear_function_break(Module *modp, BeamInstr *pc) {
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
}
@@ -408,7 +408,7 @@ erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
if (bdc) {
if (count_ret) {
- *count_ret = (Sint) erts_smp_atomic_read(&bdc->acount);
+ *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount);
}
return !0;
}
@@ -495,16 +495,6 @@ erts_find_local_func(Eterm mfa[3]) {
return NULL;
}
-/* bp_hash */
-ERTS_INLINE Uint bp_sched2ix() {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esdp;
- esdp = erts_get_scheduler_data();
- return esdp->no - 1;
-#else
- return 0;
-#endif
-}
static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
Uint size = sizeof(bp_data_time_item_t)*n;
Uint i;
@@ -612,9 +602,13 @@ static void bp_hash_delete(bp_time_hash_t *hash) {
static void bp_time_diff(bp_data_time_item_t *item, /* out */
process_breakpoint_time_t *pbt, /* in */
Uint ms, Uint s, Uint us) {
- int dms,ds,dus;
+ int ds,dus;
+#ifdef DEBUG
+ int dms;
+
dms = ms - pbt->ms;
+#endif
ds = s - pbt->s;
dus = us - pbt->us;
@@ -622,7 +616,9 @@ static void bp_time_diff(bp_data_time_item_t *item, /* out */
* this is ok.
*/
+#ifdef DEBUG
ASSERT(dms >= 0 || ds >= 0 || dus >= 0);
+#endif
if (dus < 0) {
dus += 1000000;
@@ -958,24 +954,24 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif,
if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
if (count_op == erts_break_stop) {
- count = erts_smp_atomic_read(&bdc->acount);
+ count = erts_smp_atomic_read_nob(&bdc->acount);
if (count >= 0) {
while(1) {
- res = erts_smp_atomic_cmpxchg(&bdc->acount, -count - 1, count);
+ res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count);
if ((res == count) || count < 0) break;
count = res;
}
}
} else {
/* Reset call counter */
- erts_smp_atomic_set(&bdc->acount, 0);
+ erts_smp_atomic_set_nob(&bdc->acount, 0);
}
} else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
BpDataTime *bdt = (BpDataTime *) bd;
Uint i = 0;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
if (count_op == erts_break_stop) {
bdt->pause = 1;
@@ -1097,7 +1093,7 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif,
}
} else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
BpDataCount *bdc = (BpDataCount *) bd;
- erts_smp_atomic_init(&bdc->acount, 0);
+ erts_smp_atomic_init_nob(&bdc->acount, 0);
}
if (bif == BREAK_IS_ERL) {
@@ -1333,15 +1329,19 @@ static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
}
static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
- BpData **rs = (BpData **) pc[-4];
+ BpData **rs;
BpData *bd = NULL, *ebd = NULL;
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ if (erts_is_native_break(pc)) {
+ return NULL;
+ }
+ rs = (BpData **) pc[-4];
if (! rs) {
return NULL;
}
- bd = ebd = rs[bp_sched2ix()];
+ bd = ebd = rs[erts_bp_sched2ix()];
ASSERT(bd);
if ( (break_op == 0) || (bd->this_instr == break_op)) {
return bd;
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index bd8a7249a7..167069552f 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -144,8 +144,6 @@ extern erts_smp_spinlock_t erts_bp_lock;
#define ErtsSmpBPUnlock(BDC)
#endif
-ERTS_INLINE Uint bp_sched2ix(void);
-
#ifdef ERTS_SMP
#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
#else
@@ -165,8 +163,8 @@ do { \
bdc = (BpDataCount *) bdc->next; \
ASSERT(bdc); \
bds[ix] = (BpData *) bdc; \
- count = erts_smp_atomic_read(&bdc->acount); \
- if (count >= 0) erts_smp_atomic_inc(&bdc->acount); \
+ count = erts_smp_atomic_read_nob(&bdc->acount); \
+ if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \
*(instr_result) = bdc->orig_instr; \
} while (0)
@@ -247,4 +245,19 @@ BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
+ERTS_GLB_INLINE Uint erts_bp_sched2ix(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
+{
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esdp;
+ esdp = erts_get_scheduler_data();
+ return esdp->no - 1;
+#else
+ return 0;
+#endif
+}
+#endif
+
#endif /* _BEAM_BP_H */
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
index e795b4efbd..406ef1db5f 100644
--- a/erts/emulator/beam/beam_catches.c
+++ b/erts/emulator/beam/beam_catches.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -22,21 +22,27 @@
#endif
#include "sys.h"
#include "beam_catches.h"
+#include "global.h"
-/* XXX: should use dynamic reallocation */
-#define TABSIZ (16*1024)
-static struct {
+/* R14B04 has about 380 catches when starting erlang */
+#define DEFAULT_TABSIZE (1024)
+typedef struct {
BeamInstr *cp;
unsigned cdr;
-} beam_catches[TABSIZ];
+} beam_catch_t;
static int free_list;
static unsigned high_mark;
+static unsigned tabsize;
+static beam_catch_t *beam_catches;
void beam_catches_init(void)
{
+ tabsize = DEFAULT_TABSIZE;
free_list = -1;
high_mark = 0;
+
+ beam_catches = erts_alloc(ERTS_ALC_T_CODE, sizeof(beam_catch_t)*DEFAULT_TABSIZE);
}
unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
@@ -50,16 +56,21 @@ unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
* This avoids the need to initialise the free list in
* beam_catches_init(), which would cost O(TABSIZ) time.
*/
- if( (i = free_list) >= 0 ) {
+ if( free_list >= 0 ) {
+ i = free_list;
free_list = beam_catches[i].cdr;
- } else if( (i = high_mark) < TABSIZ ) {
- high_mark = i + 1;
+ } else if( high_mark < tabsize ) {
+ i = high_mark;
+ high_mark++;
} else {
- fprintf(stderr, "beam_catches_cons: no free slots :-(\r\n");
- exit(1);
+ /* No free slots and table is full: realloc table */
+ tabsize = 2*tabsize;
+ beam_catches = erts_realloc(ERTS_ALC_T_CODE, beam_catches, sizeof(beam_catch_t)*tabsize);
+ i = high_mark;
+ high_mark++;
}
- beam_catches[i].cp = cp;
+ beam_catches[i].cp = cp;
beam_catches[i].cdr = cdr;
return i;
@@ -67,10 +78,8 @@ unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
BeamInstr *beam_catches_car(unsigned i)
{
- if( i >= TABSIZ ) {
- fprintf(stderr,
- "beam_catches_car: index %#x is out of range\r\n", i);
- abort();
+ if( i >= tabsize ) {
+ erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
return beam_catches[i].cp;
}
@@ -80,18 +89,15 @@ void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes)
unsigned i, cdr;
for(i = head; i != (unsigned)-1;) {
- if( i >= TABSIZ ) {
- fprintf(stderr,
- "beam_catches_delmod: index %#x is out of range\r\n", i);
- abort();
+ if( i >= tabsize ) {
+ erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
if( (char*)beam_catches[i].cp - (char*)code >= code_bytes ) {
- fprintf(stderr,
+ erl_exit(1,
"beam_catches_delmod: item %#x has cp %#lx which is not "
"in module's range [%#lx,%#lx[\r\n",
i, (long)beam_catches[i].cp,
(long)code, (long)((char*)code + code_bytes));
- abort();
}
beam_catches[i].cp = 0;
cdr = beam_catches[i].cdr;
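The beam_catches.c changes above replace the fixed-size catch table with one that starts small, recycles freed slots through a free list, and doubles via realloc when it fills up. Below is a minimal standalone C sketch of that growth strategy; names are hypothetical and, unlike erts_realloc(), plain realloc() does not abort on out-of-memory, so error handling is omitted here.

#include <stdio.h>
#include <stdlib.h>

typedef struct { void *cp; int cdr; } catch_slot;

static catch_slot *tab;
static unsigned tabsize;
static unsigned high_mark;      /* first never-used slot */
static int free_list = -1;      /* head of freed-slot chain, -1 if empty */

static void table_init(unsigned initial)
{
    tabsize = initial;
    tab = malloc(tabsize * sizeof(catch_slot));
}

static unsigned table_cons(void *cp, int cdr)
{
    unsigned i;

    if (free_list >= 0) {               /* reuse a previously freed slot */
        i = (unsigned) free_list;
        free_list = tab[i].cdr;
    } else if (high_mark < tabsize) {   /* still untouched slots left */
        i = high_mark++;
    } else {                            /* table full: double it */
        tabsize *= 2;
        tab = realloc(tab, tabsize * sizeof(catch_slot));
        i = high_mark++;
    }
    tab[i].cp = cp;
    tab[i].cdr = cdr;
    return i;
}

int main(void)
{
    table_init(2);
    table_cons((void *) 0x100, -1);
    table_cons((void *) 0x200, -1);
    table_cons((void *) 0x300, -1);          /* third insert doubles the table */
    printf("tabsize is now %u\n", tabsize);  /* prints 4 */
    return 0;
}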
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index fffb172c68..8041c92162 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -37,6 +37,7 @@
#include "beam_load.h"
#include "beam_bp.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
#ifdef ARCH_64
# define HEXF "%016bpX"
@@ -49,15 +50,18 @@ void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr);
-Eterm
-erts_debug_same_2(Process* p, Eterm term1, Eterm term2)
+
+BIF_RETTYPE
+erts_debug_same_2(BIF_ALIST_2)
{
- return (term1 == term2) ? am_true : am_false;
+ return (BIF_ARG_1 == BIF_ARG_2) ? am_true : am_false;
}
-Eterm
-erts_debug_flat_size_1(Process* p, Eterm term)
+BIF_RETTYPE
+erts_debug_flat_size_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm term = BIF_ARG_1;
Uint size = size_object(term);
if (IS_USMALL(0, size)) {
@@ -68,9 +72,13 @@ erts_debug_flat_size_1(Process* p, Eterm term)
}
}
-Eterm
-erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
+
+BIF_RETTYPE
+erts_debug_breakpoint_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm MFA = BIF_ARG_1;
+ Eterm bool = BIF_ARG_2;
Eterm* tp;
Eterm mfa[3];
int i;
@@ -107,7 +115,7 @@ erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (bool == am_true) {
res = make_small(erts_set_debug_break(mfa, specified));
@@ -115,7 +123,7 @@ erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm bool)
res = make_small(erts_clear_debug_break(mfa, specified));
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
return res;
@@ -175,9 +183,11 @@ erts_debug_instructions_0(BIF_ALIST_0)
return res;
}
-Eterm
-erts_debug_disassemble_1(Process* p, Eterm addr)
+BIF_RETTYPE
+erts_debug_disassemble_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm addr = BIF_ARG_1;
erts_dsprintf_buf_t *dsbufp;
Eterm* hp;
Eterm* tp;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index fb90a7d4f7..18a57931ae 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -36,15 +36,17 @@
#include "dist.h"
#include "beam_bp.h"
#include "beam_catches.h"
+#include "erl_thr_progress.h"
#ifdef HIPE
#include "hipe_mode_switch.h"
#include "hipe_bif1.h"
#endif
+#include "dtrace-wrapper.h"
/* #define HARDDEBUG 1 */
#if defined(NO_JUMP_TABLE)
-# define OpCase(OpCode) case op_##OpCode: lb_##OpCode
+# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
# define OpCode(OpCode) ((Uint*)op_##OpCode)
# define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
@@ -52,7 +54,7 @@
#else
# define OpCase(OpCode) lb_##OpCode
# define CountCase(OpCode) lb_count_##OpCode
-# define Goto(Rel) goto *(Rel)
+# define Goto(Rel) goto *((void *)Rel)
# define LabelAddr(Label) &&Label
# define OpCode(OpCode) (&&lb_##OpCode)
#endif
@@ -70,7 +72,7 @@ do { \
} \
else \
erts_lc_check_exact(NULL, 0); \
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
} while (0)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
@@ -198,7 +200,7 @@ do { \
} \
} while (0)
-#define ClauseFail() goto lb_jump_f
+#define ClauseFail() goto jump_f
#define SAVE_CP(X) \
do { \
@@ -233,6 +235,12 @@ BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
+
+/*
+ * We should warn only once for tuple funs.
+ */
+static erts_smp_atomic_t warned_for_tuple_funs;
+
/*
* All Beam instructions in numerical order.
*/
@@ -303,44 +311,6 @@ extern int count_instructions;
PROCESS_MAIN_CHK_LOCKS((P)); \
ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
-#if defined(HYBRID)
-# define POST_BIF_GC_SWAPIN_0(_p, _res) \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
- } \
- SWAPIN
-
-# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _regs[0] = r(0); \
- _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
- r(0) = _regs[0]; \
- } \
- SWAPIN
-#else
-# define POST_BIF_GC_SWAPIN_0(_p, _res) \
- ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
- PROCESS_MAIN_CHK_LOCKS((_p)); \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
- E = (_p)->stop; \
- } \
- HTOP = HEAP_TOP((_p))
-
-# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
- ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
- ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
- PROCESS_MAIN_CHK_LOCKS((_p)); \
- if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
- _regs[0] = r(0); \
- _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
- r(0) = _regs[0]; \
- E = (_p)->stop; \
- } \
- HTOP = HEAP_TOP((_p))
-#endif
-
#define db(N) (N)
#define tb(N) (N)
#define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
@@ -794,11 +764,11 @@ extern int count_instructions;
} \
} while (0)
-#define IsFunction2(F, A, Action) \
- do { \
- if (is_function_2(c_p, F, A) != am_true ) {\
- Action; \
- } \
+#define IsFunction2(F, A, Action) \
+ do { \
+ if (erl_is_function(c_p, F, A) != am_true ) { \
+ Action; \
+ } \
} while (0)
#define IsTupleOfArity(Src, Arity, Fail) \
@@ -1052,6 +1022,7 @@ init_emulator(void)
#if defined(VXWORKS)
init_done = 0;
#endif
+ erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0);
process_main();
}
@@ -1080,6 +1051,101 @@ init_emulator(void)
# define REG_tmp_arg2
#endif
+#ifdef USE_VM_PROBES
+# define USE_VM_CALL_PROBES
+#endif
+
+#ifdef USE_VM_CALL_PROBES
+
+#define DTRACE_LOCAL_CALL(p, m, f, a) \
+ if (DTRACE_ENABLED(local_function_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ int depth = STACK_START(p) - STACK_TOP(p); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE3(local_function_entry, process_name, mfa, depth); \
+ }
+
+#define DTRACE_GLOBAL_CALL(p, m, f, a) \
+ if (DTRACE_ENABLED(global_function_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ int depth = STACK_START(p) - STACK_TOP(p); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE3(global_function_entry, process_name, mfa, depth); \
+ }
+
+#define DTRACE_RETURN(p, m, f, a) \
+ if (DTRACE_ENABLED(function_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ int depth = STACK_START(p) - STACK_TOP(p); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE3(function_return, process_name, mfa, depth); \
+ }
+
+#define DTRACE_BIF_ENTRY(p, m, f, a) \
+ if (DTRACE_ENABLED(bif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE2(bif_entry, process_name, mfa); \
+ }
+
+#define DTRACE_BIF_RETURN(p, m, f, a) \
+ if (DTRACE_ENABLED(bif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE2(bif_return, process_name, mfa); \
+ }
+
+#define DTRACE_NIF_ENTRY(p, m, f, a) \
+ if (DTRACE_ENABLED(nif_entry)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE2(nif_entry, process_name, mfa); \
+ }
+
+#define DTRACE_NIF_RETURN(p, m, f, a) \
+ if (DTRACE_ENABLED(nif_return)) { \
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE); \
+ dtrace_fun_decode(p, m, f, a, \
+ process_name, mfa); \
+ DTRACE2(nif_return, process_name, mfa); \
+ }
+
+#else /* USE_VM_PROBES */
+
+#define DTRACE_LOCAL_CALL(p, m, f, a) do {} while (0)
+#define DTRACE_GLOBAL_CALL(p, m, f, a) do {} while (0)
+#define DTRACE_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_BIF_ENTRY(p, m, f, a) do {} while (0)
+#define DTRACE_BIF_RETURN(p, m, f, a) do {} while (0)
+#define DTRACE_NIF_ENTRY(p, m, f, a) do {} while (0)
+#define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
+
+#endif /* USE_VM_PROBES */
+
+#ifdef USE_VM_PROBES
+void
+dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
+{
+ Port *port = erts_drvport2port(drvport);
+
+ erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
+ port_channel_no(port->id),
+ port_number(port->id));
+}
+#endif
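The DTRACE_* call macros added above all follow the same guard-then-format pattern: test whether the probe is enabled before paying for decoding the process name and MFA strings. Here is a rough standalone sketch of that pattern, with hypothetical stand-ins for the probe test and fire functions that a real provider would generate; it is an illustration, not the generated DTrace glue.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the generated probe functions. */
static bool call_probe_enabled(void) { return false; }
static void call_probe(const char *proc, const char *mfa)
{
    fprintf(stderr, "call %s %s\n", proc, mfa);
}

#define TRACE_CALL(pid, m, f, a)                                       \
    do {                                                               \
        if (call_probe_enabled()) {     /* skip formatting if disabled */ \
            char proc_buf[64], mfa_buf[64];                            \
            snprintf(proc_buf, sizeof(proc_buf), "<%d>", (pid));       \
            snprintf(mfa_buf, sizeof(mfa_buf), "%s:%s/%d",             \
                     (m), (f), (a));                                   \
            call_probe(proc_buf, mfa_buf);                             \
        }                                                              \
    } while (0)

int main(void)
{
    TRACE_CALL(42, "lists", "reverse", 1);   /* costs one test when disabled */
    return 0;
}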
/*
* process_main() is called twice:
* The first call performs some initialisation, including exporting
@@ -1094,7 +1160,7 @@ void process_main(void)
Process* c_p = NULL;
int reds_used;
#ifdef DEBUG
- Eterm pid;
+ ERTS_DECLARE_DUMMY(Eterm pid);
#endif
/*
@@ -1145,26 +1211,11 @@ void process_main(void)
Eterm *tmp_big; /* Temporary buffer for small bignums if !HEAP_ON_C_STACK. */
#endif
-#ifndef ERTS_SMP
-#if !HALFWORD_HEAP
- static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
- /* X registers -- not used directly, but
- * through 'reg', because using it directly
- * needs two instructions on a SPARC,
- * while using it through reg needs only
- * one.
- */
-#endif
/*
- * Floating point registers.
- */
- static FloatDef freg[MAX_REG];
-#else
- /* X regisers and floating point registers are located in
+ * X registers and floating point registers are located in
* scheduler specific data.
*/
register FloatDef *freg;
-#endif
/*
* For keeping the negative old value of 'reds' when call saving is active.
@@ -1201,14 +1252,6 @@ void process_main(void)
init_done = 1;
goto init_emulator;
}
-#ifndef ERTS_SMP
-#if !HALFWORD_HEAP
- reg = save_reg; /* XXX: probably wastes a register on x86 */
-#else
- /* Registers need to be heap allocated (correct memory range) for tracing to work */
- reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
-#endif
-#endif
c_p = NULL;
reds_used = 0;
goto do_schedule1;
@@ -1225,14 +1268,12 @@ void process_main(void)
c_p = schedule(c_p, reds_used);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
#ifdef DEBUG
- pid = c_p->id;
+ pid = c_p->id; /* Save for debugging purposes */
#endif
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
-#ifdef ERTS_SMP
- reg = c_p->scheduler_data->save_reg;
- freg = c_p->scheduler_data->freg;
-#endif
+ reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
+ freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
#if !HEAP_ON_C_STACK
tmp_big = ERTS_PROC_GET_SCHDATA(c_p)->beam_emu_tmp_heap;
#endif
@@ -1276,6 +1317,30 @@ void process_main(void)
#endif
SWAPIN;
ASSERT(VALID_INSTR(next));
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_scheduled)) {
+ DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_proc_str(c_p, process_buf);
+
+ if (ERTS_PROC_IS_EXITING(c_p)) {
+ strcpy(fun_buf, "<exiting>");
+ } else {
+ BeamInstr *fptr = find_function_from_pc(c_p->i);
+ if (fptr) {
+ dtrace_fun_decode(c_p, (Eterm)fptr[0],
+ (Eterm)fptr[1], (Uint)fptr[2],
+ NULL, fun_buf);
+ } else {
+ erts_snprintf(fun_buf, sizeof(fun_buf),
+ "<unknown/%p>", next);
+ }
+ }
+
+ DTRACE2(process_scheduled, process_buf, fun_buf);
+ }
+#endif
Goto(next);
}
@@ -1452,6 +1517,7 @@ void process_main(void)
/* FALL THROUGH */
OpCase(i_call_only_f): {
SET_I((BeamInstr *) Arg(0));
+ DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
Dispatch();
}
@@ -1463,6 +1529,7 @@ void process_main(void)
RESTORE_CP(E);
E = ADD_BYTE_OFFSET(E, Arg(1));
SET_I((BeamInstr *) Arg(0));
+ DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
Dispatch();
}
@@ -1474,6 +1541,7 @@ void process_main(void)
OpCase(i_call_f): {
SET_CP(c_p, I+2);
SET_I((BeamInstr *) Arg(0));
+ DTRACE_LOCAL_CALL(c_p, (Eterm)I[-3], (Eterm)I[-2], I[-1]);
Dispatch();
}
@@ -1490,6 +1558,12 @@ void process_main(void)
* is not loaded, it points to code which will invoke the error handler
* (see lb_call_error_handler below).
*/
+#ifdef USE_VM_CALL_PROBES
+ if (DTRACE_ENABLED(global_function_entry)) {
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
+ }
+#endif
Dispatchx();
OpCase(i_move_call_ext_cre): {
@@ -1499,6 +1573,12 @@ void process_main(void)
/* FALL THROUGH */
OpCase(i_call_ext_e):
SET_CP(c_p, I+2);
+#ifdef USE_VM_CALL_PROBES
+ if (DTRACE_ENABLED(global_function_entry)) {
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
+ }
+#endif
Dispatchx();
OpCase(i_move_call_ext_only_ecr): {
@@ -1506,6 +1586,12 @@ void process_main(void)
}
/* FALL THROUGH */
OpCase(i_call_ext_only_e):
+#ifdef USE_VM_CALL_PROBES
+ if (DTRACE_ENABLED(global_function_entry)) {
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
+ }
+#endif
Dispatchx();
OpCase(init_y): {
@@ -1541,7 +1627,16 @@ void process_main(void)
OpCase(return): {
+#ifdef USE_VM_CALL_PROBES
+ BeamInstr* fptr;
+#endif
SET_I(c_p->cp);
+
+#ifdef USE_VM_CALL_PROBES
+ if (DTRACE_ENABLED(function_return) && (fptr = find_function_from_pc(c_p->cp))) {
+ DTRACE_RETURN(c_p, (Eterm)fptr[0], (Eterm)fptr[1], (Uint)fptr[2]);
+ }
+#endif
/*
	 * We must clear the CP to make sure that a stale value does not
	 * create a false module dependency preventing code upgrading.
@@ -1566,9 +1661,17 @@ void process_main(void)
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
- result = send_2(c_p, r(0), x(1));
+ reg[0] = r(0);
+ result = erl_send(c_p, r(0), x(1));
PreFetch(0, next);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
+ result = erts_gc_after_bif_call(c_p, result, reg, 2);
+ r(0) = reg[0];
+ E = c_p->stop;
+ }
+ HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
if (is_value(result)) {
r(0) = result;
@@ -1576,10 +1679,9 @@ void process_main(void)
NextPF(0, next);
} else if (c_p->freason == TRAP) {
SET_CP(c_p, I+1);
- SET_I(*((BeamInstr **) (BeamInstr) ((c_p)->def_arg_reg + 3)));
+ SET_I(c_p->i);
SWAPIN;
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
+ r(0) = reg[0];
Dispatch();
}
goto find_func_info;
@@ -1803,6 +1905,7 @@ void process_main(void)
* remove it...
*/
ASSERT(!msgp->data.attached);
+ /* TODO: Add DTrace probe for this bad message situation? */
UNLINK_MESSAGE(c_p, msgp);
free_message(msgp);
goto loop_rec__;
@@ -1828,24 +1931,88 @@ void process_main(void)
save_calls(c_p, &exp_receive);
}
if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
- SEQ_TRACE_TOKEN(c_p) = NIL;
+#ifdef USE_VM_PROBES
+ if (DT_UTAG(c_p) != NIL) {
+ if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) {
+ SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag;
+#ifdef DTRACE_TAG_HARDDEBUG
+ if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) stop spreading "
+ "tag %T with message %T\r\n",
+ c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
+#endif
+ } else {
+#ifdef DTRACE_TAG_HARDDEBUG
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) kill tag %T with "
+ "message %T\r\n",
+ c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
+#endif
+ DT_UTAG(c_p) = NIL;
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+ }
+ } else {
+#endif
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+#ifdef USE_VM_PROBES
+ }
+ DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING;
+#endif
} else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
Eterm msg;
SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
- ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
- ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
- ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
- ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
- ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
- ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
- c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
- if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
- c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+#ifdef USE_VM_PROBES
+ if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) {
+ if (DT_UTAG(c_p) == NIL) {
+ DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp);
+ }
+ DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING;
+#ifdef DTRACE_TAG_HARDDEBUG
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) receive tag (%T) "
+ "with message %T\r\n",
+ c_p->id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
+#endif
+ } else {
+#endif
+ ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
+ ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
+ ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
+ c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
+ c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ }
+ msg = ERL_MESSAGE_TERM(msgp);
+ seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
+ c_p->id, c_p);
+#ifdef USE_VM_PROBES
}
- msg = ERL_MESSAGE_TERM(msgp);
- seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
- c_p->id, c_p);
+#endif
+ }
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_receive)) {
+ Eterm token2 = NIL;
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+
+ dtrace_proc_str(c_p, receiver_name);
+ token2 = SEQ_TRACE_TOKEN(c_p);
+ if (token2 != NIL && token2 != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token2));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2));
+ }
+ DTRACE6(message_receive,
+ receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
+ c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial);
}
+#endif
UNLINK_MESSAGE(c_p, msgp);
JOIN_MESSAGE(c_p);
CANCEL_TIMER(c_p);
@@ -2234,16 +2401,16 @@ void process_main(void)
OpCase(bif1_fbsd):
{
- Eterm (*bf)(Process*, Eterm);
- Eterm arg;
+ Eterm (*bf)(Process*, Eterm*);
+ Eterm tmp_reg[1];
Eterm result;
- GetArg1(2, arg);
+ GetArg1(2, tmp_reg[0]);
bf = (BifFunction) Arg(1);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, arg);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2262,17 +2429,17 @@ void process_main(void)
OpCase(bif1_body_bsd):
{
- Eterm (*bf)(Process*, Eterm);
+ Eterm (*bf)(Process*, Eterm*);
- Eterm arg;
+ Eterm tmp_reg[1];
Eterm result;
- GetArg1(1, arg);
+ GetArg1(1, tmp_reg[0]);
bf = (BifFunction) Arg(0);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, arg);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2281,7 +2448,7 @@ void process_main(void)
if (is_value(result)) {
StoreBifResult(2, result);
}
- reg[0] = arg;
+ reg[0] = tmp_reg[0];
SWAPOUT;
I = handle_error(c_p, I, reg, bf);
goto post_error_handling;
@@ -2405,14 +2572,15 @@ void process_main(void)
*/
OpCase(i_bif2_fbd):
{
- Eterm (*bf)(Process*, Eterm, Eterm);
+ Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
+ Eterm (*bf)(Process*, Eterm*);
Eterm result;
bf = (BifFunction) Arg(1);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_arg1, tmp_arg2);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2430,13 +2598,14 @@ void process_main(void)
*/
OpCase(i_bif2_body_bd):
{
- Eterm (*bf)(Process*, Eterm, Eterm);
+ Eterm tmp_reg[2] = {tmp_arg1, tmp_arg2};
+ Eterm (*bf)(Process*, Eterm*);
Eterm result;
bf = (BifFunction) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, tmp_arg1, tmp_arg2);
+ result = (*bf)(c_p, tmp_reg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2456,77 +2625,9 @@ void process_main(void)
* The most general BIF call. The BIF may build any amount of data
* on the heap. The result is always returned in r(0).
*/
- OpCase(call_bif0_e):
- {
- Eterm (*bf)(Process*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- r(0) = (*bf)(c_p, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(r(0)));
- ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN_0(c_p, r(0));
- FCALLS = c_p->fcalls;
- if (is_value(r(0))) {
- CHECK_TERM(r(0));
- Next(1);
- }
- else if (c_p->freason == TRAP) {
- goto call_bif_trap3;
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
- reg[0] = r(0);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(call_bif1_e):
+ OpCase(call_bif_e):
{
- Eterm (*bf)(Process*, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
- Eterm result;
- BeamInstr *next;
-
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
- PreFetch(1, next);
- PRE_BIF_SWAPOUT(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, r(0), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 1);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(1, next);
- } else if (c_p->freason == TRAP) {
- goto call_bif_trap3;
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
- reg[0] = r(0);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(call_bif2_e):
- {
- Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, Eterm*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
Eterm result;
BeamInstr *next;
@@ -2536,61 +2637,29 @@ void process_main(void)
save_calls(c_p, (Export *) Arg(0));
}
PreFetch(1, next);
- CHECK_TERM(r(0));
- CHECK_TERM(x(1));
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, r(0), x(1), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
- ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
- FCALLS = c_p->fcalls;
- if (is_value(result)) {
- r(0) = result;
- CHECK_TERM(r(0));
- NextPF(1, next);
- } else if (c_p->freason == TRAP) {
- goto call_bif_trap3;
- }
-
- /*
- * Error handling. SWAPOUT is not needed because it was done above.
- */
- ASSERT(c_p->stop == E);
reg[0] = r(0);
- I = handle_error(c_p, I, reg, bf);
- goto post_error_handling;
- }
-
- OpCase(call_bif3_e):
- {
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
- Eterm result;
- BeamInstr *next;
-
- PRE_BIF_SWAPOUT(c_p);
- c_p->fcalls = FCALLS - 1;
- if (FCALLS <= 0) {
- save_calls(c_p, (Export *) Arg(0));
- }
- PreFetch(1, next);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- result = (*bf)(c_p, r(0), x(1), x(2), I);
+ result = (*bf)(c_p, reg, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
ERTS_HOLE_CHECK(c_p);
- POST_BIF_GC_SWAPIN(c_p, result, reg, 3);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
+ Uint arity = ((Export *)Arg(0))->code[2];
+ result = erts_gc_after_bif_call(c_p, result, reg, arity);
+ E = c_p->stop;
+ }
+ HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
if (is_value(result)) {
r(0) = result;
CHECK_TERM(r(0));
NextPF(1, next);
} else if (c_p->freason == TRAP) {
- call_bif_trap3:
SET_CP(c_p, I+2);
- SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
+ SET_I(c_p->i);
SWAPIN;
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
- x(2) = c_p->def_arg_reg[2];
+ r(0) = reg[0];
Dispatch();
}
@@ -2598,7 +2667,6 @@ void process_main(void)
* Error handling. SWAPOUT is not needed because it was done above.
*/
ASSERT(c_p->stop == E);
- reg[0] = r(0);
I = handle_error(c_p, I, reg, bf);
goto post_error_handling;
}
@@ -2694,6 +2762,7 @@ void process_main(void)
lb_Cl_error: {
if (Arg(0) != 0) {
OpCase(jump_f): {
+ jump_f:
SET_I((BeamInstr *) Arg(0));
Goto(*I);
}
@@ -3267,7 +3336,7 @@ void process_main(void)
/* Fall through */
OpCase(error_action_code): {
- no_error_handler:
+ handle_error:
reg[0] = r(0);
SWAPOUT;
I = handle_error(c_p, NULL, reg, NULL);
@@ -3303,6 +3372,7 @@ void process_main(void)
*/
BifFunction vbf;
+ DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
c_p->current = I-3; /* current and vbf set to please handle_error */
SWAPOUT;
c_p->fcalls = FCALLS - 1;
@@ -3324,6 +3394,8 @@ void process_main(void)
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+
+ DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
goto apply_bif_or_nif_epilogue;
OpCase(apply_bif):
@@ -3343,6 +3415,8 @@ void process_main(void)
c_p->arity = 0; /* To allow garbage collection on ourselves
* (check_process_code/2).
*/
+ DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+
SWAPOUT;
c_p->fcalls = FCALLS - 1;
vbf = (BifFunction) Arg(0);
@@ -3351,64 +3425,25 @@ void process_main(void)
ASSERT(bif_nif_arity <= 3);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- switch (bif_nif_arity) {
- case 3:
- {
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, r(0), x(1), x(2), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- break;
- case 2:
- {
- Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, r(0), x(1), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- break;
- case 1:
- {
- Eterm (*bf)(Process*, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, r(0), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- }
- break;
- case 0:
- {
- Eterm (*bf)(Process*, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- nif_bif_result = (*bf)(c_p, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
- is_non_value(nif_bif_result));
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- break;
- }
- default:
- erl_exit(1, "apply_bif: invalid arity: %u\n",
- bif_nif_arity);
+ reg[0] = r(0);
+ {
+ Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ nif_bif_result = (*bf)(c_p, reg, I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
}
+ DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+
apply_bif_or_nif_epilogue:
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
ERTS_HOLE_CHECK(c_p);
if (c_p->mbuf) {
- reg[0] = r(0);
nif_bif_result = erts_gc_after_bif_call(c_p, nif_bif_result,
reg, bif_nif_arity);
- r(0) = reg[0];
}
SWAPIN; /* There might have been a garbage collection. */
FCALLS = c_p->fcalls;
@@ -3419,17 +3454,14 @@ void process_main(void)
c_p->cp = 0;
Goto(*I);
} else if (c_p->freason == TRAP) {
- SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
- x(2) = c_p->def_arg_reg[2];
+ SET_I(c_p->i);
+ r(0) = reg[0];
if (c_p->flags & F_HIBERNATE_SCHED) {
c_p->flags &= ~F_HIBERNATE_SCHED;
goto do_schedule;
}
Dispatch();
}
- reg[0] = r(0);
I = handle_error(c_p, c_p->cp, reg, vbf);
goto post_error_handling;
}
@@ -3472,7 +3504,7 @@ void process_main(void)
OpCase(i_func_info_IaaI): {
c_p->freason = EXC_FUNCTION_CLAUSE;
c_p->current = I + 2;
- goto lb_error_action_code;
+ goto handle_error;
}
OpCase(try_case_end_s):
@@ -3561,7 +3593,7 @@ void process_main(void)
* Operands: NotUsed Live Dst
*/
do_bs_init_bits_known:
- num_bytes = (num_bits+7) >> 3;
+ num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3;
if (num_bits & 7) {
alloc += ERL_SUB_BIN_SIZE;
}
@@ -3992,8 +4024,7 @@ void process_main(void)
* too big numbers).
*/
if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL)) ||
- val == make_small(0xFFFEUL) || val == make_small(0xFFFFUL)) {
+ (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) {
goto badarg;
}
Next(2);
@@ -4012,8 +4043,8 @@ void process_main(void)
* the valid range).
*/
if (is_not_small(tmp_arg1) || tmp_arg1 > make_small(0x10FFFFUL) ||
- (make_small(0xD800UL) <= tmp_arg1 && tmp_arg1 <= make_small(0xDFFFUL)) ||
- tmp_arg1 == make_small(0xFFFEUL) || tmp_arg1 == make_small(0xFFFFUL)) {
+ (make_small(0xD800UL) <= tmp_arg1 &&
+ tmp_arg1 <= make_small(0xDFFFUL))) {
ErlBinMatchBuffer *mb = ms_matchbuffer(tmp_arg2);
mb->offset -= 32;
@@ -4888,92 +4919,6 @@ void process_main(void)
}
/*
- * Instructions for allocating on the message area.
- */
-
- OpCase(i_global_cons):
- {
- BeamInstr *next;
-#ifdef HYBRID
- Eterm *hp;
-
- PreFetch(0,next);
- TestGlobalHeap(2,2,hp);
- hp[0] = r(0);
- hp[1] = x(1);
- r(0) = make_list(hp);
-#ifndef INCREMENTAL
- global_htop += 2;
-#endif
- NextPF(0,next);
-#else
- PreFetch(0,next);
- c_p->freason = EXC_INTERNAL_ERROR;
- goto find_func_info;
-#endif
- }
-
- OpCase(i_global_tuple):
- {
- BeamInstr *next;
- int len;
-#ifdef HYBRID
- Eterm list;
- Eterm *hp;
-#endif
-
- if ((len = list_length(r(0))) < 0) {
- goto badarg;
- }
-
- PreFetch(0,next);
-#ifdef HYBRID
- TestGlobalHeap(len + 1,1,hp);
- list = r(0);
- r(0) = make_tuple(hp);
- *hp++ = make_arityval(len);
- while(is_list(list))
- {
- Eterm* cons = list_val(list);
- *hp++ = CAR(cons);
- list = CDR(cons);
- }
-#ifndef INCREMENTAL
- global_htop += len + 1;
-#endif
- NextPF(0,next);
-#else
- c_p->freason = EXC_INTERNAL_ERROR;
- goto find_func_info;
-#endif
- }
-
- OpCase(i_global_copy):
- {
- BeamInstr *next;
- PreFetch(0,next);
-#ifdef HYBRID
- if (!IS_CONST(r(0)))
- {
- BM_SWAP_TIMER(system,copy);
- SWAPOUT;
- reg[0] = r(0);
- reg[1] = NIL;
- r(0) = copy_struct_lazy(c_p,r(0),0);
- ASSERT(ma_src_top == 0);
- ASSERT(ma_dst_top == 0);
- ASSERT(ma_offset_top == 0);
- SWAPIN;
- BM_SWAP_TIMER(copy,system);
- }
- NextPF(0,next);
-#else
- c_p->freason = EXC_INTERNAL_ERROR;
- goto find_func_info;
-#endif
- }
-
- /*
* New floating point instructions.
*/
@@ -5032,7 +4977,12 @@ void process_main(void)
OpCase(fclearerror):
OpCase(i_fcheckerror):
erl_exit(1, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
+# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT
+# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR
#else
+# define ERTS_NO_FPE_CHECK_INIT(p)
+# define ERTS_NO_FPE_ERROR(p, a, b)
+
OpCase(fclearerror): {
BeamInstr *next;
@@ -5048,10 +4998,6 @@ void process_main(void)
ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
NextPF(0, next);
}
-# undef ERTS_FP_CHECK_INIT
-# undef ERTS_FP_ERROR
-# define ERTS_FP_CHECK_INIT(p)
-# define ERTS_FP_ERROR(p, a, b)
#endif
@@ -5059,45 +5005,45 @@ void process_main(void)
BeamInstr *next;
PreFetch(3, next);
- ERTS_FP_CHECK_INIT(c_p);
+ ERTS_NO_FPE_CHECK_INIT(c_p);
fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));
- ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
NextPF(3, next);
}
OpCase(i_fsub_lll): {
BeamInstr *next;
PreFetch(3, next);
- ERTS_FP_CHECK_INIT(c_p);
+ ERTS_NO_FPE_CHECK_INIT(c_p);
fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1));
- ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
NextPF(3, next);
}
OpCase(i_fmul_lll): {
BeamInstr *next;
PreFetch(3, next);
- ERTS_FP_CHECK_INIT(c_p);
+ ERTS_NO_FPE_CHECK_INIT(c_p);
fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1));
- ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
NextPF(3, next);
}
OpCase(i_fdiv_lll): {
BeamInstr *next;
PreFetch(3, next);
- ERTS_FP_CHECK_INIT(c_p);
+ ERTS_NO_FPE_CHECK_INIT(c_p);
fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1));
- ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith);
NextPF(3, next);
}
OpCase(i_fnegate_ll): {
BeamInstr *next;
PreFetch(2, next);
- ERTS_FP_CHECK_INIT(c_p);
+ ERTS_NO_FPE_CHECK_INIT(c_p);
fb(Arg(1)) = -fb(Arg(0));
- ERTS_FP_ERROR(c_p, fb(Arg(1)), goto fbadarith);
+ ERTS_NO_FPE_ERROR(c_p, fb(Arg(1)), goto fbadarith);
NextPF(2, next);
fbadarith:
@@ -5151,10 +5097,8 @@ void process_main(void)
c_p->def_arg_reg[4] = -neg_o_reds;
reg[0] = r(0);
c_p = hipe_mode_switch(c_p, cmd, reg);
-#ifdef ERTS_SMP
- reg = c_p->scheduler_data->save_reg;
- freg = c_p->scheduler_data->freg;
-#endif
+ reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
+ freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
neg_o_reds = -c_p->def_arg_reg[4];
FCALLS = c_p->fcalls;
@@ -5246,7 +5190,7 @@ void process_main(void)
if (I) {
Goto(*I);
}
- goto no_error_handler;
+ goto handle_error;
}
@@ -5268,8 +5212,8 @@ void process_main(void)
OpCase(int_code_end):
OpCase(label_L):
- OpCase(too_old_compiler):
OpCase(on_load):
+ OpCase(line_I):
erl_exit(1, "meta op\n");
/*
@@ -5686,6 +5630,25 @@ expand_error_value(Process* c_p, Uint freason, Eterm Value) {
* that c_p->ftrace will point to a cons cell which holds the given args
* and the saved data (encoded as a bignum).
*
+ * There is an issue with line number information. Line number
+ * information is associated with the address *before* an operation
+ * that may fail or be stored on the stack. But continuation
+ * pointers point after the call instruction, not before. To avoid
+ * finding the wrong line number, we'll need to adjust them so that
+ * they point at the beginning of the call instruction or inside the
+ * call instruction. Since it's impractical to point at the beginning,
+ * we'll do the simplest thing and decrement the continuation pointers
+ * by one.
+ *
+ * Here is an example of what can go wrong. Without the adjustment
+ * of continuation pointers, the call at line 42 below would seem to
+ * be at line 43:
+ *
+ * line 42
+ * call ...
+ * line 43
+ * gc_bif ...
+ *
* (It would be much better to put the arglist - when it exists - in the
* error value instead of in the actual trace; e.g. '{badarg, Args}'
* instead of using 'badarg' with Args in the trace. The arglist may
@@ -5752,7 +5715,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
}
/* Save second stack entry if CP is valid and different from pc */
if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
- s->trace[s->depth++] = c_p->cp;
+ s->trace[s->depth++] = c_p->cp - 1;
depth--;
}
s->pc = NULL;
@@ -5772,13 +5735,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
/* Save first stack entry */
ASSERT(c_p->cp);
if (depth > 0) {
- s->trace[s->depth++] = c_p->cp;
+ s->trace[s->depth++] = c_p->cp - 1;
depth--;
}
s->pc = NULL; /* Ignore pc */
} else {
if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
- s->trace[s->depth++] = c_p->cp;
+ s->trace[s->depth++] = c_p->cp - 1;
depth--;
}
s->pc = pc;
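The `c_p->cp - 1` adjustments above implement the rule from the earlier comment: a continuation pointer is a return address, so it points just after the call instruction, and looking it up directly would blame the following line. A small self-contained demonstration of the effect, using a hypothetical word-to-line layout that mirrors the line 42/43 example in the comment:

#include <stdio.h>

typedef unsigned long BeamWord;

/* Hypothetical code area: words 0-1 are a call on line 42, words 2-3
 * are the gc_bif on line 43.  line_tab[i] is the line of code word i. */
static BeamWord code[4];
static const int line_tab[4] = { 42, 42, 43, 43 };

static int line_of(const BeamWord *pc) { return line_tab[pc - code]; }

int main(void)
{
    const BeamWord *cp = &code[2];    /* return address: just after the call */

    printf("naive lookup:    line %d\n", line_of(cp));      /* prints 43 */
    printf("adjusted lookup: line %d\n", line_of(cp - 1));  /* prints 42 */
    return 0;
}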
@@ -5793,24 +5756,31 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
}
/* Save the actual stack trace */
+ erts_save_stacktrace(c_p, s, depth);
+}
+
+void
+erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
+{
if (depth > 0) {
Eterm *ptr;
BeamInstr *prev = s->depth ? s->trace[s->depth-1] : NULL;
BeamInstr i_return_trace = beam_return_trace[0];
BeamInstr i_return_to_trace = beam_return_to_trace[0];
+
/*
* Traverse the stack backwards and add all unique continuation
* pointers to the buffer, up to the maximum stack trace size.
*
* Skip trace stack frames.
*/
- ptr = c_p->stop;
- if (ptr < STACK_START(c_p)
- && (is_not_CP(*ptr)|| (*cp_val(*ptr) != i_return_trace &&
- *cp_val(*ptr) != i_return_to_trace))
- && c_p->cp) {
- /* Can not follow cp here - code may be unloaded */
- BeamInstr *cpp = c_p->cp;
+ ptr = p->stop;
+ if (ptr < STACK_START(p) &&
+ (is_not_CP(*ptr)|| (*cp_val(*ptr) != i_return_trace &&
+ *cp_val(*ptr) != i_return_to_trace)) &&
+ p->cp) {
+ /* Cannot follow cp here - code may be unloaded */
+ BeamInstr *cpp = p->cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
@@ -5819,7 +5789,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
ptr += 1;
}
}
- while (ptr < STACK_START(c_p) && depth > 0) {
+ while (ptr < STACK_START(p) && depth > 0) {
if (is_CP(*ptr)) {
if (*cp_val(*ptr) == i_return_trace) {
/* Skip stack frame variables */
@@ -5834,7 +5804,7 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
if (cp != prev) {
/* Record non-duplicates only */
prev = cp;
- s->trace[s->depth++] = cp;
+ s->trace[s->depth++] = cp - 1;
depth--;
}
ptr++;
@@ -5902,9 +5872,14 @@ build_stacktrace(Process* c_p, Eterm exc) {
struct StackTrace* s;
Eterm args;
int depth;
- BeamInstr* current;
- Eterm Where = NIL;
- Eterm *next_p = &Where;
+ FunctionInfo fi;
+ FunctionInfo* stk;
+ FunctionInfo* stkp;
+ Eterm res = NIL;
+ Uint heap_size;
+ Eterm* hp;
+ Eterm mfa;
+ int i;
if (! (s = get_trace_from_exc(exc))) {
return NIL;
@@ -5923,64 +5898,56 @@ build_stacktrace(Process* c_p, Eterm exc) {
* saved s->current should already contain the proper value.
*/
if (s->pc != NULL) {
- current = find_function_from_pc(s->pc);
+ erts_lookup_function_info(&fi, s->pc, 1);
+ } else if (GET_EXC_INDEX(s->freason) ==
+ GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) {
+ erts_lookup_function_info(&fi, s->current, 1);
} else {
- current = s->current;
+ erts_set_current_function(&fi, s->current);
}
+
/*
- * If current is still NULL, default to the initial function
+ * If fi.current is still NULL, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
- if (current == NULL) {
- current = c_p->initial;
+ if (fi.current == NULL) {
+ erts_set_current_function(&fi, c_p->initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
}
- depth = s->depth;
-
/*
- * Add the {M,F,A} for the current function
- * (where A is arity or [Argument]).
+ * Look up all saved continuation pointers and calculate
+ * needed heap space.
*/
- {
- int i;
- Eterm mfa;
- Uint heap_size = 6*(depth+1);
- Eterm* hp = HAlloc(c_p, heap_size);
- Eterm* hp_end = hp + heap_size;
-
- if (args != am_true) {
- /* We have an arglist - use it */
- mfa = TUPLE3(hp, current[0], current[1], args);
- } else {
- Eterm arity = make_small(current[2]);
- mfa = TUPLE3(hp, current[0], current[1], arity);
+ depth = s->depth;
+ stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
+ depth*sizeof(FunctionInfo));
+ heap_size = fi.needed + 2;
+ for (i = 0; i < depth; i++) {
+ erts_lookup_function_info(stkp, s->trace[i], 1);
+ if (stkp->current) {
+ heap_size += stkp->needed + 2;
+ stkp++;
}
- hp += 4;
- ASSERT(*next_p == NIL);
- *next_p = CONS(hp, mfa, NIL);
- next_p = &CDR(list_val(*next_p));
- hp += 2;
+ }
- /*
- * Finally, we go through the saved continuation pointers.
- */
- for (i = 0; i < depth; i++) {
- BeamInstr *fi = find_function_from_pc((BeamInstr *) s->trace[i]);
- if (fi == NULL) continue;
- mfa = TUPLE3(hp, fi[0], fi[1], make_small(fi[2]));
- hp += 4;
- ASSERT(*next_p == NIL);
- *next_p = CONS(hp, mfa, NIL);
- next_p = &CDR(list_val(*next_p));
- hp += 2;
- }
- ASSERT(hp <= hp_end);
- HRelease(c_p, hp_end, hp);
+ /*
+ * Allocate heap space and build the stacktrace.
+ */
+ hp = HAlloc(c_p, heap_size);
+ while (stkp > stk) {
+ stkp--;
+ hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
+ res = CONS(hp, mfa, res);
+ hp += 2;
}
- return Where;
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) stk);
+ return res;
}
@@ -6154,6 +6121,12 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
save_calls(p, ep);
}
+#ifdef USE_VM_CALL_PROBES
+ if (DTRACE_ENABLED(global_function_entry)) {
+ BeamInstr *fptr = (BeamInstr *) ep->address;
+ DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
+ }
+#endif
return ep->address;
}
@@ -6203,6 +6176,12 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
save_calls(p, ep);
}
+#ifdef USE_VM_CALL_PROBES
+ if (DTRACE_ENABLED(global_function_entry)) {
+ BeamInstr *fptr = (BeamInstr *) ep->address;
+ DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
+ }
+#endif
return ep->address;
}
@@ -6252,6 +6231,15 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
c_p->max_arg_reg = sizeof(c_p->def_arg_reg)/sizeof(c_p->def_arg_reg[0]);
}
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_hibernate)) {
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
+ dtrace_fun_decode(c_p, module, function, arity,
+ process_name, mfa);
+ DTRACE2(process_hibernate, process_name, mfa);
+ }
+#endif
/*
* Arrange for the process to be resumed at the given MFA with
* the stack cleared.
@@ -6327,6 +6315,9 @@ call_fun(Process* p, /* Current process. */
actual_arity = (int) code_ptr[-1];
if (actual_arity == arity+num_free) {
+ DTRACE_LOCAL_CALL(p, (Eterm)code_ptr[-3],
+ (Eterm)code_ptr[-2],
+ code_ptr[-1]);
if (num_free == 0) {
return code_ptr;
} else {
@@ -6344,7 +6335,7 @@ call_fun(Process* p, /* Current process. */
} else {
/*
* Something wrong here. First build a list of the arguments.
- */
+ */
if (is_non_value(args)) {
Uint sz = 2 * arity;
@@ -6419,6 +6410,7 @@ call_fun(Process* p, /* Current process. */
actual_arity = (int) ep->code[2];
if (arity == actual_arity) {
+ DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
return ep->address;
} else {
/*
@@ -6450,6 +6442,26 @@ call_fun(Process* p, /* Current process. */
if (!is_atom(module) || !is_atom(function)) {
goto badfun;
}
+
+ /*
+ * If this is the first time a tuple fun is used,
+ * send a warning to the logger.
+ */
+ if (erts_smp_atomic_xchg_nob(&warned_for_tuple_funs,
+ (erts_aint_t) 1) == 0) {
+ erts_dsprintf_buf_t* dsbufp;
+
+ dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Call to tuple fun {%T,%T}.\n\n"
+ "Tuple funs are deprecated and will be removed "
+ "in R16. Use \"fun M:F/A\" instead, for example "
+ "\"fun %T:%T/%d\".\n\n"
+ "(This warning will only be shown the first time "
+ "a tuple fun is called.)\n",
+ module, function, module, function, arity);
+ erts_send_warning_to_logger(p->group_leader, dsbufp);
+ }
+
if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
ep = erts_find_export_entry(erts_proc_get_error_handler(p),
am_undefined_function, 3);
@@ -6474,6 +6486,7 @@ call_fun(Process* p, /* Current process. */
reg[1] = function;
reg[2] = args;
}
+ DTRACE_GLOBAL_CALL(p, module, function, arity);
return ep->address;
} else {
badfun:
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 57fe25453d..dd788df6e4 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -158,6 +158,7 @@ typedef struct {
#define LITERAL_CHUNK 6
#define ATTR_CHUNK 7
#define COMPILE_CHUNK 8
+#define LINE_CHUNK 9
#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
@@ -182,6 +183,7 @@ static Uint chunk_types[] = {
MakeIffId('L', 'i', 't', 'T'), /* 6 */
MakeIffId('A', 't', 't', 'r'), /* 7 */
MakeIffId('C', 'I', 'n', 'f'), /* 8 */
+ MakeIffId('L', 'i', 'n', 'e'), /* 9 */
};
/*
@@ -204,6 +206,7 @@ typedef struct {
Eterm term; /* The tagged term (in the heap). */
Uint heap_size; /* (Exact) size on the heap. */
Uint offset; /* Offset from temporary location to final. */
+ ErlOffHeap off_heap; /* Start of linked list of ProcBins. */
Eterm* heap; /* Heap for term. */
} Literal;
@@ -231,10 +234,19 @@ struct string_patch {
};
/*
+ * This structure associates a code offset with a source code location.
+ */
+
+typedef struct {
+ int pos; /* Position in code */
+ Uint32 loc; /* Location in source code */
+} LineInstr;
+
+/*
* This structure contains all information about the module being loaded.
*/
-typedef struct {
+typedef struct LoaderState {
/*
* The current logical file within the binary.
*/
@@ -242,6 +254,7 @@ typedef struct {
char* file_name; /* Name of file we are reading (usually chunk name). */
byte* file_p; /* Current pointer within file. */
unsigned file_left; /* Number of bytes left in file. */
+ ErlDrvBinary* bin; /* Binary holding BEAM file (or NULL) */
/*
* The following are used mainly for diagnostics.
@@ -276,7 +289,6 @@ typedef struct {
BeamInstr* code; /* Loaded code. */
int ci; /* Current index into loaded code. */
Label* labels;
- BeamInstr new_bs_put_strings; /* Linked list of i_new_bs_put_string instructions. */
StringPatch* string_patches; /* Linked list of position into string table to patch. */
BeamInstr catches; /* Linked list of catch_yf instructions. */
unsigned loaded_size; /* Final size of code when loaded. */
@@ -325,27 +337,58 @@ typedef struct {
Literal* literals; /* Array of literals. */
LiteralPatch* literal_patches; /* Operands that need to be patched. */
Uint total_literal_size; /* Total heap size for all literals. */
+
+ /*
+ * Line table.
+ */
+ BeamInstr* line_item; /* Line items from the BEAM file. */
+ int num_line_items; /* Number of line items. */
+ LineInstr* line_instr; /* Line instructions */
+ int num_line_instrs; /* Maximum number of line instructions */
+ int current_li; /* Current line instruction */
+ int* func_line; /* Mapping from function to first line instr */
+ Eterm* fname; /* List of file names */
+ int num_fnames; /* Number of filenames in fname table */
+ int loc_size; /* Size of location info in bytes (2/4) */
} LoaderState;
-typedef struct {
- unsigned num_functions; /* Number of functions. */
- Eterm* func_tab[1]; /* Pointers to each function. */
-} LoadedCode;
-
-#define GetTagAndValue(Stp, Tag, Val) \
- do { \
- BeamInstr __w; \
- GetByte(Stp, __w); \
- Tag = __w & 0x07; \
- if ((__w & 0x08) == 0) { \
- Val = __w >> 4; \
- } else if ((__w & 0x10) == 0) { \
- Val = ((__w >> 5) << 8); \
- GetByte(Stp, __w); \
- Val |= __w; \
- } else { \
- if (!get_int_val(Stp, __w, &(Val))) goto load_error; \
- } \
+/*
+ * Layout of the line table.
+ */
+
+#define MI_LINE_FNAME_PTR 0
+#define MI_LINE_LOC_TAB 1
+#define MI_LINE_LOC_SIZE 2
+#define MI_LINE_FUNC_TAB 3
+
+#define LINE_INVALID_LOCATION (0)
+
+/*
+ * Macros for manipulating locations.
+ */
+
+#define IS_VALID_LOCATION(File, Line) \
+ ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
+#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
+#define LOC_FILE(Loc) ((Loc) >> 24)
+#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
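/*
 * A worked example of the packing above (a sketch, not part of the loader):
 * a location is a single word with the file-name index in the top 8 bits
 * and the line number in the low 24 bits, so file index 3, line 1907
 * becomes 0x03000773, and LOC_FILE()/LOC_LINE() recover the two fields.
 * The MI_LINE_* constants index the header of the line table that
 * freeze_code() later lays out in the loaded module.
 */
static void
location_sketch(void)
{
    Uint32 loc = MAKE_LOCATION(3, 1907);    /* 0x03000773 */

    ASSERT(IS_VALID_LOCATION(3, 1907));
    ASSERT(LOC_FILE(loc) == 3);
    ASSERT(LOC_LINE(loc) == 1907);
}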
+#define GetTagAndValue(Stp, Tag, Val) \
+ do { \
+ BeamInstr __w; \
+ GetByte(Stp, __w); \
+ Tag = __w & 0x07; \
+ if ((__w & 0x08) == 0) { \
+ Val = __w >> 4; \
+ } else if ((__w & 0x10) == 0) { \
+ Val = ((__w >> 5) << 8); \
+ GetByte(Stp, __w); \
+ Val |= __w; \
+ } else { \
+ int __res = get_tag_and_value(Stp, __w, (Tag), &(Val)); \
+ if (__res < 0) goto load_error; \
+ Tag = (unsigned) __res; \
+ } \
} while (0)
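/*
 * A hedged sketch of the compact argument encoding that the macro above
 * decodes: the low three bits of the first byte are the tag; if bit 3 is
 * clear the value is the top four bits (0..15); if bit 3 is set but bit 4
 * is clear, the top three bits plus one more byte give an 11-bit value;
 * anything larger falls through to get_tag_and_value(). The function below
 * ("decode_short_arg", an assumed name) handles only the two short forms
 * and assumes the buffer holds enough bytes; it returns the number of
 * bytes consumed, or 0 for the long form.
 */
static int
decode_short_arg(byte* p, unsigned* tag, BeamInstr* val)
{
    unsigned first = p[0];

    *tag = first & 0x07;
    if ((first & 0x08) == 0) {
        *val = first >> 4;                   /* 4-bit value, one byte */
        return 1;
    } else if ((first & 0x10) == 0) {
        *val = ((first >> 5) << 8) | p[1];   /* 11-bit value, two bytes */
        return 2;
    }
    return 0;                                /* long form: not handled here */
}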
@@ -453,19 +496,20 @@ typedef struct {
} while (0)
-static int bin_load(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* modp, byte* bytes, int unloaded_size);
-static void init_state(LoaderState* stp);
-static int insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module,
- BeamInstr* code, Uint size, BeamInstr catches);
+static void free_state(LoaderState* stp);
+static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ BeamInstr* code, Uint size);
+static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
Uint num_types, Uint num_mandatory);
+static int verify_chunks(LoaderState* stp);
static int load_atom_table(LoaderState* stp);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
static int read_lambda_table(LoaderState* stp);
static int read_literal_table(LoaderState* stp);
+static int read_line_table(LoaderState* stp);
static int read_code_header(LoaderState* stp);
static int load_code(LoaderState* stp);
static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
@@ -489,8 +533,8 @@ static void load_printf(int line, LoaderState* context, char *fmt, ...);
static int transform_engine(LoaderState* st);
static void id_to_string(Uint id, char* s);
static void new_genop(LoaderState* stp);
-static int get_int_val(LoaderState* stp, Uint len_code, BeamInstr* result);
-static int get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result);
+static int get_tag_and_value(LoaderState* stp, Uint len_code,
+ unsigned tag, BeamInstr* result);
static int new_label(LoaderState* stp);
static void new_literal_patch(LoaderState* stp, int pos);
static void new_string_patch(LoaderState* stp, int pos);
@@ -504,6 +548,8 @@ static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
+static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
+ BeamInstr* modp, int idx);
static int must_swap_floats;
@@ -548,7 +594,7 @@ define_file(LoaderState* stp, char* name, int idx)
stp->file_left = stp->chunks[idx].size;
}
-int
+Eterm
erts_load_module(Process *c_p,
ErtsProcLocks c_p_locks,
Eterm group_leader, /* Group leader or NIL if none. */
@@ -557,29 +603,17 @@ erts_load_module(Process *c_p,
* On return, contains the actual module name.
*/
byte* code, /* Points to the code to load */
- int size) /* Size of code to load. */
+ Uint size) /* Size of code to load. */
{
- ErlDrvBinary* bin;
- int result;
+ LoaderState* stp = erts_alloc_loader_state();
+ Eterm retval;
- if (size >= 4 && code[0] == 'F' && code[1] == 'O' &&
- code[2] == 'R' && code[3] == '1') {
- /*
- * The BEAM module is not compressed.
- */
- result = bin_load(c_p, c_p_locks, group_leader, modp, code, size);
- } else {
- /*
- * The BEAM module is compressed (or possibly invalid/corrupted).
- */
- if ((bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)code, size)) == NULL) {
- return -1;
- }
- result = bin_load(c_p, c_p_locks, group_leader, modp,
- (byte*)bin->orig_bytes, bin->orig_size);
- driver_free_binary(bin);
+ retval = erts_prepare_loading(stp, c_p, group_leader, modp,
+ code, size);
+ if (retval != NIL) {
+ return retval;
}
- return result;
+ return erts_finish_loading(stp, c_p, c_p_locks, modp);
}
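/*
 * A hedged usage sketch of the split API above: prepare the module first
 * (erts_prepare_loading() frees the loader state and returns an error atom
 * such as am_badfile on failure) and install it afterwards with
 * erts_finish_loading(), which requires that no other process may run.
 * "load_sketch" is an assumed name; the three erts_* calls and their
 * signatures are the ones introduced in this patch.
 */
static Eterm
load_sketch(Process* c_p, ErtsProcLocks c_p_locks,
            Eterm group_leader, Eterm* modp, byte* code, Uint size)
{
    LoaderState* stp = erts_alloc_loader_state();
    Eterm res;

    /* Phase 1: parse and validate the BEAM file. */
    res = erts_prepare_loading(stp, c_p, group_leader, modp, code, size);
    if (res != NIL) {
        return res;             /* error atom; state already freed */
    }

    /* Phase 2: install the code as the current version of the module. */
    return erts_finish_loading(stp, c_p, c_p_locks, modp);
}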
/* #define LOAD_MEMORY_HARD_DEBUG 1*/
@@ -594,31 +628,28 @@ extern void check_allocated_block(Uint type, void *blk);
#define CHKBLK(TYPE,BLK) /* nothing */
#endif
-static int
-bin_load(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* modp, byte* bytes, int unloaded_size)
+Eterm
+erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
+ Eterm* modp, byte* code, Uint unloaded_size)
{
- LoaderState state;
- int rval = -1;
+ Eterm retval = am_badfile;
- init_state(&state);
- state.module = *modp;
- state.group_leader = group_leader;
-
- /*
- * Scan the IFF file.
- */
+ stp->module = *modp;
+ stp->group_leader = group_leader;
#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
erts_fprintf(stderr,"Loading a module\n");
#endif
+ /*
+ * Scan the IFF file.
+ */
+
CHKALLOC();
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- state.file_name = "IFF header for Beam file";
- state.file_p = bytes;
- state.file_left = unloaded_size;
- if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (!init_iff_file(stp, code, unloaded_size) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !verify_chunks(stp)) {
goto load_error;
}
@@ -626,19 +657,38 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the header for the code chunk.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "code chunk header", CODE_CHUNK);
- if (!read_code_header(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "code chunk header", CODE_CHUNK);
+ if (!read_code_header(stp)) {
goto load_error;
}
/*
+ * Initialize code area.
+ */
+ stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
+ stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
+ sizeof(BeamInstr) * stp->code_buffer_size);
+
+ stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
+ stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
+
+ stp->code[MI_ATTR_PTR] = 0;
+ stp->code[MI_ATTR_SIZE] = 0;
+ stp->code[MI_ATTR_SIZE_ON_HEAP] = 0;
+ stp->code[MI_COMPILE_PTR] = 0;
+ stp->code[MI_COMPILE_SIZE] = 0;
+ stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
+ stp->code[MI_NUM_BREAKPOINTS] = 0;
+
+
+ /*
* Read the atom table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "atom table", ATOM_CHUNK);
- if (!load_atom_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp)) {
goto load_error;
}
@@ -646,9 +696,9 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the import table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "import table", IMP_CHUNK);
- if (!load_import_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "import table", IMP_CHUNK);
+ if (!load_import_table(stp)) {
goto load_error;
}
@@ -656,10 +706,10 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the lambda (fun) table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- if (state.chunks[LAMBDA_CHUNK].size > 0) {
- define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
- if (!read_lambda_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (stp->chunks[LAMBDA_CHUNK].size > 0) {
+ define_file(stp, "lambda (fun) table", LAMBDA_CHUNK);
+ if (!read_lambda_table(stp)) {
goto load_error;
}
}
@@ -668,10 +718,22 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Read the literal table.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- if (state.chunks[LITERAL_CHUNK].size > 0) {
- define_file(&state, "literals table (constant pool)", LITERAL_CHUNK);
- if (!read_literal_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (stp->chunks[LITERAL_CHUNK].size > 0) {
+ define_file(stp, "literals table (constant pool)", LITERAL_CHUNK);
+ if (!read_literal_table(stp)) {
+ goto load_error;
+ }
+ }
+
+ /*
+ * Read the line table (if present).
+ */
+
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (stp->chunks[LINE_CHUNK].size > 0) {
+ define_file(stp, "line table", LINE_CHUNK);
+ if (!read_line_table(stp)) {
goto load_error;
}
}
@@ -680,15 +742,15 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* Load the code chunk.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- state.file_name = "code chunk";
- state.file_p = state.code_start;
- state.file_left = state.code_size;
- if (!load_code(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ stp->file_name = "code chunk";
+ stp->file_p = stp->code_start;
+ stp->file_left = stp->code_size;
+ if (!load_code(stp)) {
goto load_error;
}
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- if (!freeze_code(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ if (!freeze_code(stp)) {
goto load_error;
}
@@ -698,9 +760,49 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* loading the code, because it contains labels.)
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- define_file(&state, "export table", EXP_CHUNK);
- if (!read_export_table(&state)) {
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ define_file(stp, "export table", EXP_CHUNK);
+ if (!read_export_table(stp)) {
+ goto load_error;
+ }
+
+ /*
+ * Good so far.
+ */
+
+ retval = NIL;
+
+ load_error:
+ if (retval != NIL) {
+ free_state(stp);
+ }
+ return retval;
+}
+
+Eterm
+erts_finish_loading(LoaderState* stp, Process* c_p,
+ ErtsProcLocks c_p_locks, Eterm* modp)
+{
+ Eterm retval;
+
+ /*
+ * No other process may run since we will update the export
+ * table which is not protected by any locks.
+ */
+
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 ||
+ erts_smp_thr_progress_is_blocking());
+
+ /*
+ * Make current code for the module old and insert the new code
+ * as current. This will fail if there already exists old code
+ * for the module.
+ */
+
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ retval = insert_new_code(c_p, c_p_locks, stp->group_leader, stp->module,
+ stp->code, stp->loaded_size);
+ if (retval != NIL) {
goto load_error;
}
@@ -709,88 +811,43 @@ bin_load(Process *c_p, ErtsProcLocks c_p_locks,
* exported and imported functions. This can't fail.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- rval = insert_new_code(c_p, c_p_locks, state.group_leader, state.module,
- state.code, state.loaded_size, state.catches);
- if (rval < 0) {
- goto load_error;
- }
- CHKBLK(ERTS_ALC_T_CODE,state.code);
- final_touch(&state);
+ erts_export_consolidate();
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
+ final_touch(stp);
/*
 * Loading succeeded.
*/
- CHKBLK(ERTS_ALC_T_CODE,state.code);
+ CHKBLK(ERTS_ALC_T_CODE,stp->code);
#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
erts_fprintf(stderr,"Loaded %T\n",*modp);
#if 0
- debug_dump_code(state.code,state.ci);
+ debug_dump_code(stp->code,stp->ci);
#endif
#endif
- rval = 0;
- state.code = NULL; /* Prevent code from being freed. */
- *modp = state.module;
+ stp->code = NULL; /* Prevent code from being freed. */
+ *modp = stp->module;
/*
* If there is an on_load function, signal an error to
* indicate that the on_load function must be run.
*/
- if (state.on_load) {
- rval = -5;
+ if (stp->on_load) {
+ retval = am_on_load;
}
load_error:
- if (state.code != 0) {
- erts_free(ERTS_ALC_T_CODE, state.code);
- }
- if (state.labels != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
- }
- if (state.atom != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
- }
- if (state.import != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.import);
- }
- if (state.export != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
- }
- if (state.lambdas != state.def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
- }
- if (state.literals != NULL) {
- int i;
- for (i = 0; i < state.num_literals; i++) {
- if (state.literals[i].heap != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literals[i].heap);
- }
- }
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literals);
- }
- while (state.literal_patches != NULL) {
- LiteralPatch* next = state.literal_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literal_patches);
- state.literal_patches = next;
- }
- while (state.string_patches != NULL) {
- StringPatch* next = state.string_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.string_patches);
- state.string_patches = next;
- }
- while (state.genop_blocks) {
- GenOpBlock* next = state.genop_blocks->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.genop_blocks);
- state.genop_blocks = next;
- }
-
- return rval;
+ free_state(stp);
+ return retval;
}
-
-static void
-init_state(LoaderState* stp)
+LoaderState*
+erts_alloc_loader_state(void)
{
+ LoaderState* stp;
+
+ stp = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LoaderState));
+ stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
stp->arity = 0;
stp->specific_op = -1;
@@ -814,23 +871,98 @@ init_state(LoaderState* stp)
stp->string_patches = 0;
stp->may_load_nif = 0;
stp->on_load = 0;
+ stp->line_item = 0;
+ stp->line_instr = 0;
+ stp->func_line = 0;
+ stp->fname = 0;
+ return stp;
}
-static int
+static void
+free_state(LoaderState* stp)
+{
+ if (stp->bin != 0) {
+ driver_free_binary(stp->bin);
+ }
+ if (stp->code != 0) {
+ erts_free(ERTS_ALC_T_CODE, stp->code);
+ }
+ if (stp->labels != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->labels);
+ }
+ if (stp->atom != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->atom);
+ }
+ if (stp->import != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->import);
+ }
+ if (stp->export != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->export);
+ }
+ if (stp->lambdas != stp->def_lambdas) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->lambdas);
+ }
+ if (stp->literals != NULL) {
+ int i;
+ for (i = 0; i < stp->num_literals; i++) {
+ if (stp->literals[i].heap != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP,
+ (void *) stp->literals[i].heap);
+ }
+ }
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literals);
+ }
+ while (stp->literal_patches != NULL) {
+ LiteralPatch* next = stp->literal_patches->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literal_patches);
+ stp->literal_patches = next;
+ }
+ while (stp->string_patches != NULL) {
+ StringPatch* next = stp->string_patches->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->string_patches);
+ stp->string_patches = next;
+ }
+ while (stp->genop_blocks) {
+ GenOpBlock* next = stp->genop_blocks->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks);
+ stp->genop_blocks = next;
+ }
+
+ if (stp->line_item != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_item);
+ }
+
+ if (stp->line_instr != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_instr);
+ }
+
+ if (stp->func_line != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->func_line);
+ }
+
+ if (stp->fname != 0) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp->fname);
+ }
+
+ erts_free(ERTS_ALC_T_LOADER_TMP, stp);
+}
+
+static Eterm
insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module, BeamInstr* code, Uint size, BeamInstr catches)
+ Eterm group_leader, Eterm module, BeamInstr* code,
+ Uint size)
{
Module* modp;
- int rval;
+ Eterm retval;
int i;
- if ((rval = beam_make_current_old(c_p, c_p_locks, module)) < 0) {
+ if ((retval = beam_make_current_old(c_p, c_p_locks, module)) != NIL) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Module %T must be purged before loading\n",
module);
erts_send_error_to_logger(group_leader, dsbufp);
- return rval;
+ return retval;
}
/*
@@ -841,7 +973,7 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modp = erts_put_module(module);
modp->code = code;
modp->code_length = size;
- modp->catches = catches;
+ modp->catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
/*
* Update address table (used for finding a function from a PC value).
@@ -863,27 +995,51 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
modules[i].end = (BeamInstr *) (((byte *)code) + size);
num_loaded_modules++;
mid_module = &modules[num_loaded_modules/2];
- return 0;
+ return NIL;
}
static int
-scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+init_iff_file(LoaderState* stp, byte* code, Uint size)
{
- MD5_CTX context;
+ Uint form_id = MakeIffId('F', 'O', 'R', '1');
Uint id;
Uint count;
- int i;
+
+ if (size < 4) {
+ goto load_error;
+ }
/*
- * The binary must start with an IFF 'FOR1' chunk.
+ * Check if the module is compressed (or possibly invalid/corrupted).
*/
+ if (MakeIffId(code[0], code[1], code[2], code[3]) != form_id) {
+ stp->bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)code, size);
+ if (stp->bin == NULL) {
+ goto load_error;
+ }
+ code = (byte*)stp->bin->orig_bytes;
+ size = stp->bin->orig_size;
+ if (size < 4) {
+ goto load_error;
+ }
+ }
- GetInt(stp, 4, id);
- if (id != MakeIffId('F', 'O', 'R', '1')) {
+ /*
+ * The binary must start with an IFF 'FOR1' chunk.
+ */
+ if (MakeIffId(code[0], code[1], code[2], code[3]) != form_id) {
LoadError0(stp, "not a BEAM file: no IFF 'FOR1' chunk");
}
/*
+ * Initialize our "virtual file system".
+ */
+
+ stp->file_name = "IFF header for Beam file";
+ stp->file_p = code + 4;
+ stp->file_left = size - 4;
+
+ /*
* Retrieve the chunk size and verify it. If the size is equal to
* or less than the size of the binary, it is ok and we will use it
* as the limit for the logical file size.
@@ -904,6 +1060,21 @@ scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mand
if (id != MakeIffId('B', 'E', 'A', 'M')) {
LoadError0(stp, "not a BEAM file: IFF form type is not 'BEAM'");
}
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+/*
+ * Scan the IFF file. The header should have been verified by init_iff_file().
+ */
+static int
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+{
+ Uint count;
+ Uint id;
+ int i;
/*
* Initialize the chunks[] array in the state.
@@ -960,17 +1131,25 @@ scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mand
stp->file_p += count;
stp->file_left -= count;
}
+ return 1;
- /*
- * At this point, we have read the entire IFF file, and we
- * know that it is syntactically correct.
- *
- * Now check that it contains all mandatory chunks. At the
- * same time calculate the MD5 for the module.
- */
+ load_error:
+ return 0;
+}
+
+/*
+ * Verify that all mandatory chunks are present and calculate
+ * the MD5 of the module.
+ */
+
+static int
+verify_chunks(LoaderState* stp)
+{
+ int i;
+ MD5_CTX context;
MD5Init(&context);
- for (i = 0; i < num_mandatory; i++) {
+ for (i = 0; i < NUM_MANDATORY; i++) {
if (stp->chunks[i].start != NULL) {
MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
} else {
@@ -980,41 +1159,49 @@ scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mand
LoadError1(stp, "mandatory chunk of type '%s' not found\n", sbuf);
}
}
- if (LITERAL_CHUNK < num_types) {
- if (stp->chunks[LAMBDA_CHUNK].start != 0) {
- byte* start = stp->chunks[LAMBDA_CHUNK].start;
- Uint left = stp->chunks[LAMBDA_CHUNK].size;
- /*
- * The idea here is to ignore the OldUniq field for the fun; it is
- * based on the old broken hash function, which can be different
- * on little endian and big endian machines.
- */
- if (left >= 4) {
- static byte zero[4];
- MD5Update(&context, start, 4);
- start += 4;
- left -= 4;
+ /*
+ * If there is a lambda chunk, include parts of it in the MD5.
+ */
+ if (stp->chunks[LAMBDA_CHUNK].start != 0) {
+ byte* start = stp->chunks[LAMBDA_CHUNK].start;
+ Uint left = stp->chunks[LAMBDA_CHUNK].size;
+
+ /*
+ * The idea here is to ignore the OldUniq field for the fun; it is
+ * based on the old broken hash function, which can be different
+ * on little endian and big endian machines.
+ */
+ if (left >= 4) {
+ static byte zero[4];
+ MD5Update(&context, start, 4);
+ start += 4;
+ left -= 4;
- while (left >= 24) {
- /* Include: Function Arity Index NumFree */
- MD5Update(&context, start, 20);
- /* Set to zero: OldUniq */
- MD5Update(&context, zero, 4);
- start += 24;
- left -= 24;
- }
- }
- /* Can't happen for a correct 'FunT' chunk */
- if (left > 0) {
- MD5Update(&context, start, left);
+ while (left >= 24) {
+ /* Include: Function Arity Index NumFree */
+ MD5Update(&context, start, 20);
+ /* Set to zero: OldUniq */
+ MD5Update(&context, zero, 4);
+ start += 24;
+ left -= 24;
}
}
- if (stp->chunks[LITERAL_CHUNK].start != 0) {
- MD5Update(&context, stp->chunks[LITERAL_CHUNK].start,
- stp->chunks[LITERAL_CHUNK].size);
+ /* Can't happen for a correct 'FunT' chunk */
+ if (left > 0) {
+ MD5Update(&context, start, left);
}
}
+
+
+ /*
+ * If there is a literal chunk, include it in the MD5.
+ */
+ if (stp->chunks[LITERAL_CHUNK].start != 0) {
+ MD5Update(&context, stp->chunks[LITERAL_CHUNK].start,
+ stp->chunks[LITERAL_CHUNK].size);
+ }
+
MD5Final(stp->mod_md5, &context);
return 1;
@@ -1247,7 +1434,7 @@ static int
read_literal_table(LoaderState* stp)
{
int i;
- BeamInstr uncompressed_sz;
+ uLongf uncompressed_sz;
byte* uncompressed = 0;
GetInt(stp, 4, uncompressed_sz);
@@ -1257,7 +1444,7 @@ read_literal_table(LoaderState* stp)
LoadError0(stp, "failed to uncompress literal table (constant pool)");
}
stp->file_p = uncompressed;
- stp->file_left = uncompressed_sz;
+ stp->file_left = (unsigned) uncompressed_sz;
GetInt(stp, 4, stp->num_literals);
stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
stp->num_literals * sizeof(Literal));
@@ -1276,12 +1463,14 @@ read_literal_table(LoaderState* stp)
GetInt(stp, 4, sz); /* Size of external term format. */
GetString(stp, p, sz);
- if ((heap_size = erts_decode_ext_size(p, sz, 1)) < 0) {
+ if ((heap_size = erts_decode_ext_size(p, sz)) < 0) {
LoadError1(stp, "literal %d: bad external format", i);
}
hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_LOADER_TMP,
heap_size*sizeof(Eterm));
- val = erts_decode_ext(&hp, NULL, &p);
+ stp->literals[i].off_heap.first = 0;
+ stp->literals[i].off_heap.overhead = 0;
+ val = erts_decode_ext(&hp, &stp->literals[i].off_heap, &p);
stp->literals[i].heap_size = hp - stp->literals[i].heap;
if (stp->literals[i].heap_size > heap_size) {
erl_exit(1, "overrun by %d word(s) for literal heap, term %d",
@@ -1303,6 +1492,138 @@ read_literal_table(LoaderState* stp)
return 0;
}
+static int
+read_line_table(LoaderState* stp)
+{
+ unsigned version;
+ ERTS_DECLARE_DUMMY(unsigned flags);
+ int num_line_items;
+ BeamInstr* lp;
+ int i;
+ BeamInstr fname_index;
+ BeamInstr tag;
+
+ /*
+ * If the emulator flag ignoring the line information was given,
+ * return immediately.
+ */
+
+ if (erts_no_line_info) {
+ return 1;
+ }
+
+ /*
+ * Check version of line table.
+ */
+
+ GetInt(stp, 4, version);
+ if (version != 0) {
+ /*
+ * Wrong version. Silently ignore the line number chunk.
+ */
+ return 1;
+ }
+
+ /*
+ * Read the remaining header words. The flag word is reserved
+ * for possible future use; for the moment we ignore it.
+ */
+ GetInt(stp, 4, flags);
+ GetInt(stp, 4, stp->num_line_instrs);
+ GetInt(stp, 4, num_line_items);
+ GetInt(stp, 4, stp->num_fnames);
+
+ /*
+ * Calculate space and allocate memory for the line item table.
+ */
+
+ num_line_items++;
+ lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ num_line_items * sizeof(BeamInstr));
+ stp->line_item = lp;
+ stp->num_line_items = num_line_items;
+
+ /*
+ * The zeroth entry in the line item table is special.
+ * It contains the undefined location.
+ */
+
+ *lp++ = LINE_INVALID_LOCATION;
+ num_line_items--;
+
+ /*
+ * Read all the line items.
+ */
+
+ stp->loc_size = stp->num_fnames ? 4 : 2;
+ fname_index = 0;
+ while (num_line_items-- > 0) {
+ BeamInstr val;
+ BeamInstr loc;
+
+ GetTagAndValue(stp, tag, val);
+ if (tag == TAG_i) {
+ if (IS_VALID_LOCATION(fname_index, val)) {
+ loc = MAKE_LOCATION(fname_index, val);
+ } else {
+ /*
+ * Too many files or huge line number. Silently invalidate
+ * the location.
+ */
+ loc = LINE_INVALID_LOCATION;
+ }
+ *lp++ = loc;
+ if (val > 0xFFFF) {
+ stp->loc_size = 4;
+ }
+ } else if (tag == TAG_a) {
+ if (val > stp->num_fnames) {
+ LoadError2(stp, "file index overflow (%d/%d)",
+ val, stp->num_fnames);
+ }
+ fname_index = val;
+ num_line_items++;
+ } else {
+ LoadError1(stp, "bad tag '%c' (expected 'a' or 'i')",
+ tag_to_letter[tag]);
+ }
+ }
+
+ /*
+ * Read all filenames.
+ */
+
+ if (stp->num_fnames != 0) {
+ stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_fnames *
+ sizeof(Eterm));
+ for (i = 0; i < stp->num_fnames; i++) {
+ byte* fname;
+ Uint n;
+
+ GetInt(stp, 2, n);
+ GetString(stp, fname, n);
+ stp->fname[i] = am_atom_put((char*)fname, n);
+ }
+ }
+
+ /*
+ * Allocate the arrays to be filled while code is being loaded.
+ */
+ stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_line_instrs *
+ sizeof(LineInstr));
+ stp->current_li = 0;
+ stp->func_line = (int *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_functions *
+ sizeof(int));
+
+ return 1;
+
+ load_error:
+ return 0;
+}
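/*
 * A hedged summary of the "Line" chunk layout as decoded above. The fixed
 * header is five big-endian 32-bit words; the structure below is only
 * illustrative (the real reader uses GetInt()), and the variable-length
 * parts are described in the trailing comment.
 */
typedef struct {
    Uint32 version;          /* Only version 0 is understood. */
    Uint32 flags;            /* Reserved; currently ignored. */
    Uint32 num_line_instrs;  /* Line instructions in the code chunk. */
    Uint32 num_line_items;   /* Encoded line items that follow. */
    Uint32 num_fnames;       /* File names that follow the items. */
} LineChunkHeaderSketch;
/*
 * After the header come num_line_items compactly encoded terms, where a
 * TAG_i value is a line number in the current file and a TAG_a value
 * switches the current file index; then num_fnames file names, each a
 * 2-byte big-endian length followed by that many name bytes.
 */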
+
static int
read_code_header(LoaderState* stp)
@@ -1337,10 +1658,15 @@ read_code_header(LoaderState* stp)
/*
* Verify the number of the highest opcode used.
*/
-
GetInt(stp, 4, opcode_max);
if (opcode_max > MAX_GENERIC_OPCODE) {
- LoadError2(stp, "use of opcode %d; this emulator supports only up to %d",
+ LoadError2(stp,
+ "This BEAM file was compiled for a later version"
+ " of the run-time system than " ERLANG_OTP_RELEASE ".\n"
+ " To fix this, please recompile this module with an "
+ ERLANG_OTP_RELEASE " compiler.\n"
+ " (Use of opcode %d; this emulator supports "
+ "only up to %d.)",
opcode_max, MAX_GENERIC_OPCODE);
}
@@ -1361,25 +1687,6 @@ read_code_header(LoaderState* stp)
#endif
}
- /*
- * Initialize code area.
- */
- stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
- stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
- sizeof(BeamInstr) * stp->code_buffer_size);
-
- stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
- stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
-
- stp->code[MI_ATTR_PTR] = 0;
- stp->code[MI_ATTR_SIZE] = 0;
- stp->code[MI_ATTR_SIZE_ON_HEAP] = 0;
- stp->code[MI_COMPILE_PTR] = 0;
- stp->code[MI_COMPILE_SIZE] = 0;
- stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- stp->code[MI_NUM_BREAKPOINTS] = 0;
-
- stp->new_bs_put_strings = 0;
stp->catches = 0;
return 1;
@@ -1412,7 +1719,7 @@ load_code(LoaderState* stp)
{
int i;
int ci;
- int last_func_start = 0;
+ int last_func_start = 0; /* Needed by nif loading and line instructions */
char* sign;
int arg; /* Number of current argument. */
int num_specific; /* Number of specific ops for current. */
@@ -1425,6 +1732,14 @@ load_code(LoaderState* stp)
GenOp** last_op_next = NULL;
int arity;
+ /*
+ * The size of the loaded func_info instruction is needed
+ * by both the nif functionality and line instructions.
+ */
+ enum {
+ FUNC_INFO_SZ = 5
+ };
+
code = stp->code;
code_buffer_size = stp->code_buffer_size;
ci = stp->ci;
@@ -1470,46 +1785,15 @@ load_code(LoaderState* stp)
last_op->arity = 0;
ASSERT(arity <= MAX_OPARGS);
-#define GetValue(Stp, First, Val) \
- do { \
- if (((First) & 0x08) == 0) { \
- Val = (First) >> 4; \
- } else if (((First) & 0x10) == 0) { \
- BeamInstr __w; \
- GetByte(Stp, __w); \
- Val = (((First) >> 5) << 8) | __w; \
- } else { \
- if (!get_int_val(Stp, (First), &(Val))) goto load_error; \
- } \
- } while (0)
-
for (arg = 0; arg < arity; arg++) {
- BeamInstr first;
-
- GetByte(stp, first);
- last_op->a[arg].type = first & 0x07;
+ GetTagAndValue(stp, last_op->a[arg].type, last_op->a[arg].val);
switch (last_op->a[arg].type) {
case TAG_i:
- if ((first & 0x08) == 0) {
- last_op->a[arg].val = first >> 4;
- } else if ((first & 0x10) == 0) {
- BeamInstr w;
- GetByte(stp, w);
- ASSERT(first < 0x800);
- last_op->a[arg].val = ((first >> 5) << 8) | w;
- } else {
- int i = get_erlang_integer(stp, first, &(last_op->a[arg].val));
- if (i < 0) {
- goto load_error;
- }
- last_op->a[arg].type = i;
- }
- break;
case TAG_u:
- GetValue(stp, first, last_op->a[arg].val);
+ case TAG_q:
+ case TAG_o:
break;
case TAG_x:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val == 0) {
last_op->a[arg].type = TAG_r;
} else if (last_op->a[arg].val >= MAX_REG) {
@@ -1518,7 +1802,6 @@ load_code(LoaderState* stp)
}
break;
case TAG_y:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val >= MAX_REG) {
LoadError1(stp, "invalid y register number: %u",
last_op->a[arg].val);
@@ -1526,7 +1809,6 @@ load_code(LoaderState* stp)
last_op->a[arg].val += CP_SIZE;
break;
case TAG_a:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val == 0) {
last_op->a[arg].type = TAG_n;
} else if (last_op->a[arg].val >= stp->num_atoms) {
@@ -1536,7 +1818,6 @@ load_code(LoaderState* stp)
}
break;
case TAG_f:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val == 0) {
last_op->a[arg].type = TAG_p;
} else if (last_op->a[arg].val >= stp->num_labels) {
@@ -1544,7 +1825,6 @@ load_code(LoaderState* stp)
}
break;
case TAG_h:
- GetValue(stp, first, last_op->a[arg].val);
if (last_op->a[arg].val > 65535) {
LoadError1(stp, "invalid range for character data type: %u",
last_op->a[arg].val);
@@ -1552,11 +1832,9 @@ load_code(LoaderState* stp)
break;
case TAG_z:
{
- BeamInstr ext_tag;
unsigned tag;
- GetValue(stp, first, ext_tag);
- switch (ext_tag) {
+ switch (last_op->a[arg].val) {
case 0: /* Floating point number */
{
Eterm* hp;
@@ -1648,7 +1926,8 @@ load_code(LoaderState* stp)
break;
}
default:
- LoadError1(stp, "invalid extended tag %d", ext_tag);
+ LoadError1(stp, "invalid extended tag %d",
+ last_op->a[arg].val);
break;
}
}
@@ -1659,7 +1938,6 @@ load_code(LoaderState* stp)
}
last_op->arity++;
}
-#undef GetValue
ASSERT(arity == last_op->arity);
@@ -1701,14 +1979,6 @@ load_code(LoaderState* stp)
}
/*
- * Special error message instruction.
- */
- if (stp->genop->op == genop_too_old_compiler_0) {
- LoadError0(stp, "please re-compile this module with an "
- ERLANG_OTP_RELEASE " compiler");
- }
-
- /*
* From the collected generic instruction, find the specific
* instruction.
*/
@@ -1759,7 +2029,27 @@ load_code(LoaderState* stp)
ERLANG_OTP_RELEASE " compiler ");
}
- LoadError0(stp, "no specific operation found");
+ /*
+ * Some generic instructions should have a special
+ * error message.
+ */
+ switch (stp->genop->op) {
+ case genop_too_old_compiler_0:
+ LoadError0(stp, "please re-compile this module with an "
+ ERLANG_OTP_RELEASE " compiler");
+ case genop_unsupported_guard_bif_3:
+ {
+ Eterm Mod = (Eterm) stp->genop->a[0].val;
+ Eterm Name = (Eterm) stp->genop->a[1].val;
+ Uint arity = (Uint) stp->genop->a[2].val;
+ FREE_GENOP(stp, stp->genop);
+ stp->genop = 0;
+ LoadError3(stp, "unsupported guard BIF: %T:%T/%d\n",
+ Mod, Name, arity);
+ }
+ default:
+ LoadError0(stp, "no specific operation found");
+ }
}
stp->specific_op = specific;
@@ -2048,7 +2338,6 @@ load_code(LoaderState* stp)
case op_i_func_info_IaaI:
{
Uint offset;
- enum { FINFO_SZ = 5 };
if (function_number >= stp->num_functions) {
LoadError1(stp, "too many functions in module (header said %d)",
@@ -2056,27 +2345,37 @@ load_code(LoaderState* stp)
}
if (stp->may_load_nif) {
- const int finfo_ix = ci - FINFO_SZ;
+ const int finfo_ix = ci - FUNC_INFO_SZ;
enum { MIN_FUNC_SZ = 3 };
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
CodeNeed(pad);
- sys_memmove(&code[finfo_ix+pad], &code[finfo_ix], FINFO_SZ*sizeof(BeamInstr));
+ sys_memmove(&code[finfo_ix+pad], &code[finfo_ix],
+ FUNC_INFO_SZ*sizeof(BeamInstr));
sys_memset(&code[finfo_ix], 0, pad*sizeof(BeamInstr));
ci += pad;
stp->labels[last_label].value += pad;
}
}
last_func_start = ci;
+
+ /*
+ * Save the current offset into the line instruction array.
+ */
+
+ if (stp->func_line) {
+ stp->func_line[function_number] = stp->current_li;
+ }
+
/*
* Save context for error messages.
*/
stp->function = code[ci-2];
stp->arity = code[ci-1];
- ASSERT(stp->labels[last_label].value == ci - FINFO_SZ);
+ ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ);
offset = MI_FUNCTIONS + function_number;
code[offset] = stp->labels[last_label].patches;
stp->labels[last_label].patches = offset;
@@ -2099,32 +2398,6 @@ load_code(LoaderState* stp)
stp->on_load = ci;
break;
case op_bs_put_string_II:
- {
- /*
- * At entry:
- *
- * code[ci-3] &&lb_i_new_bs_put_string_II
- * code[ci-2] length of string
- * code[ci-1] offset into string table
- *
- * Since we don't know the address of the string table yet,
- * just check the offset and length for validity, and use
- * the instruction field as a link field to link all put_string
- * instructions into a single linked list. At exit:
- *
- * code[ci-3] pointer to next i_new_bs_put_string instruction (or 0
- * if this is the last)
- */
- Uint offset = code[ci-1];
- Uint len = code[ci-2];
- unsigned strtab_size = stp->chunks[STR_CHUNK].size;
- if (offset > strtab_size || offset + len > strtab_size) {
- LoadError2(stp, "invalid string reference %d, size %d", offset, len);
- }
- code[ci-3] = stp->new_bs_put_strings;
- stp->new_bs_put_strings = ci - 3;
- }
- break;
case op_i_bs_match_string_rfII:
case op_i_bs_match_string_xfII:
new_string_patch(stp, ci-1);
@@ -2139,6 +2412,45 @@ load_code(LoaderState* stp)
stp->catches = ci-3;
break;
+ case op_line_I:
+ if (stp->line_item) {
+ BeamInstr item = code[ci-1];
+ BeamInstr loc;
+ int li;
+ if (item >= stp->num_line_items) {
+ LoadError2(stp, "line instruction index overflow (%d/%d)",
+ item, stp->num_line_items);
+ }
+ li = stp->current_li;
+ if (li >= stp->num_line_instrs) {
+ LoadError2(stp, "line instruction table overflow (%d/%d)",
+ li, stp->num_line_instrs);
+ }
+ loc = stp->line_item[item];
+
+ if (ci - 2 == last_func_start) {
+ /*
+ * This line instruction directly follows the func_info
+ * instruction. Its address must be adjusted to point to
+ * the func_info instruction.
+ */
+ stp->line_instr[li].pos = last_func_start - FUNC_INFO_SZ;
+ stp->line_instr[li].loc = stp->line_item[item];
+ stp->current_li++;
+ } else if (li <= stp->func_line[function_number-1] ||
+ stp->line_instr[li-1].loc != loc) {
+ /*
+ * Only store the location if it is different
+ * from the previous location in the same function.
+ */
+ stp->line_instr[li].pos = ci - 2;
+ stp->line_instr[li].loc = stp->line_item[item];
+ stp->current_li++;
+ }
+ }
+ ci -= 2; /* Get rid of the instruction */
+ break;
+
/*
* End of code found.
*/
@@ -2175,6 +2487,8 @@ load_code(LoaderState* stp)
#define no_fpe_signals(St) 0
#endif
+#define never(St) 0
+
/*
* Predicate that tests whether a jump table can be used.
*/
@@ -2562,13 +2876,8 @@ should_gen_heap_bin(LoaderState* stp, GenOpArg Src)
static int
binary_too_big(LoaderState* stp, GenOpArg Size)
{
- return Size.type == TAG_u && ((Size.val >> (8*sizeof(Uint)-3)) != 0);
-}
-
-static int
-binary_too_big_bits(LoaderState* stp, GenOpArg Size)
-{
- return Size.type == TAG_u && (((Size.val+7)/8) >> (8*sizeof(Uint)-3) != 0);
+ return Size.type == TAG_o ||
+ (Size.type == TAG_u && ((Size.val >> (8*sizeof(Uint)-3)) != 0));
}
static GenOp*
@@ -3191,7 +3500,6 @@ gen_jump_tab(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpAr
}
size = max - min + 1;
-
/*
* Allocate structure and fill in the fixed fields.
*/
@@ -3223,7 +3531,7 @@ gen_jump_tab(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpAr
op->a[i] = Fail;
}
for (i = 0; i < Size.val; i += 2) {
- int index;
+ Sint index;
index = fixed_args+Rest[i].val-min;
ASSERT(fixed_args <= index && index < arity);
op->a[index] = Rest[i+1];
@@ -3435,10 +3743,7 @@ gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
BifFunction bf;
NEW_GENOP(stp, op);
- op->op = genop_i_gc_bif1_5;
- op->arity = 5;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->next = NULL;
bf = stp->import[Bif.val].bf;
/* The translations here need to have a reverse counterpart in
beam_emu.c:translate_gc_bif for error handling to work properly. */
@@ -3459,19 +3764,30 @@ gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
} else if (bf == trunc_1) {
op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1;
} else {
- abort();
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
}
+ op->op = genop_i_gc_bif1_5;
+ op->arity = 5;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
op->a[2] = Src;
op->a[3] = Live;
op->a[4] = Dst;
- op->next = NULL;
return op;
}
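/*
 * The three gen_guard_bif* generators (this one and the two that follow)
 * share the same fallback: when the imported BIF is not one of the
 * recognized gc BIFs, they now emit an unsupported_guard_bif instruction
 * carrying the MFA, so loading fails with a readable error instead of
 * aborting the emulator. A hedged sketch of that shared step;
 * "fill_unsupported_guard_bif" is an assumed helper, not part of the patch.
 */
static void
fill_unsupported_guard_bif(LoaderState* stp, GenOp* op, GenOpArg Bif)
{
    op->op = genop_unsupported_guard_bif_3;
    op->arity = 3;
    op->a[0].type = TAG_a;
    op->a[0].val = stp->import[Bif.val].module;
    op->a[1].type = TAG_a;
    op->a[1].val = stp->import[Bif.val].function;
    op->a[2].type = TAG_u;
    op->a[2].val = stp->import[Bif.val].arity;
}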
/*
- * This is used by the ops.tab rule that rewrites gc_bifs with two parameters
+ * This is used by the ops.tab rule that rewrites gc_bifs with two parameters.
* The instruction returned is then again rewritten to an i_load instruction
- * folowed by i_gc_bif2_jIId, to handle literals properly.
+ * followed by i_gc_bif2_jIId, to handle literals properly.
* As opposed to the i_gc_bif1_jIsId, the instruction i_gc_bif2_jIId is
* always rewritten, regardless of if there actually are any literals.
*/
@@ -3483,31 +3799,39 @@ gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
BifFunction bf;
NEW_GENOP(stp, op);
- op->op = genop_ii_gc_bif2_6;
- op->arity = 6;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->next = NULL;
bf = stp->import[Bif.val].bf;
/* The translations here need to have a reverse counterpart in
beam_emu.c:translate_gc_bif for error handling to work properly. */
if (bf == binary_part_2) {
op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2;
} else {
- abort();
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
}
+ op->op = genop_ii_gc_bif2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
op->a[2] = S1;
op->a[3] = S2;
op->a[4] = Live;
op->a[5] = Dst;
- op->next = NULL;
return op;
}
/*
- * This is used by the ops.tab rule that rewrites gc_bifs with three parameters
+ * This is used by the ops.tab rule that rewrites gc_bifs with three parameters.
* The instruction returned is then again rewritten to a move instruction that
* uses r[0] for temp storage, followed by an i_load instruction,
- * folowed by i_gc_bif3_jIsId, to handle literals properly. Rewriting
+ * followed by i_gc_bif3_jIsId, to handle literals properly. Rewriting
* always occur, as with the gc_bif2 counterpart.
*/
static GenOp*
@@ -3518,18 +3842,27 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
BifFunction bf;
NEW_GENOP(stp, op);
- op->op = genop_ii_gc_bif3_7;
- op->arity = 7;
- op->a[0] = Fail;
- op->a[1].type = TAG_u;
+ op->next = NULL;
bf = stp->import[Bif.val].bf;
/* The translations here need to have a reverse counterpart in
beam_emu.c:translate_gc_bif for error handling to work properly. */
if (bf == binary_part_3) {
op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3;
} else {
- abort();
+ op->op = genop_unsupported_guard_bif_3;
+ op->arity = 3;
+ op->a[0].type = TAG_a;
+ op->a[0].val = stp->import[Bif.val].module;
+ op->a[1].type = TAG_a;
+ op->a[1].val = stp->import[Bif.val].function;
+ op->a[2].type = TAG_u;
+ op->a[2].val = stp->import[Bif.val].arity;
+ return op;
}
+ op->op = genop_ii_gc_bif3_7;
+ op->arity = 7;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
op->a[2] = S1;
op->a[3] = S2;
op->a[4] = S3;
@@ -3600,15 +3933,14 @@ freeze_code(LoaderState* stp)
{
BeamInstr* code = stp->code;
Uint *literal_end = NULL;
- Uint index;
int i;
byte* str_table;
unsigned strtab_size = stp->chunks[STR_CHUNK].size;
unsigned attr_size = stp->chunks[ATTR_CHUNK].size;
unsigned compile_size = stp->chunks[COMPILE_CHUNK].size;
Uint size;
- unsigned catches;
Sint decoded_size;
+ Uint line_size;
/*
* Verify that there was a correct 'FunT' chunk if there were
@@ -3619,13 +3951,19 @@ freeze_code(LoaderState* stp)
LoadError0(stp, stp->lambda_error);
}
-
/*
* Calculate the final size of the code.
*/
-
- size = (stp->ci * sizeof(BeamInstr)) + (stp->total_literal_size * sizeof(Eterm)) +
- strtab_size + attr_size + compile_size;
+ if (stp->line_instr == 0) {
+ line_size = 0;
+ } else {
+ line_size = (MI_LINE_FUNC_TAB + (stp->num_functions + 1) +
+ (stp->current_li+1) + stp->num_fnames) *
+ sizeof(Eterm) + (stp->current_li+1) * stp->loc_size;
+ }
+ size = (stp->ci * sizeof(BeamInstr)) +
+ (stp->total_literal_size * sizeof(Eterm)) +
+ strtab_size + attr_size + compile_size + line_size;
/*
* Move the code to its final location.
@@ -3662,6 +4000,8 @@ freeze_code(LoaderState* stp)
Uint* low;
Uint* high;
LiteralPatch* lp;
+ struct erl_off_heap_header* off_heap = 0;
+ struct erl_off_heap_header** off_heap_last = &off_heap;
low = (Uint *) (code+stp->ci);
high = low + stp->total_literal_size;
@@ -3670,6 +4010,7 @@ freeze_code(LoaderState* stp)
ptr = low;
for (i = 0; i < stp->num_literals; i++) {
Uint offset;
+ struct erl_off_heap_header* t_off_heap;
sys_memcpy(ptr, stp->literals[i].heap,
stp->literals[i].heap_size*sizeof(Eterm));
@@ -3684,9 +4025,19 @@ freeze_code(LoaderState* stp)
*ptr++ = offset_ptr(val, offset);
break;
case TAG_PRIMARY_HEADER:
- ptr++;
- if (header_is_thing(val)) {
- ptr += thing_arityval(val);
+ if (header_is_transparent(val)) {
+ ptr++;
+ } else {
+ if (thing_subtag(val) == REFC_BINARY_SUBTAG) {
+ struct erl_off_heap_header* oh;
+
+ oh = (struct erl_off_heap_header*) ptr;
+ if (oh->next) {
+ Eterm** uptr = (Eterm **) (void *) &oh->next;
+ *uptr += offset;
+ }
+ }
+ ptr += 1 + thing_arityval(val);
}
break;
default:
@@ -3695,7 +4046,23 @@ freeze_code(LoaderState* stp)
}
}
ASSERT(ptr == high);
+
+ /*
+ * Re-link the off_heap list for this term onto the
+ * off_heap list for the entire module.
+ */
+ t_off_heap = stp->literals[i].off_heap.first;
+ if (t_off_heap) {
+ t_off_heap = (struct erl_off_heap_header *)
+ offset_ptr((UWord) t_off_heap, offset);
+ while (t_off_heap) {
+ *off_heap_last = t_off_heap;
+ off_heap_last = &t_off_heap->next;
+ t_off_heap = t_off_heap->next;
+ }
+ }
}
+ code[MI_LITERALS_OFF_HEAP] = (BeamInstr) off_heap;
lp = stp->literal_patches;
while (lp != 0) {
BeamInstr* op_ptr;
@@ -3713,21 +4080,72 @@ freeze_code(LoaderState* stp)
}
literal_end += stp->total_literal_size;
}
-
+ CHKBLK(ERTS_ALC_T_CODE,code);
+
/*
- * Place the string table and, optionally, attributes, after the literal heap.
+ * If there is line information, place it here.
*/
- CHKBLK(ERTS_ALC_T_CODE,code);
+ if (stp->line_instr == 0) {
+ code[MI_LINE_TABLE] = (BeamInstr) 0;
+ str_table = (byte *) literal_end;
+ } else {
+ Eterm* line_tab = (Eterm *) literal_end;
+ Eterm* p;
+ int ftab_size = stp->num_functions;
+ int num_instrs = stp->current_li;
+ Eterm* first_line_item;
+
+ code[MI_LINE_TABLE] = (BeamInstr) line_tab;
+ p = line_tab + MI_LINE_FUNC_TAB;
+
+ first_line_item = (p + ftab_size + 1);
+ for (i = 0; i < ftab_size; i++) {
+ *p++ = (Eterm) (BeamInstr) (first_line_item + stp->func_line[i]);
+ }
+ *p++ = (Eterm) (BeamInstr) (first_line_item + num_instrs);
+ ASSERT(p == first_line_item);
+ for (i = 0; i < num_instrs; i++) {
+ *p++ = (Eterm) (BeamInstr) (code + stp->line_instr[i].pos);
+ }
+ *p++ = (Eterm) (BeamInstr) (code + stp->ci - 1);
+
+ line_tab[MI_LINE_FNAME_PTR] = (Eterm) (BeamInstr) p;
+ memcpy(p, stp->fname, stp->num_fnames*sizeof(Eterm));
+ p += stp->num_fnames;
+
+ line_tab[MI_LINE_LOC_TAB] = (Eterm) (BeamInstr) p;
+ line_tab[MI_LINE_LOC_SIZE] = stp->loc_size;
+ if (stp->loc_size == 2) {
+ Uint16* locp = (Uint16 *) p;
+ for (i = 0; i < num_instrs; i++) {
+ *locp++ = (Uint16) stp->line_instr[i].loc;
+ }
+ *locp++ = LINE_INVALID_LOCATION;
+ str_table = (byte *) locp;
+ } else {
+ Uint32* locp = (Uint32 *) p;
+ ASSERT(stp->loc_size == 4);
+ for (i = 0; i < num_instrs; i++) {
+ *locp++ = stp->line_instr[i].loc;
+ }
+ *locp++ = LINE_INVALID_LOCATION;
+ str_table = (byte *) locp;
+ }
- sys_memcpy(literal_end, stp->chunks[STR_CHUNK].start, strtab_size);
+ CHKBLK(ERTS_ALC_T_CODE,code);
+ }
+
+ /*
+ * Place the string table and, optionally, attributes here.
+ */
+ sys_memcpy(str_table, stp->chunks[STR_CHUNK].start, strtab_size);
CHKBLK(ERTS_ALC_T_CODE,code);
- str_table = (byte *) literal_end;
if (attr_size) {
byte* attr = str_table + strtab_size;
sys_memcpy(attr, stp->chunks[ATTR_CHUNK].start, stp->chunks[ATTR_CHUNK].size);
code[MI_ATTR_PTR] = (BeamInstr) attr;
code[MI_ATTR_SIZE] = (BeamInstr) stp->chunks[ATTR_CHUNK].size;
- decoded_size = erts_decode_ext_size(attr, attr_size, 0);
+ decoded_size = erts_decode_ext_size(attr, attr_size);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of module attributes");
}
@@ -3745,7 +4163,7 @@ freeze_code(LoaderState* stp)
CHKBLK(ERTS_ALC_T_CODE,code);
code[MI_COMPILE_SIZE] = (BeamInstr) stp->chunks[COMPILE_CHUNK].size;
CHKBLK(ERTS_ALC_T_CODE,code);
- decoded_size = erts_decode_ext_size(compile_info, compile_size, 0);
+ decoded_size = erts_decode_ext_size(compile_info, compile_size);
CHKBLK(ERTS_ALC_T_CODE,code);
if (decoded_size < 0) {
LoadError0(stp, "bad external term representation of compilation information");
@@ -3762,20 +4180,8 @@ freeze_code(LoaderState* stp)
((byte *) code) + size);
/*
- * Go through all i_new_bs_put_strings instructions, restore the pointer to
- * the instruction and convert string offsets to pointers (to the
- * FIRST character).
+ * Patch all instructions that refer to the string table.
*/
-
- index = stp->new_bs_put_strings;
- while (index != 0) {
- Uint next = code[index];
- code[index] = BeamOpCode(op_bs_put_string_II);
- code[index+2] = (BeamInstr) (str_table + code[index+2]);
- index = next;
- }
- CHKBLK(ERTS_ALC_T_CODE,code);
-
{
StringPatch* sp = stp->string_patches;
@@ -3816,21 +4222,6 @@ freeze_code(LoaderState* stp)
CHKBLK(ERTS_ALC_T_CODE,code);
/*
- * Fix all catch_yf instructions.
- */
- index = stp->catches;
- catches = BEAM_CATCHES_NIL;
- while (index != 0) {
- BeamInstr next = code[index];
- code[index] = BeamOpCode(op_catch_yf);
- catches = beam_catches_cons((BeamInstr *)code[index+2], catches);
- code[index+2] = make_catch(catches);
- index = next;
- }
- stp->catches = catches;
- CHKBLK(ERTS_ALC_T_CODE,code);
-
- /*
* Save the updated code pointer and code size.
*/
@@ -3855,6 +4246,26 @@ final_touch(LoaderState* stp)
{
int i;
int on_load = stp->on_load;
+ unsigned catches;
+ Uint index;
+ BeamInstr* code = stp->code;
+ Module* modp;
+
+ /*
+ * Allocate catch indices and fix up all catch_yf instructions.
+ */
+
+ index = stp->catches;
+ catches = BEAM_CATCHES_NIL;
+ while (index != 0) {
+ BeamInstr next = code[index];
+ code[index] = BeamOpCode(op_catch_yf);
+ catches = beam_catches_cons((BeamInstr *)code[index+2], catches);
+ code[index+2] = make_catch(catches);
+ index = next;
+ }
+ modp = erts_put_module(stp->module);
+ modp->catches = catches;
/*
* Export functions.
@@ -3938,6 +4349,7 @@ transform_engine(LoaderState* st)
GenOp* instr;
Uint* pc;
int rval;
+ static Uint restart_fail[1] = {TOP_fail};
ASSERT(gen_opc[st->genop->op].transform != -1);
pc = op_transform + gen_opc[st->genop->op].transform;
@@ -3951,7 +4363,6 @@ transform_engine(LoaderState* st)
ASSERT(restart != NULL);
pc = restart;
ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
- ASSERT(*pc == TOP_try_me_else || *pc == TOP_fail);
instr = st->genop;
#define RETURN(r) rval = (r); goto do_return;
@@ -3964,7 +4375,9 @@ transform_engine(LoaderState* st)
op = *pc++;
switch (op) {
- case TOP_is_op:
+ case TOP_next_instr:
+ instr = instr->next;
+ ap = 0;
if (instr == NULL) {
/*
* We'll need at least one more instruction to decide whether
@@ -4151,10 +4564,6 @@ transform_engine(LoaderState* st)
case TOP_next_arg:
ap++;
break;
- case TOP_next_instr:
- instr = instr->next;
- ap = 0;
- break;
case TOP_commit:
instr = instr->next; /* The next_instr was optimized away. */
@@ -4172,8 +4581,8 @@ transform_engine(LoaderState* st)
#endif
break;
-#if defined(TOP_call)
- case TOP_call:
+#if defined(TOP_call_end)
+ case TOP_call_end:
{
GenOp** lastp;
GenOp* new_instr;
@@ -4210,7 +4619,7 @@ transform_engine(LoaderState* st)
*lastp = st->genop;
st->genop = new_instr;
}
- break;
+ RETURN(TE_OK);
#endif
case TOP_new_instr:
/*
@@ -4219,12 +4628,10 @@ transform_engine(LoaderState* st)
NEW_GENOP(st, instr);
instr->next = st->genop;
st->genop = instr;
+ instr->op = op = *pc++;
+ instr->arity = gen_opc[op].arity;
ap = 0;
break;
- case TOP_store_op:
- instr->op = *pc++;
- instr->arity = *pc++;
- break;
case TOP_store_type:
i = *pc++;
instr->a[ap].type = i;
@@ -4234,21 +4641,25 @@ transform_engine(LoaderState* st)
i = *pc++;
instr->a[ap].val = i;
break;
- case TOP_store_var:
+ case TOP_store_var_next_arg:
i = *pc++;
ASSERT(i < TE_MAX_VARS);
instr->a[ap].type = var[i].type;
instr->a[ap].val = var[i].val;
+ ap++;
break;
case TOP_try_me_else:
restart = pc + 1;
restart += *pc++;
ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
break;
+ case TOP_try_me_else_fail:
+ restart = restart_fail;
+ break;
case TOP_end:
RETURN(TE_OK);
case TOP_fail:
- RETURN(TE_FAIL)
+ RETURN(TE_FAIL);
default:
ASSERT(0);
}
@@ -4317,41 +4728,9 @@ load_printf(int line, LoaderState* context, char *fmt,...)
erts_send_error_to_logger(context->group_leader, dsbufp);
}
-
-static int
-get_int_val(LoaderState* stp, Uint len_code, BeamInstr* result)
-{
- Uint count;
- Uint val;
-
- len_code >>= 5;
- ASSERT(len_code < 8);
- if (len_code == 7) {
- LoadError0(stp, "can't load integers bigger than 8 bytes yet\n");
- }
- count = len_code + 2;
- if (count == 5) {
- Uint msb;
- GetByte(stp, msb);
- if (msb == 0) {
- count--;
- }
- GetInt(stp, 4, *result);
- } else if (count <= 4) {
- GetInt(stp, count, val);
- *result = ((val << 8*(sizeof(val)-count)) >> 8*(sizeof(val)-count));
- } else {
- LoadError1(stp, "too big integer; %d bytes\n", count);
- }
- return 1;
-
- load_error:
- return 0;
-}
-
-
static int
-get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result)
+get_tag_and_value(LoaderState* stp, Uint len_code,
+ unsigned tag, BeamInstr* result)
{
Uint count;
Sint val;
@@ -4371,17 +4750,62 @@ get_erlang_integer(LoaderState* stp, Uint len_code, BeamInstr* result)
if (len_code < 7) {
count = len_code + 2;
} else {
- Uint tag;
+ unsigned sztag;
UWord len_word;
ASSERT(len_code == 7);
- GetTagAndValue(stp, tag, len_word);
- VerifyTag(stp, TAG_u, tag);
+ GetTagAndValue(stp, sztag, len_word);
+ VerifyTag(stp, sztag, TAG_u);
count = len_word + 9;
}
/*
- * Handle values up to the size of an int, meaning either a small or bignum.
+ * The value for all tags except TAG_i must be an unsigned integer
+ * fitting in an Uint. If it does not fit, we'll indicate overflow
+ * by changing the tag to TAG_o.
+ */
+
+ if (tag != TAG_i) {
+ if (count == sizeof(Uint)+1) {
+ Uint msb;
+
+ /*
+ * The encoded value has one more byte than an Uint.
+ * It will still fit in an Uint if the most significant
+ * byte is 0.
+ */
+ GetByte(stp, msb);
+ GetInt(stp, sizeof(Uint), *result);
+ if (msb != 0) {
+ /* Overflow: Negative or too big. */
+ return TAG_o;
+ }
+ } else if (count == sizeof(Uint)) {
+ /*
+ * The value must be positive (or the encoded value would
+ * have been one byte longer).
+ */
+ GetInt(stp, count, *result);
+ } else if (count < sizeof(Uint)) {
+ GetInt(stp, count, *result);
+
+ /*
+ * If the sign bit is set, the value is negative
+ * (not allowed).
+ */
+ if (*result & ((Uint)1 << (count*8-1))) {
+ return TAG_o;
+ }
+ } else {
+ GetInt(stp, count, *result);
+ return TAG_o;
+ }
+ return tag;
+ }
+
+ /*
+ * TAG_i: First handle values up to the size of an Uint (i.e. either
+ * a small or a bignum).
*/
if (count <= sizeof(val)) {
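
For every tag except TAG_i, get_tag_and_value above accepts only values that are non-negative and fit in an Uint, and flags everything else by returning TAG_o. A standalone sketch of those acceptance rules over a big-endian byte string, with illustrative names only (not the loader's GetByte/GetInt machinery):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uintptr_t word_t;

enum decode_result { DECODE_OK, DECODE_OVERFLOW };

static enum decode_result
decode_unsigned(const unsigned char *bytes, size_t count, word_t *result)
{
    word_t val = 0;
    size_t i, start = 0;

    if (count == sizeof(word_t) + 1) {
        /* One byte longer than a word: still fits if that extra byte is 0. */
        if (bytes[0] != 0)
            return DECODE_OVERFLOW;
        start = 1;
    } else if (count > sizeof(word_t) + 1) {
        return DECODE_OVERFLOW;            /* cannot fit at all */
    } else if (count > 0 && count < sizeof(word_t) && (bytes[0] & 0x80)) {
        return DECODE_OVERFLOW;            /* sign bit set: negative value */
    }
    for (i = start; i < count; i++)
        val = (val << 8) | bytes[i];
    *result = val;
    return DECODE_OK;
}

int main(void)
{
    unsigned char ok[]  = { 0x01, 0x02 };   /* 258, accepted */
    unsigned char neg[] = { 0x80, 0x00 };   /* sign bit set, rejected */
    word_t v = 0;

    printf("%d %d\n",
           decode_unsigned(ok, sizeof ok, &v) == DECODE_OK,
           decode_unsigned(neg, sizeof neg, &v) == DECODE_OK);
    printf("value %lu\n", (unsigned long) v);
    return 0;
}
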
@@ -4558,6 +4982,8 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
lit->heap_size = heap_size;
lit->heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, heap_size*sizeof(Eterm));
lit->term = make_boxed(lit->heap);
+ lit->off_heap.first = 0;
+ lit->off_heap.overhead = 0;
*hpp = lit->heap;
return stp->num_literals++;
}
@@ -4836,17 +5262,24 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
return result;
}
-
/*
- * Returns a pointer to {module, function, arity}, or NULL if not found.
+ * Find a function from the given pc and fill information in
+ * the FunctionInfo struct. If the full_info is non-zero, fill
+ * in all available information (including location in the
+ * source code). If no function is found, the 'current' field
+ * will be set to NULL.
*/
-BeamInstr *
-find_function_from_pc(BeamInstr* pc)
+
+void
+erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
{
Range* low = modules;
Range* high = low + num_loaded_modules;
Range* mid = mid_module;
+ fi->current = NULL;
+ fi->needed = 5;
+ fi->loc = LINE_INVALID_LOCATION;
while (low < high) {
if (pc < mid->start) {
high = mid;
@@ -4863,26 +5296,160 @@ find_function_from_pc(BeamInstr* pc)
high1 = mid1;
} else if (pc < mid1[1]) {
mid_module = mid;
- return mid1[0]+2;
+ fi->current = mid1[0]+2;
+ if (full_info) {
+ BeamInstr** fp = (BeamInstr **) (mid->start +
+ MI_FUNCTIONS);
+ int idx = mid1 - fp;
+ lookup_loc(fi, pc, mid->start, idx);
+ }
+ return;
} else {
low1 = mid1 + 1;
}
}
- return NULL;
+ return;
}
mid = low + (high-low) / 2;
}
- return NULL;
+}
+
+static void
+lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
+{
+ Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
+ Eterm* low;
+ Eterm* high;
+ Eterm* mid;
+ Eterm pc;
+
+ if (line == 0) {
+ return;
+ }
+
+ pc = (Eterm) (BeamInstr) orig_pc;
+ fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
+ low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
+ high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
+ while (high > low) {
+ mid = low + (high-low) / 2;
+ if (pc < mid[0]) {
+ high = mid;
+ } else if (pc < mid[1]) {
+ int file;
+ int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
+
+ if (line[MI_LINE_LOC_SIZE] == 2) {
+ Uint16* loc_table =
+ (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ fi->loc = loc_table[index];
+ } else {
+ Uint32* loc_table =
+ (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ ASSERT(line[MI_LINE_LOC_SIZE] == 4);
+ fi->loc = loc_table[index];
+ }
+ if (fi->loc == LINE_INVALID_LOCATION) {
+ return;
+ }
+ fi->needed += 3+2+3+2;
+ file = LOC_FILE(fi->loc);
+ if (file == 0) {
+ /* Special case: Module name with ".erl" appended */
+ Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ fi->needed += 2*(mod_atom->len+4);
+ } else {
+ Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
+ fi->needed += 2*ap->len;
+ }
+ return;
+ } else {
+ low = mid + 1;
+ }
+ }
+}
+
+/*
+ * Build a single {M,F,A,Location} item to be part of

+ * a stack trace.
+ */
+Eterm*
+erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p)
+{
+ BeamInstr* current = fi->current;
+ Eterm loc = NIL;
+
+ if (fi->loc != LINE_INVALID_LOCATION) {
+ Eterm tuple;
+ int line = LOC_LINE(fi->loc);
+ int file = LOC_FILE(fi->loc);
+ Eterm file_term = NIL;
+
+ if (file == 0) {
+ Atom* ap = atom_tab(atom_val(fi->current[0]));
+ file_term = buf_to_intlist(&hp, ".erl", 4, NIL);
+ file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, file_term);
+ } else {
+ Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
+ file_term = buf_to_intlist(&hp, (char*)ap->name, ap->len, NIL);
+ }
+
+ tuple = TUPLE2(hp, am_line, make_small(line));
+ hp += 3;
+ loc = CONS(hp, tuple, loc);
+ hp += 2;
+ tuple = TUPLE2(hp, am_file, file_term);
+ hp += 3;
+ loc = CONS(hp, tuple, loc);
+ hp += 2;
+ }
+
+ if (is_list(args) || is_nil(args)) {
+ *mfa_p = TUPLE4(hp, current[0], current[1], args, loc);
+ } else {
+ Eterm arity = make_small(current[2]);
+ *mfa_p = TUPLE4(hp, current[0], current[1], arity, loc);
+ }
+ return hp + 5;
+}
+
+/*
+ * Force setting of the current function in a FunctionInfo
+ * structure. No source code location will be associated with
+ * the function.
+ */
+void
+erts_set_current_function(FunctionInfo* fi, BeamInstr* current)
+{
+ fi->current = current;
+ fi->needed = 5;
+ fi->loc = LINE_INVALID_LOCATION;
+}
+
+
+/*
+ * Returns a pointer to {module, function, arity}, or NULL if not found.
+ */
+BeamInstr*
+find_function_from_pc(BeamInstr* pc)
+{
+ FunctionInfo fi;
+
+ erts_lookup_function_info(&fi, pc, 0);
+ return fi.current;
}
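
erts_lookup_function_info narrows a pc in two steps: a binary search over the loaded modules' address ranges, then a second search over that module's function start addresses (and, via lookup_loc, over the line table). The range search reduces to this pattern, shown with invented types rather than the emulator's Range:

#include <stddef.h>
#include <stdio.h>

struct range { const char *start; const char *end; };

/* Binary search over ranges sorted by start address; returns the index of
 * the half-open range containing pc, or -1 if no range contains it. */
static int find_range(const struct range *ranges, size_t n, const char *pc)
{
    size_t low = 0, high = n;

    while (low < high) {
        size_t mid = low + (high - low) / 2;
        if (pc < ranges[mid].start)
            high = mid;
        else if (pc < ranges[mid].end)
            return (int) mid;          /* start <= pc < end */
        else
            low = mid + 1;
    }
    return -1;
}

int main(void)
{
    static char code[100];
    struct range mods[] = { { code, code + 40 }, { code + 40, code + 100 } };
    printf("pc in range %d\n", find_range(mods, 2, code + 55));
    return 0;
}
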
/*
* Read a specific chunk from a Beam binary.
*/
-Eterm
-code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
+BIF_RETTYPE
+code_get_chunk_2(BIF_ALIST_2)
{
- LoaderState state;
+ Process* p = BIF_P;
+ Eterm Bin = BIF_ARG_1;
+ Eterm Chunk = BIF_ARG_2;
+ LoaderState* stp;
Uint chunk = 0;
ErlSubBin* sb;
Uint offset;
@@ -4894,15 +5461,16 @@ code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
Eterm real_bin;
byte* temp_alloc = NULL;
+ stp = erts_alloc_loader_state();
if ((start = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
error:
erts_free_aligned_binary_bytes(temp_alloc);
+ if (stp) {
+ free_state(stp);
+ }
BIF_ERROR(p, BADARG);
}
- state.module = THE_NON_VALUE; /* Suppress diagnostiscs */
- state.file_name = "IFF header for Beam file";
- state.file_p = start;
- state.file_left = binary_size(Bin);
+ stp->module = THE_NON_VALUE; /* Suppress diagnostics */
for (i = 0; i < 4; i++) {
Eterm* chunkp;
Eterm num;
@@ -4920,25 +5488,30 @@ code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
if (is_not_nil(Chunk)) {
goto error;
}
- if (!scan_iff_file(&state, &chunk, 1, 1)) {
- erts_free_aligned_binary_bytes(temp_alloc);
- return am_undefined;
+ if (!init_iff_file(stp, start, binary_size(Bin)) ||
+ !scan_iff_file(stp, &chunk, 1, 1) ||
+ stp->chunks[0].start == NULL) {
+ res = am_undefined;
+ goto done;
}
ERTS_GET_REAL_BIN(Bin, real_bin, offset, bitoffs, bitsize);
if (bitoffs) {
- res = new_binary(p, state.chunks[0].start, state.chunks[0].size);
+ res = new_binary(p, stp->chunks[0].start, stp->chunks[0].size);
} else {
sb = (ErlSubBin *) HAlloc(p, ERL_SUB_BIN_SIZE);
sb->thing_word = HEADER_SUB_BIN;
sb->orig = real_bin;
- sb->size = state.chunks[0].size;
+ sb->size = stp->chunks[0].size;
sb->bitsize = 0;
sb->bitoffs = 0;
- sb->offs = offset + (state.chunks[0].start - start);
+ sb->offs = offset + (stp->chunks[0].start - start);
sb->is_writable = 0;
res = make_binary(sb);
}
+
+ done:
erts_free_aligned_binary_bytes(temp_alloc);
+ free_state(stp);
return res;
}
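
code_get_chunk_2 relies on init_iff_file/scan_iff_file to locate a named chunk inside the BEAM container. A hedged sketch of that kind of IFF-style walk, assuming the usual layout of a 4-byte tag, a 4-byte big-endian size, and a payload padded to 4 bytes (names invented, not the loader's API):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t be32(const unsigned char *p)
{
    return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
         | ((uint32_t) p[2] << 8)  |  (uint32_t) p[3];
}

/* Scan a buffer of chunks; return a pointer to the payload of the chunk
 * whose 4-byte tag matches, or NULL if it is not present. */
static const unsigned char *
find_chunk(const unsigned char *buf, size_t len, const char tag[4], uint32_t *size)
{
    size_t pos = 0;

    while (pos + 8 <= len) {
        uint32_t sz = be32(buf + pos + 4);
        if (memcmp(buf + pos, tag, 4) == 0) {
            *size = sz;
            return buf + pos + 8;              /* payload follows the header */
        }
        pos += 8 + ((sz + 3) & ~(size_t) 3);   /* skip payload, 4-byte padded */
    }
    return NULL;
}

int main(void)
{
    /* one chunk: tag "Atom", length 2, payload "hi" padded to 4 bytes */
    unsigned char buf[] = { 'A','t','o','m', 0,0,0,2, 'h','i',0,0 };
    uint32_t sz;
    const unsigned char *p = find_chunk(buf, sizeof buf, "Atom", &sz);
    if (p)
        printf("found %u bytes: %.2s\n", sz, p);
    return 0;
}
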
@@ -4946,24 +5519,34 @@ code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
* Calculate the MD5 for a module.
*/
-Eterm
-code_module_md5_1(Process* p, Eterm Bin)
+BIF_RETTYPE
+code_module_md5_1(BIF_ALIST_1)
{
- LoaderState state;
+ Process* p = BIF_P;
+ Eterm Bin = BIF_ARG_1;
+ LoaderState* stp;
+ byte* bytes;
byte* temp_alloc = NULL;
+ Eterm res;
- if ((state.file_p = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
+ stp = erts_alloc_loader_state();
+ if ((bytes = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
+ free_state(stp);
BIF_ERROR(p, BADARG);
}
- state.module = THE_NON_VALUE; /* Suppress diagnostiscs */
- state.file_name = "IFF header for Beam file";
- state.file_left = binary_size(Bin);
-
- if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
- return am_undefined;
+ stp->module = THE_NON_VALUE; /* Suppress diagnostics */
+ if (!init_iff_file(stp, bytes, binary_size(Bin)) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !verify_chunks(stp)) {
+ res = am_undefined;
+ goto done;
}
+ res = new_binary(p, stp->mod_md5, sizeof(stp->mod_md5));
+
+ done:
erts_free_aligned_binary_bytes(temp_alloc);
- return new_binary(p, state.mod_md5, sizeof(state.mod_md5));
+ free_state(stp);
+ return res;
}
#define WORDS_PER_FUNCTION 6
@@ -4998,7 +5581,7 @@ stub_copy_info(LoaderState* stp,
if (size != 0) {
memcpy(info, stp->chunks[chunk].start, size);
*ptr_word = (BeamInstr) info;
- decoded_size = erts_decode_ext_size(info, size, 0);
+ decoded_size = erts_decode_ext_size(info, size);
if (decoded_size < 0) {
return 0;
}
@@ -5205,7 +5788,17 @@ patch_funentries(Eterm Patchlist)
fe = erts_get_fun_entry(Mod, uniq, index);
fe->native_address = (Uint *)native_address;
- erts_refc_dec(&fe->refc, 1);
+
+ /* Deliberate MEMORY LEAK of native fun entries!!!
+ *
+ * Uncomment the line below when hipe code upgrade and purging work correctly.
+ * Today we may get cases when old (leaked) native code of a purged module
+ * gets called and tries to create instances of a deleted fun entry.
+ *
+ * Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
+ *
+ * erts_refc_dec(&fe->refc, 1);
+ */
if (!patch(Addresses, (Uint) fe))
return 0;
@@ -5226,7 +5819,7 @@ patch_funentries(Eterm Patchlist)
Eterm
erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
{
- LoaderState state;
+ LoaderState* stp;
BeamInstr Funcs;
BeamInstr Patchlist;
Eterm* tp;
@@ -5239,16 +5832,15 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
int code_size;
int rval;
int i;
- ErlDrvBinary* bin = NULL;
byte* temp_alloc = NULL;
byte* bytes;
Uint size;
/*
- * Must initialize state.lambdas here because the error handling code
+ * Must initialize stp->lambdas here because the error handling code
* at label 'error' uses it.
*/
- init_state(&state);
+ stp = erts_alloc_loader_state();
if (is_not_atom(Mod)) {
goto error;
@@ -5272,47 +5864,35 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
size = binary_size(Beam);
/*
- * Uncompressed if needed.
- */
- if (!(size >= 4 && bytes[0] == 'F' && bytes[1] == 'O' &&
- bytes[2] == 'R' && bytes[3] == '1')) {
- bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)bytes, size);
- if (bin == NULL) {
- goto error;
- }
- bytes = (byte*)bin->orig_bytes;
- size = bin->orig_size;
- }
-
- /*
* Scan the Beam binary and read the interesting sections.
*/
- state.file_name = "IFF header for Beam file";
- state.file_p = bytes;
- state.file_left = size;
- state.module = Mod;
- state.group_leader = p->group_leader;
- state.num_functions = n;
- if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ stp->module = Mod;
+ stp->group_leader = p->group_leader;
+ stp->num_functions = n;
+ if (!init_iff_file(stp, bytes, size)) {
+ goto error;
+ }
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !verify_chunks(stp)) {
goto error;
}
- define_file(&state, "code chunk header", CODE_CHUNK);
- if (!read_code_header(&state)) {
+ define_file(stp, "code chunk header", CODE_CHUNK);
+ if (!read_code_header(stp)) {
goto error;
}
- define_file(&state, "atom table", ATOM_CHUNK);
- if (!load_atom_table(&state)) {
+ define_file(stp, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(stp)) {
goto error;
}
- define_file(&state, "export table", EXP_CHUNK);
- if (!stub_read_export_table(&state)) {
+ define_file(stp, "export table", EXP_CHUNK);
+ if (!stub_read_export_table(stp)) {
goto error;
}
- if (state.chunks[LAMBDA_CHUNK].size > 0) {
- define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
- if (!read_lambda_table(&state)) {
+ if (stp->chunks[LAMBDA_CHUNK].size > 0) {
+ define_file(stp, "lambda (fun) table", LAMBDA_CHUNK);
+ if (!read_lambda_table(stp)) {
goto error;
}
}
@@ -5322,8 +5902,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(BeamInstr);
- code_size += state.chunks[ATTR_CHUNK].size;
- code_size += state.chunks[COMPILE_CHUNK].size;
+ code_size += stp->chunks[ATTR_CHUNK].size;
+ code_size += stp->chunks[COMPILE_CHUNK].size;
code = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
if (!code) {
goto error;
@@ -5341,6 +5921,9 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
code[MI_COMPILE_SIZE] = 0;
code[MI_COMPILE_SIZE_ON_HEAP] = 0;
code[MI_NUM_BREAKPOINTS] = 0;
+ code[MI_LITERALS_START] = 0;
+ code[MI_LITERALS_END] = 0;
+ code[MI_LITERALS_OFF_HEAP] = 0;
code[MI_ON_LOAD_FUNCTION_PTR] = 0;
ci = MI_FUNCTIONS + n + 1;
@@ -5413,12 +5996,12 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
*/
info = (byte *) fp;
- info = stub_copy_info(&state, ATTR_CHUNK, info,
+ info = stub_copy_info(stp, ATTR_CHUNK, info,
code+MI_ATTR_PTR, code+MI_ATTR_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
}
- info = stub_copy_info(&state, COMPILE_CHUNK, info,
+ info = stub_copy_info(stp, COMPILE_CHUNK, info,
code+MI_COMPILE_PTR, code+MI_COMPILE_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
@@ -5428,9 +6011,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Insert the module in the module table.
*/
- rval = insert_new_code(p, 0, p->group_leader, Mod, code, code_size,
- BEAM_CATCHES_NIL);
- if (rval < 0) {
+ rval = insert_new_code(p, 0, p->group_leader, Mod, code, code_size);
+ if (rval != NIL) {
goto error;
}
@@ -5440,46 +6022,19 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
fp = code + ci;
for (i = 0; i < n; i++) {
- stub_final_touch(&state, fp);
+ stub_final_touch(stp, fp);
fp += WORDS_PER_FUNCTION;
}
if (patch_funentries(Patchlist)) {
erts_free_aligned_binary_bytes(temp_alloc);
- if (state.lambdas != state.def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
- }
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
- if (bin != NULL) {
- driver_free_binary(bin);
- }
+ free_state(stp);
return Mod;
}
error:
erts_free_aligned_binary_bytes(temp_alloc);
- if (code != NULL) {
- erts_free(ERTS_ALC_T_CODE, code);
- }
- if (state.labels != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
- }
- if (state.lambdas != state.def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
- }
- if (state.atom != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
- }
- if (state.export != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
- }
- if (bin != NULL) {
- driver_free_binary(bin);
- }
-
-
+ free_state(stp);
BIF_ERROR(p, BADARG);
}
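
The stub-module and chunk BIFs above now allocate one loader state and release it through a single free_state() on every exit path, instead of freeing each field by hand as the removed error code did. A small sketch of that allocate-once/free-on-exit pattern, with invented names:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

struct state {
    char *buf;
};

static struct state *alloc_state(void)
{
    struct state *stp = malloc(sizeof *stp);
    if (stp)
        stp->buf = NULL;       /* keep fields safe to free unconditionally */
    return stp;
}

static void free_state(struct state *stp)
{
    if (stp) {
        free(stp->buf);
        free(stp);
    }
}

static int do_work(const char *input)
{
    struct state *stp = alloc_state();
    int ret = -1;

    if (stp == NULL)
        return -1;
    stp->buf = malloc(16);
    if (stp->buf == NULL)
        goto done;             /* every exit path goes through free_state() */
    strncpy(stp->buf, input, 15);
    stp->buf[15] = '\0';
    ret = 0;
 done:
    free_state(stp);
    return ret;
}

int main(void)
{
    printf("%d\n", do_work("hello"));
    return 0;
}
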
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 26e3054c4b..997ba197db 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -23,7 +23,9 @@
#include "beam_opcodes.h"
#include "erl_process.h"
-int beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module);
+Eterm beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm module);
+
typedef struct gen_op_entry {
char* name;
@@ -101,11 +103,18 @@ extern Uint erts_total_code_size;
*/
#define MI_LITERALS_START 8
#define MI_LITERALS_END 9
+#define MI_LITERALS_OFF_HEAP 10
+
/*
* Pointer to the on_load function (or NULL if none).
*/
-#define MI_ON_LOAD_FUNCTION_PTR 10
+#define MI_ON_LOAD_FUNCTION_PTR 11
+
+/*
+ * Pointer to the line table (or NULL if none).
+ */
+#define MI_LINE_TABLE 12
/*
* Start of function pointer table. This table contains pointers to
@@ -116,5 +125,5 @@ extern Uint erts_total_code_size;
* this table.
*/
-#define MI_FUNCTIONS 11
+#define MI_FUNCTIONS 13
#endif /* _BEAM_LOAD_H */
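
The two new header slots are what shift MI_ON_LOAD_FUNCTION_PTR and MI_FUNCTIONS: the module header is a flat word array indexed by these constants. A tiny sketch reusing only the values shown above (everything else is invented):

#include <stdio.h>

#define MI_LITERALS_START        8
#define MI_LITERALS_END          9
#define MI_LITERALS_OFF_HEAP    10   /* new slot */
#define MI_ON_LOAD_FUNCTION_PTR 11   /* was 10 */
#define MI_LINE_TABLE           12   /* new slot */
#define MI_FUNCTIONS            13   /* was 11 */

int main(void)
{
    unsigned long header[MI_FUNCTIONS + 1] = { 0 };

    header[MI_LITERALS_OFF_HEAP] = 0;  /* off-heap literal list, initially empty */
    header[MI_LINE_TABLE] = 0;         /* line table pointer, NULL when absent */
    printf("line table slot holds %lu; functions start at word %d\n",
           header[MI_LINE_TABLE], MI_FUNCTIONS);
    return 0;
}
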
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 68b3350d7f..39d4582435 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -36,12 +36,16 @@
#include "beam_bp.h"
#include "erl_db_util.h"
#include "register.h"
+#include "erl_thr_progress.h"
static Export* flush_monitor_message_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
static Export* await_proc_exit_trap = NULL;
Export* erts_format_cpu_topology_trap = NULL;
+static Export *await_sched_wall_time_mod_trap;
+static erts_smp_atomic32_t sched_wall_time;
+
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/*
@@ -559,7 +563,11 @@ erts_queue_monitor_message(Process *p,
ref_copy = copy_struct(ref, ref_size, &hp, ohp);
tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy);
- erts_queue_message(p, p_locksp, bp, tup, NIL);
+ erts_queue_message(p, p_locksp, bp, tup, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
static BIF_RETTYPE
@@ -811,7 +819,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
so.min_heap_size = H_MIN_SIZE;
so.min_vheap_size = BIN_VH_MIN_SIZE;
so.priority = PRIORITY_NORMAL;
- so.max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
so.scheduler = 0;
/*
@@ -869,8 +877,6 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
}
} else if (arg == am_scheduler && is_small(val)) {
Sint scheduler = signed_val(val);
- if (erts_common_run_queue && erts_no_schedulers > 1)
- goto error;
if (scheduler < 0 || erts_no_schedulers < scheduler)
goto error;
so.scheduler = (int) scheduler;
@@ -1107,9 +1113,9 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3)
/**********************************************************************/
-BIF_RETTYPE get_stacktrace_0(Process* p)
+BIF_RETTYPE get_stacktrace_0(BIF_ALIST_0)
{
- Eterm t = build_stacktrace(p, p->ftrace);
+ Eterm t = build_stacktrace(BIF_P, BIF_P->ftrace);
BIF_RET(t);
}
@@ -1119,10 +1125,10 @@ BIF_RETTYPE get_stacktrace_0(Process* p)
* the process, and the final error value will be {Term,StackTrace}.
*/
-BIF_RETTYPE error_1(Process* p, Eterm term)
+BIF_RETTYPE error_1(BIF_ALIST_1)
{
- p->fvalue = term;
- BIF_ERROR(p, EXC_ERROR);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, EXC_ERROR);
}
/**********************************************************************/
@@ -1131,12 +1137,12 @@ BIF_RETTYPE error_1(Process* p, Eterm term)
* in the stacktrace.
*/
-BIF_RETTYPE error_2(Process* p, Eterm value, Eterm args)
+BIF_RETTYPE error_2(BIF_ALIST_2)
{
- Eterm* hp = HAlloc(p, 3);
+ Eterm* hp = HAlloc(BIF_P, 3);
- p->fvalue = TUPLE2(hp, value, args);
- BIF_ERROR(p, EXC_ERROR_2);
+ BIF_P->fvalue = TUPLE2(hp, BIF_ARG_1, BIF_ARG_2);
+ BIF_ERROR(BIF_P, EXC_ERROR_2);
}
/**********************************************************************/
@@ -1146,10 +1152,10 @@ BIF_RETTYPE error_2(Process* p, Eterm value, Eterm args)
* It is useful in stub functions for NIFs.
*/
-BIF_RETTYPE nif_error_1(Process* p, Eterm term)
+BIF_RETTYPE nif_error_1(BIF_ALIST_1)
{
- p->fvalue = term;
- BIF_ERROR(p, EXC_ERROR);
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, EXC_ERROR);
}
/**********************************************************************/
@@ -1159,12 +1165,12 @@ BIF_RETTYPE nif_error_1(Process* p, Eterm term)
* It is useful in stub functions for NIFs.
*/
-BIF_RETTYPE nif_error_2(Process* p, Eterm value, Eterm args)
+BIF_RETTYPE nif_error_2(BIF_ALIST_2)
{
- Eterm* hp = HAlloc(p, 3);
+ Eterm* hp = HAlloc(BIF_P, 3);
- p->fvalue = TUPLE2(hp, value, args);
- BIF_ERROR(p, EXC_ERROR_2);
+ BIF_P->fvalue = TUPLE2(hp, BIF_ARG_1, BIF_ARG_2);
+ BIF_ERROR(BIF_P, EXC_ERROR_2);
}
/**********************************************************************/
@@ -1183,14 +1189,19 @@ BIF_RETTYPE exit_1(BIF_ALIST_1)
* If there is an error in the argument format,
* return the atom 'badarg' instead.
*/
-Eterm
-raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
+BIF_RETTYPE raise_3(BIF_ALIST_3)
+{
+ Process *c_p = BIF_P;
+ Eterm class = BIF_ARG_1;
+ Eterm value = BIF_ARG_2;
+ Eterm stacktrace = BIF_ARG_3;
Eterm reason;
Eterm l, *hp, *hp_end, *tp;
int depth, cnt;
size_t sz;
+ int must_copy = 0;
struct StackTrace *s;
-
+
if (class == am_error) {
c_p->fvalue = value;
reason = EXC_ERROR;
@@ -1206,35 +1217,74 @@ raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
/* Check syntax of stacktrace, and count depth.
* Accept anything that can be returned from erlang:get_stacktrace/0,
* as well as a 2-tuple with a fun as first element that the
- * error_handler may need to give us.
+ * error_handler may need to give us. Also allow old-style
+ * MFA three-tuples.
*/
for (l = stacktrace, depth = 0;
is_list(l);
l = CDR(list_val(l)), depth++) {
Eterm t = CAR(list_val(l));
- int arity;
+ Eterm location = NIL;
+
if (is_not_tuple(t)) goto error;
tp = tuple_val(t);
- arity = arityval(tp[0]);
- if ((arity == 3) && is_atom(tp[1]) && is_atom(tp[2])) continue;
- if ((arity == 2) && is_fun(tp[1])) continue;
- goto error;
+ switch (arityval(tp[0])) {
+ case 2:
+ /* {Fun,Args} */
+ if (is_fun(tp[1])) {
+ must_copy = 1;
+ } else {
+ goto error;
+ }
+ break;
+ case 3:
+ /*
+ * One of:
+ * {Fun,Args,Location}
+ * {M,F,A}
+ */
+ if (is_fun(tp[1])) {
+ location = tp[3];
+ } else if (is_atom(tp[1]) && is_atom(tp[2])) {
+ must_copy = 1;
+ } else {
+ goto error;
+ }
+ break;
+ case 4:
+ if (!(is_atom(tp[1]) && is_atom(tp[2]))) {
+ goto error;
+ }
+ location = tp[4];
+ break;
+ default:
+ goto error;
+ }
+ if (is_not_list(location) && is_not_nil(location)) {
+ goto error;
+ }
}
if (is_not_nil(l)) goto error;
/* Create stacktrace and store */
- if (depth <= erts_backtrace_depth) {
+ if (erts_backtrace_depth < depth) {
+ depth = erts_backtrace_depth;
+ must_copy = 1;
+ }
+ if (must_copy) {
+ cnt = depth;
+ c_p->ftrace = NIL;
+ } else {
+ /* No need to copy the stacktrace */
cnt = 0;
c_p->ftrace = stacktrace;
- } else {
- cnt = depth = erts_backtrace_depth;
- c_p->ftrace = NIL;
}
+
tp = &c_p->ftrace;
sz = (offsetof(struct StackTrace, trace) + sizeof(Eterm) - 1)
/ sizeof(Eterm);
- hp = HAlloc(c_p, sz + 2*(cnt + 1));
- hp_end = hp + sz + 2*(cnt + 1);
+ hp = HAlloc(c_p, sz + (2+6)*(cnt + 1));
+ hp_end = hp + sz + (2+6)*(cnt + 1);
s = (struct StackTrace *) hp;
s->header = make_neg_bignum_header(sz - 1);
s->freason = reason;
@@ -1242,13 +1292,29 @@ raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
s->current = NULL;
s->depth = 0;
hp += sz;
- if (cnt > 0) {
+ if (must_copy) {
+ int cnt;
+
/* Copy list up to depth */
for (cnt = 0, l = stacktrace;
cnt < depth;
cnt++, l = CDR(list_val(l))) {
+ Eterm t;
+ Eterm *tpp;
+ int arity;
+
ASSERT(*tp == NIL);
- *tp = CONS(hp, CAR(list_val(l)), *tp);
+ t = CAR(list_val(l));
+ tpp = tuple_val(t);
+ arity = arityval(tpp[0]);
+ if (arity == 2) {
+ t = TUPLE3(hp, tpp[1], tpp[2], NIL);
+ hp += 4;
+ } else if (arity == 3 && is_atom(tpp[1])) {
+ t = TUPLE4(hp, tpp[1], tpp[2], tpp[3], NIL);
+ hp += 5;
+ }
+ *tp = CONS(hp, t, *tp);
tp = &CDR(list_val(*tp));
hp += 2;
}
@@ -1256,7 +1322,7 @@ raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
c_p->ftrace = CONS(hp, c_p->ftrace, make_big((Eterm *) s));
hp += 2;
ASSERT(hp <= hp_end);
-
+ HRelease(c_p, hp_end, hp);
BIF_ERROR(c_p, reason);
error:
@@ -1474,8 +1540,6 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
ErtsRunQueue *old;
ErtsRunQueue *new;
Sint sched;
- if (erts_common_run_queue && erts_no_schedulers > 1)
- goto error;
if (!is_small(BIF_ARG_2))
goto error;
sched = signed_val(BIF_ARG_2);
@@ -1674,10 +1738,10 @@ BIF_RETTYPE whereis_1(BIF_ALIST_1)
* erlang:'!'/2
*/
-Eterm
-ebif_bang_2(Process* p, Eterm To, Eterm Message)
+BIF_RETTYPE
+ebif_bang_2(BIF_ALIST_2)
{
- return send_2(p, To, Message);
+ return erl_send(BIF_P, BIF_ARG_1, BIF_ARG_2);
}
@@ -1884,7 +1948,11 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
- if (SEQ_TRACE_TOKEN(p) != NIL) {
+ if (SEQ_TRACE_TOKEN(p) != NIL
+#ifdef USE_VM_PROBES
+ && SEQ_TRACE_TOKEN(p) != am_have_dt_utag
+#endif
+ ) {
seq_trace_update_send(p);
seq_trace_output(SEQ_TRACE_TOKEN(p), msg,
SEQ_TRACE_SEND, portid, p);
@@ -2014,8 +2082,13 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
}
-Eterm
-send_3(Process *p, Eterm to, Eterm msg, Eterm opts) {
+BIF_RETTYPE send_3(BIF_ALIST_3)
+{
+ Process *p = BIF_P;
+ Eterm to = BIF_ARG_1;
+ Eterm msg = BIF_ARG_2;
+ Eterm opts = BIF_ARG_3;
+
int connect = !0;
int suspend = !0;
Eterm l = opts;
@@ -2079,8 +2152,13 @@ send_3(Process *p, Eterm to, Eterm msg, Eterm opts) {
BIF_ERROR(p, BADARG);
}
-Eterm
-send_2(Process *p, Eterm to, Eterm msg) {
+BIF_RETTYPE send_2(BIF_ALIST_2)
+{
+ return erl_send(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+Eterm erl_send(Process *p, Eterm to, Eterm msg)
+{
Sint result = do_send(p, to, msg, !0);
if (result > 0) {
@@ -3256,8 +3334,11 @@ time_to_parts(Eterm date, Sint* year, Sint* month, Sint* day,
/* return the universal time */
BIF_RETTYPE
-localtime_to_universaltime_2(Process *p, Eterm localtime, Eterm dst)
+localtime_to_universaltime_2(BIF_ALIST_2)
{
+ Process *p = BIF_P;
+ Eterm localtime = BIF_ARG_1;
+ Eterm dst = BIF_ARG_2;
Sint year, month, day;
Sint hour, minute, second;
int isdst;
@@ -3315,6 +3396,61 @@ BIF_RETTYPE universaltime_to_localtime_1(BIF_ALIST_1)
BIF_RET(TUPLE2(hp, res1, res2));
}
+/* convert calendar:universaltime_to_seconds/1 */
+
+BIF_RETTYPE universaltime_to_posixtime_1(BIF_ALIST_1)
+{
+ Sint year, month, day;
+ Sint hour, minute, second;
+
+ Sint64 seconds = 0;
+ Eterm *hp;
+ Uint hsz = 0;
+
+ if (!time_to_parts(BIF_ARG_1, &year, &month, &day,
+ &hour, &minute, &second))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (!univ_to_seconds(year, month, day, hour, minute, second, &seconds)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ erts_bld_sint64(NULL, &hsz, seconds);
+ hp = HAlloc(BIF_P, hsz);
+ BIF_RET(erts_bld_sint64(&hp, NULL, seconds));
+}
+
+/* convert calendar:seconds_to_universaltime/1 */
+
+BIF_RETTYPE posixtime_to_universaltime_1(BIF_ALIST_1)
+{
+ Sint year, month, day;
+ Sint hour, minute, second;
+ Eterm res1, res2;
+ Eterm* hp;
+
+ Sint64 time = 0;
+
+ if (!term_to_Sint64(BIF_ARG_1, &time)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (!seconds_to_univ(time, &year, &month, &day,
+ &hour, &minute, &second)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ hp = HAlloc(BIF_P, 4+4+3);
+ res1 = TUPLE3(hp,make_small(year),make_small(month),
+ make_small(day));
+ hp += 4;
+ res2 = TUPLE3(hp,make_small(hour),make_small(minute),
+ make_small(second));
+ hp += 4;
+ BIF_RET(TUPLE2(hp, res1, res2));
+}
+
+
/**********************************************************************/
@@ -3417,10 +3553,10 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */
- erts_smp_atomic_set(&erts_dead_ports_ptr,
- (erts_aint_t) (port_buf + erts_max_ports));
+ erts_smp_atomic_set_nob(&erts_dead_ports_ptr,
+ (erts_aint_t) (port_buf + erts_max_ports));
- next_ss = erts_smp_atomic32_inctest(&erts_ports_snapshot);
+ next_ss = erts_smp_atomic32_inc_read_relb(&erts_ports_snapshot);
for (i = erts_max_ports-1; i >= 0; i--) {
Port* prt = &erts_port[i];
@@ -3434,8 +3570,8 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
erts_smp_port_state_unlock(prt);
}
- dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr,
- (erts_aint_t) NULL);
+ dead_ports = (Eterm*)erts_smp_atomic_xchg_nob(&erts_dead_ports_ptr,
+ (erts_aint_t) NULL);
erts_smp_mtx_unlock(&ports_snapshot_mtx);
ASSERT(pp <= dead_ports);
@@ -3506,9 +3642,10 @@ BIF_RETTYPE erts_debug_display_1(BIF_ALIST_1)
}
-Eterm
-display_string_1(Process* p, Eterm string)
+BIF_RETTYPE display_string_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm string = BIF_ARG_1;
int len = is_string(string);
char *str;
@@ -3524,8 +3661,7 @@ display_string_1(Process* p, Eterm string)
BIF_RET(am_true);
}
-Eterm
-display_nl_0(Process* p)
+BIF_RETTYPE display_nl_0(BIF_ALIST_0)
{
erts_fprintf(stderr, "\n");
BIF_RET(am_true);
@@ -3537,43 +3673,122 @@ display_nl_0(Process* p)
/* ARGSUSED */
BIF_RETTYPE halt_0(BIF_ALIST_0)
{
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt/0\n"));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(0, "");
- return NIL; /* Pedantic (lint does not know about erl_exit) */
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt()\n"));
+ erl_halt(0);
+ ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
}
/**********************************************************************/
-#define MSG_SIZE 200
+#define HALT_MSG_SIZE 200
+static char halt_msg[HALT_MSG_SIZE];
/* stop the system with exit code */
/* ARGSUSED */
BIF_RETTYPE halt_1(BIF_ALIST_1)
{
Sint code;
- static char msg[MSG_SIZE];
- int i;
if (is_small(BIF_ARG_1) && (code = signed_val(BIF_ARG_1)) >= 0) {
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%d)\n", code));
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
+ erl_halt((int)(- code));
+ ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
+ }
+ else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(-code, "");
- } else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- if ((i = intlist_to_buf(BIF_ARG_1, msg, MSG_SIZE-1)) < 0) {
+ erl_exit(ERTS_ABORT_EXIT, "");
+ }
+ else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+ int i;
+
+ if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
goto error;
}
- msg[i] = '\0';
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%s)\n", msg));
+ halt_msg[i] = '\0';
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erl_exit(ERTS_DUMP_EXIT, "%s\n", msg);
- } else {
- error:
+ erl_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
+ }
+ else
+ goto error;
+ return NIL; /* Pedantic (lint does not know about erl_exit) */
+ error:
BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+
+/* stop the system with exit code and flags */
+/* ARGSUSED */
+BIF_RETTYPE halt_2(BIF_ALIST_2)
+{
+ Sint code;
+ Eterm optlist = BIF_ARG_2;
+ int flush = 0;
+
+ for (optlist = BIF_ARG_2;
+ is_list(optlist);
+ optlist = CDR(list_val(optlist))) {
+ Eterm *tp, opt = CAR(list_val(optlist));
+ if (is_not_tuple(opt))
+ goto error;
+ tp = tuple_val(opt);
+ if (tp[0] != make_arityval(2))
+ goto error;
+ if (tp[1] == am_flush) {
+ if (tp[2] == am_true)
+ flush = 1;
+ else if (tp[2] == am_false)
+ flush = 0;
+ else
+ goto error;
+ }
+ else
+ goto error;
+ }
+ if (is_not_nil(optlist))
+ goto error;
+
+ if (is_small(BIF_ARG_1) && (code = signed_val(BIF_ARG_1)) >= 0) {
+ VERBOSE(DEBUG_SYSTEM,
+ ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
+ if (flush) {
+ erl_halt((int)(- code));
+ ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
+ }
+ else {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erl_exit((int)(- code), "");
+ }
}
+ else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
+ VERBOSE(DEBUG_SYSTEM,
+ ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erl_exit(ERTS_ABORT_EXIT, "");
+ }
+ else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+ int i;
+
+ if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
+ goto error;
+ }
+ halt_msg[i] = '\0';
+ VERBOSE(DEBUG_SYSTEM,
+ ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erl_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
+ }
+ else
+ goto error;
return NIL; /* Pedantic (lint does not know about erl_exit) */
+ error:
+ BIF_ERROR(BIF_P, BADARG);
}
+/**********************************************************************/
+
BIF_RETTYPE function_exported_3(BIF_ALIST_3)
{
if (is_not_atom(BIF_ARG_1) ||
@@ -3589,8 +3804,13 @@ BIF_RETTYPE function_exported_3(BIF_ALIST_3)
/**********************************************************************/
-BIF_RETTYPE is_builtin_3(Process* p, Eterm Mod, Eterm Name, Eterm Arity)
+BIF_RETTYPE is_builtin_3(BIF_ALIST_3)
{
+ Process* p = BIF_P;
+ Eterm Mod = BIF_ARG_1;
+ Eterm Name = BIF_ARG_2;
+ Eterm Arity = BIF_ARG_3;
+
if (is_not_atom(Mod) || is_not_atom(Name) || is_not_small(Arity)) {
BIF_ERROR(p, BADARG);
}
@@ -3655,9 +3875,11 @@ BIF_RETTYPE make_fun_3(BIF_ALIST_3)
BIF_RET(make_export(hp));
}
-Eterm
-fun_to_list_1(Process* p, Eterm fun)
+BIF_RETTYPE fun_to_list_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+
if (is_not_any_fun(fun))
BIF_ERROR(p, BADARG);
BIF_RET(term2list_dsprintf(p, fun));
@@ -3942,8 +4164,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
- oval = (Uint) erts_smp_atomic32_xchg(&erts_max_gen_gcs,
- (erts_aint32_t) nval);
+ oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs,
+ (erts_aint32_t) nval);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_heap_size) {
int oval = H_MIN_SIZE;
@@ -3953,11 +4175,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
H_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
@@ -3969,11 +4191,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
@@ -3995,7 +4217,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
erts_backtrace_depth = n;
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_trace_control_word) {
- BIF_RET(db_set_trace_control_word_1(BIF_P, BIF_ARG_2));
+ BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_2));
} else if (BIF_ARG_1 == am_sequential_tracer) {
Eterm old_value = erts_set_system_seq_tracer(BIF_P,
ERTS_PROC_LOCK_MAIN,
@@ -4007,27 +4229,47 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
Uint i;
ErlMessage* mp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
for (i = 0; i < erts_max_processes; i++) {
if (process_tab[i] != (Process*) 0) {
Process* p = process_tab[i];
+#ifdef USE_VM_PROBES
+ p->seq_trace_token = (p->dt_utag != NIL) ? am_have_dt_utag : NIL;
+#else
p->seq_trace_token = NIL;
+#endif
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
mp = p->msg.first;
while(mp != NULL) {
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_TOKEN(mp) = (ERL_MESSAGE_DT_UTAG(mp) != NIL) ? am_have_dt_utag : NIL;
+#else
ERL_MESSAGE_TOKEN(mp) = NIL;
+#endif
mp = mp->next;
}
}
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
+ } else if (BIF_ARG_1 == am_scheduler_wall_time) {
+ if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ erts_aint32_t new = BIF_ARG_2 == am_true ? 1 : 0;
+ erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time,
+ new);
+ Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new);
+ ASSERT(is_value(ref));
+ BIF_TRAP2(await_sched_wall_time_mod_trap,
+ BIF_P,
+ ref,
+ old ? am_true : am_false);
+ }
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
@@ -4047,8 +4289,20 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
if (is_value(res))
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
+ erts_send_warning_to_logger_str(
+ BIF_P->group_leader,
+ "A call to erlang:system_flag(cpu_topology, _) was made.\n"
+ "The cpu_topology argument is deprecated and scheduled\n"
+ "for removal in erts-5.10/OTP-R16. For more information\n"
+ "see the erlang:system_flag/2 documentation.\n");
BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
+ erts_send_warning_to_logger_str(
+ BIF_P->group_leader,
+ "A call to erlang:system_flag(scheduler_bind_type, _) was\n"
+ "made. The scheduler_bind_type argument is deprecated and\n"
+ "scheduled for removal in erts-5.10/OTP-R16. For more\n"
+ "information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
}
error:
@@ -4235,8 +4489,7 @@ void
erts_bif_prep_await_proc_exit_data_trap(Process *c_p, Eterm pid, Eterm ret)
{
if (skip_current_msgq(c_p)) {
- Eterm unused;
- ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p, pid, am_data, ret);
+ ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p, pid, am_data, ret);
}
}
@@ -4244,8 +4497,7 @@ void
erts_bif_prep_await_proc_exit_reason_trap(Process *c_p, Eterm pid)
{
if (skip_current_msgq(c_p)) {
- Eterm unused;
- ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p,
+ ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p,
pid, am_reason, am_undefined);
}
}
@@ -4260,7 +4512,6 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
{
ASSERT(is_atom(module) && is_atom(function));
if (skip_current_msgq(c_p)) {
- Eterm unused;
Eterm term;
Eterm *hp;
int i;
@@ -4272,7 +4523,7 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
hp += 2;
}
term = TUPLE3(hp, module, function, term);
- ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p, pid, am_apply, term);
+ ERTS_BIF_PREP_TRAP3_NO_RET(await_proc_exit_trap, c_p, pid, am_apply, term);
}
}
@@ -4286,7 +4537,7 @@ void erts_init_bif(void)
erts_smp_spinlock_init(&make_ref_lock, "make_ref");
erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init(&erts_dead_ports_ptr, (erts_aint_t) NULL);
+ erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
/*
* bif_return_trap/1 is a hidden BIF that bifs that need to
@@ -4316,6 +4567,9 @@ void erts_init_bif(void)
am_format_cpu_topology,
1);
await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
+ await_sched_wall_time_mod_trap
+ = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
+ erts_smp_atomic32_init_nob(&sched_wall_time, 0);
}
#ifdef HARDDEBUG
@@ -4392,3 +4646,193 @@ BIF_RETTYPE get_module_info_2(BIF_ALIST_2)
}
BIF_RET(ret);
}
+
+BIF_RETTYPE dt_put_tag_1(BIF_ALIST_1)
+{
+#ifdef USE_VM_PROBES
+ Eterm otag;
+ if (BIF_ARG_1 == am_undefined) {
+ otag = (DT_UTAG(BIF_P) == NIL) ? am_undefined : DT_UTAG(BIF_P);
+ DT_UTAG(BIF_P) = NIL;
+ DT_UTAG_FLAGS(BIF_P) = 0;
+ if (SEQ_TRACE_TOKEN(BIF_P) == am_have_dt_utag) {
+ SEQ_TRACE_TOKEN(BIF_P) = NIL;
+ }
+ BIF_RET(otag);
+ }
+ if (!is_binary(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ otag = (DT_UTAG(BIF_P) == NIL) ? am_undefined : DT_UTAG(BIF_P);
+ DT_UTAG(BIF_P) = BIF_ARG_1;
+ DT_UTAG_FLAGS(BIF_P) |= DT_UTAG_PERMANENT;
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL) {
+ SEQ_TRACE_TOKEN(BIF_P) = am_have_dt_utag;
+ }
+ BIF_RET(otag);
+#else
+ BIF_RET(am_undefined);
+#endif
+}
+
+BIF_RETTYPE dt_get_tag_0(BIF_ALIST_0)
+{
+#ifdef USE_VM_PROBES
+ BIF_RET((DT_UTAG(BIF_P) == NIL || !(DT_UTAG_FLAGS(BIF_P) & DT_UTAG_PERMANENT)) ? am_undefined : DT_UTAG(BIF_P));
+#else
+ BIF_RET(am_undefined);
+#endif
+}
+BIF_RETTYPE dt_get_tag_data_0(BIF_ALIST_0)
+{
+#ifdef USE_VM_PROBES
+ BIF_RET((DT_UTAG(BIF_P) == NIL) ? am_undefined : DT_UTAG(BIF_P));
+#else
+ BIF_RET(am_undefined);
+#endif
+}
+BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1)
+{
+#ifdef USE_VM_PROBES
+ Eterm b;
+ Eterm *hp;
+ hp = HAlloc(BIF_P,2);
+ if (is_binary((DT_UTAG(BIF_P)))) {
+ Uint sz = binary_size(DT_UTAG(BIF_P));
+ int i;
+ unsigned char *p,*q;
+ byte *temp_alloc = NULL;
+ b = new_binary(BIF_P,NULL,sz+1);
+ q = binary_bytes(b);
+ p = erts_get_aligned_binary_bytes(DT_UTAG(BIF_P),&temp_alloc);
+ for(i=0;i<sz;++i) {
+ q[i] = p[i];
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ q[sz] = '\0';
+ } else {
+ b = new_binary(BIF_P,(byte *)"\0",1);
+ }
+ BIF_RET(CONS(hp,b,BIF_ARG_1));
+#else
+ BIF_RET(BIF_ARG_1);
+#endif
+}
+BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1)
+{
+#ifdef USE_VM_PROBES
+ Eterm b;
+ Eterm *hp;
+ hp = HAlloc(BIF_P,2);
+ if (is_binary((DT_UTAG(BIF_P)))) {
+ Uint sz = binary_size(DT_UTAG(BIF_P));
+ int i;
+ unsigned char *p,*q;
+ byte *temp_alloc = NULL;
+ b = new_binary(BIF_P,NULL,sz+1);
+ q = binary_bytes(b);
+ p = erts_get_aligned_binary_bytes(DT_UTAG(BIF_P),&temp_alloc);
+ for(i=0;i<sz;++i) {
+ q[i] = p[i];
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ q[sz] = '\0';
+ } else {
+ b = new_binary(BIF_P,(byte *)"\0",1);
+ }
+ BIF_RET(CONS(hp,BIF_ARG_1,b));
+#else
+ BIF_RET(BIF_ARG_1);
+#endif
+}
+BIF_RETTYPE dt_spread_tag_1(BIF_ALIST_1)
+{
+#ifdef USE_VM_PROBES
+ Eterm ret;
+ Eterm *hp;
+#endif
+ if (BIF_ARG_1 != am_true && BIF_ARG_1 != am_false) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+#ifdef USE_VM_PROBES
+ hp = HAlloc(BIF_P,3);
+ ret = TUPLE2(hp,make_small(DT_UTAG_FLAGS(BIF_P)),DT_UTAG(BIF_P));
+ if (DT_UTAG(BIF_P) != NIL) {
+ if (BIF_ARG_1 == am_true) {
+ DT_UTAG_FLAGS(BIF_P) |= DT_UTAG_SPREADING;
+#ifdef DTRACE_TAG_HARDDEBUG
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) start spreading tag %T\r\n",
+ BIF_P->id,DT_UTAG(BIF_P));
+#endif
+ } else {
+ DT_UTAG_FLAGS(BIF_P) &= ~DT_UTAG_SPREADING;
+#ifdef DTRACE_TAG_HARDDEBUG
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) stop spreading tag %T\r\n",
+ BIF_P->id,DT_UTAG(BIF_P));
+#endif
+ }
+ }
+ BIF_RET(ret);
+#else
+ BIF_RET(am_true);
+#endif
+}
+BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
+{
+#ifdef USE_VM_PROBES
+ Eterm *tpl;
+ Uint x;
+ if (is_not_tuple(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ tpl = tuple_val(BIF_ARG_1);
+ if(arityval(*tpl) != 2 || is_not_small(tpl[1]) || (is_not_binary(tpl[2]) && tpl[2] != NIL)) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ if (tpl[2] == NIL) {
+ if (DT_UTAG(BIF_P) != NIL) {
+#ifdef DTRACE_TAG_HARDDEBUG
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) restore Killing tag!\r\n",
+ BIF_P->id);
+#endif
+ }
+ DT_UTAG(BIF_P) = NIL;
+ if (SEQ_TRACE_TOKEN(BIF_P) == am_have_dt_utag) {
+ SEQ_TRACE_TOKEN(BIF_P) = NIL;
+ }
+ DT_UTAG_FLAGS(BIF_P) = 0;
+ } else {
+ x = unsigned_val(tpl[1]) & (DT_UTAG_SPREADING | DT_UTAG_PERMANENT);
+#ifdef DTRACE_TAG_HARDDEBUG
+
+ if (!(x & DT_UTAG_SPREADING) && (DT_UTAG_FLAGS(BIF_P) &
+ DT_UTAG_SPREADING)) {
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) restore stop spreading "
+ "tag %T\r\n",
+ BIF_P->id, tpl[2]);
+ } else if ((x & DT_UTAG_SPREADING) &&
+ !(DT_UTAG_FLAGS(BIF_P) & DT_UTAG_SPREADING)) {
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) restore start spreading "
+ "tag %T\r\n",BIF_P->id,tpl[2]);
+ }
+#endif
+ DT_UTAG_FLAGS(BIF_P) = x;
+ DT_UTAG(BIF_P) = tpl[2];
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL) {
+ SEQ_TRACE_TOKEN(BIF_P) = am_have_dt_utag;
+ }
+ }
+#else
+ if (BIF_ARG_1 != am_true) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+#endif
+ BIF_RET(am_true);
+}
+
+
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 8faa09feb8..d20089a9fb 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -26,14 +26,14 @@ extern Export* erts_format_cpu_topology_trap;
#define BIF_P A__p
-#define BIF_ALIST_0 Process* A__p
-#define BIF_ALIST_1 Process* A__p, Eterm A_1
-#define BIF_ALIST_2 Process* A__p, Eterm A_1, Eterm A_2
-#define BIF_ALIST_3 Process* A__p, Eterm A_1, Eterm A_2, Eterm A_3
+#define BIF_ALIST_0 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_1 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_2 Process* A__p, Eterm* BIF__ARGS
+#define BIF_ALIST_3 Process* A__p, Eterm* BIF__ARGS
-#define BIF_ARG_1 A_1
-#define BIF_ARG_2 A_2
-#define BIF_ARG_3 A_3
+#define BIF_ARG_1 (BIF__ARGS[0])
+#define BIF_ARG_2 (BIF__ARGS[1])
+#define BIF_ARG_3 (BIF__ARGS[2])
#define BUMP_ALL_REDS(p) do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \
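
The BIF_ALIST_N/BIF_ARG_N change above moves BIFs from one C parameter per Erlang argument to a single argument array, while BIF bodies stay source-compatible because the ARG macros become array lookups. A toy sketch of why bodies need no change (invented names, not the ERTS headers):

#include <stdio.h>

typedef unsigned long term_t;

#define ALIST_2   void *proc, term_t *args   /* was: void *proc, term_t a1, term_t a2 */
#define ARG_1     (args[0])                  /* was: a1 */
#define ARG_2     (args[1])                  /* was: a2 */

static term_t my_add_2(ALIST_2)
{
    (void) proc;
    return ARG_1 + ARG_2;     /* body unchanged by the calling-convention switch */
}

int main(void)
{
    term_t args[2] = { 40, 2 };
    printf("%lu\n", my_add_2(NULL, args));
    return 0;
}

Only the call sites (the emulator's BIF dispatch) need to know that the arguments now live in an array.
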
@@ -122,89 +122,106 @@ do { \
} while (0)
-#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
-do { \
- (Proc)->arity = 0; \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
- (Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
+#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
+do { \
+ (Proc)->arity = 0; \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
} while (0)
-#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
-do { \
- (Proc)->arity = 1; \
- (Proc)->def_arg_reg[0] = (Eterm) (A0); \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
- (Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
+#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->arity = 1; \
+ reg[0] = (Eterm) (A0); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
} while (0)
-#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
-do { \
- (Proc)->arity = 2; \
- (Proc)->def_arg_reg[0] = (Eterm) (A0); \
- (Proc)->def_arg_reg[1] = (Eterm) (A1); \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
- (Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
+#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->arity = 2; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2) \
+do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ (Proc)->arity = 3; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
} while (0)
-#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2)\
+#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
(Proc)->arity = 3; \
- (Proc)->def_arg_reg[0] = (Eterm) (A0); \
- (Proc)->def_arg_reg[1] = (Eterm) (A1); \
- (Proc)->def_arg_reg[2] = (Eterm) (A2); \
- *((UWord *) (UWord) ((Proc)->def_arg_reg + 3)) = (UWord) ((Trap)->address); \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ (Proc)->i = (BeamInstr*) ((Trap)->address); \
(Proc)->freason = TRAP; \
- (Ret) = THE_NON_VALUE; \
} while (0)
-#define BIF_TRAP0(p, Trap_) do { \
- (p)->arity = 0; \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP0(p, Trap_) do { \
+ (p)->arity = 0; \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP1(Trap_, p, A0) do { \
- (p)->arity = 1; \
- (p)->def_arg_reg[0] = (A0); \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP1(Trap_, p, A0) do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ (p)->arity = 1; \
+ reg[0] = (A0); \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP2(Trap_, p, A0, A1) do { \
- (p)->arity = 2; \
- (p)->def_arg_reg[0] = (A0); \
- (p)->def_arg_reg[1] = (A1); \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP2(Trap_, p, A0, A1) do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ (p)->arity = 2; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
- (p)->arity = 3; \
- (p)->def_arg_reg[0] = (A0); \
- (p)->def_arg_reg[1] = (A1); \
- (p)->def_arg_reg[2] = (A2); \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) ((Trap_)->address); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
+ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ (p)->arity = 3; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ reg[2] = (A2); \
+ (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
- (p)->arity = 0; \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) (Code_); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
+ (p)->arity = 0; \
+ (p)->i = (BeamInstr*) (Code_); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
-#define BIF_TRAP_CODE_PTR_(p, Code_) do { \
- *((UWord *) (UWord) ((p)->def_arg_reg + 3)) = (UWord) (Code_); \
- (p)->freason = TRAP; \
- return THE_NON_VALUE; \
+#define BIF_TRAP_CODE_PTR_(p, Code_) do { \
+ (p)-> i = (BeamInstr*) (Code_); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
} while(0)
extern Export bif_return_trap_export;
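
The trap macros above now store the continuation in p->i and the arguments in the scheduler's x register array rather than in def_arg_reg. Stripped of the emulator types, the trap idea is: save a resume point and the arguments, return a reserved non-value, and let the scheduler re-enter at the saved point. A minimal sketch under those assumptions (invented names):

#include <stdio.h>

typedef unsigned long term_t;
#define THE_NON_VALUE ((term_t) 0)

struct proc {
    int arity;
    term_t reg[3];                        /* argument registers, cf. x_reg_array */
    term_t (*resume)(struct proc *);      /* cf. p->i pointing at Trap->address */
};

static term_t finish_work(struct proc *p)
{
    return p->reg[0] + 1;                 /* the "trap target" completes the job */
}

static term_t start_work(struct proc *p, term_t arg)
{
    p->arity = 1;
    p->reg[0] = arg;
    p->resume = finish_work;
    return THE_NON_VALUE;                 /* tell the scheduler we trapped */
}

int main(void)
{
    struct proc p;
    term_t r = start_work(&p, 41);
    if (r == THE_NON_VALUE)
        r = p.resume(&p);                 /* scheduler re-dispatch */
    printf("%lu\n", r);
    return 0;
}
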
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index d9dd80fa8b..8a85e102d1 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2010. All Rights Reserved.
+# Copyright Ericsson AB 1996-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -87,6 +87,8 @@ bif erlang:exit/2
bif 'erl.lang.proc':signal/2 ebif_signal_2 exit_2
bif erlang:external_size/1
bif 'erl.lang.term':external_size/1 ebif_external_size_1
+bif erlang:external_size/2
+bif 'erl.lang.term':external_size/2 ebif_external_size_2
ubif erlang:float/1
ubif 'erl.lang.number':to_float/1 ebif_to_float_1 float_1
bif erlang:float_to_list/1
@@ -113,6 +115,8 @@ bif erlang:halt/0
bif 'erl.lang.system':halt/0 ebif_halt_0
bif erlang:halt/1
bif 'erl.lang.system':halt/1 ebif_halt_1
+bif erlang:halt/2
+bif 'erl.lang.system':halt/2 ebif_halt_2
bif erlang:phash/2
bif erlang:phash2/1
bif erlang:phash2/2
@@ -158,10 +162,6 @@ bif erlang:md5_update/2
bif 'erl.util.crypt.md5':update/2 ebif_md5_update_2
bif erlang:md5_final/1
bif 'erl.util.crypt.md5':final/1 ebif_md5_final_1
-bif erlang:memory/0
-bif 'erl.lang':memory/0 ebif_memory_0
-bif erlang:memory/1
-bif 'erl.lang':memory/1 ebif_memory_1
bif erlang:module_loaded/1
bif 'erl.system.code':is_loaded/1 ebif_is_loaded_1 module_loaded_1
bif erlang:function_exported/3
@@ -802,6 +802,35 @@ bif prim_file:internal_name2native/1
bif prim_file:internal_native2name/1
bif prim_file:internal_normalize_utf8/1
bif file:native_name_encoding/0
+
+#
+# New in R14B04.
+#
+bif erlang:check_old_code/1
+
+
+#
+# New in R15B
+#
+bif erlang:universaltime_to_posixtime/1
+bif erlang:posixtime_to_universaltime/1
+
+#
+# New in R15B01
+#
+
+# The dtrace BIFs are always present, but give dummy results if dynamic trace is not enabled in the build
+bif erlang:dt_put_tag/1
+bif erlang:dt_get_tag/0
+bif erlang:dt_get_tag_data/0
+bif erlang:dt_spread_tag/1
+bif erlang:dt_restore_tag/1
+
+# These are dummies even when dynamic trace is enabled, unless VM probes are enabled.
+# They are also internal, for dtrace tags sent to the VM's own drivers (efile)
+bif erlang:dt_prepend_vm_tag_data/1
+bif erlang:dt_append_vm_tag_data/1
+
#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index d18de9ae5d..5a5b162b9c 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -310,12 +310,12 @@
#define DREM(a1,a0,b,r) do { \
ErtsDigit __a1 = (a1); \
ErtsDigit __b = (b); \
- ErtsDigit __q0; \
+ ERTS_DECLARE_DUMMY(ErtsDigit __q0); \
DDIVREM((__a1 % __b), (a0), __b, __q0, r); \
} while(0)
#define DDIV(a1,a0,b,q) do { \
- ErtsDigit _tmp; \
+ ERTS_DECLARE_DUMMY(ErtsDigit _tmp); \
DDIVREM(a1,a0,b,q,_tmp); \
} while(0)
@@ -413,8 +413,8 @@
} while(0)
#define DDIV2(a1,a0,b1,b0,q) do { \
- ErtsDigit _tmp_r1; \
- ErtsDigit _tmp_r0; \
+ ERTS_DECLARE_DUMMY(ErtsDigit _tmp_r1); \
+ ERTS_DECLARE_DUMMY(ErtsDigit _tmp_r0); \
D2DIVREM(a1,a0,b1,b0,q,_tmp_r1,_tmp_r0); \
} while(0)
@@ -810,7 +810,9 @@ static dsize_t D_div(ErtsDigit* x, dsize_t xl, ErtsDigit d, ErtsDigit* q, ErtsDi
}
do {
- ErtsDigit q0, a0, b1, b0, b;
+ ErtsDigit q0, a0, b0;
+ ERTS_DECLARE_DUMMY(ErtsDigit b);
+ ERTS_DECLARE_DUMMY(ErtsDigit b1);
if (d > a1) {
a0 = *xp;
@@ -1323,7 +1325,7 @@ static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
return 1;
}
else {
- long ay = (y < 0) ? -y : y;
+ SWord ay = (y < 0) ? -y : y;
int bw = ay / D_EXP;
int sw = ay % D_EXP;
dsize_t rl;
@@ -1448,6 +1450,20 @@ erts_make_integer(Uint x, Process *p)
return uint_to_big(x,hp);
}
}
+/*
+ * As erts_make_integer, but from a whole UWord.
+ */
+Eterm
+erts_make_integer_from_uword(UWord x, Process *p)
+{
+ Eterm* hp;
+ if (IS_USMALL(0,x))
+ return make_small(x);
+ else {
+ hp = HAlloc(p, BIG_UWORD_HEAP_SIZE(x));
+ return uword_to_big(x,hp);
+ }
+}
/*
** convert Uint to bigint
@@ -1584,6 +1600,62 @@ big_to_double(Wterm x, double* resp)
return 0;
}
+/*
+ * Logic has been copied from erl_bif_guard.c and slightly
+ * modified to use a static instead of dynamic heap
+ */
+Eterm
+double_to_big(double x, Eterm *heap)
+{
+ int is_negative;
+ int ds;
+ ErtsDigit* xp;
+ Eterm res;
+ int i;
+ size_t sz;
+ Eterm* hp;
+ double dbase;
+
+ if (x >= 0) {
+ is_negative = 0;
+ } else {
+ is_negative = 1;
+ x = -x;
+ }
+
+ /* Unscale & (calculate exponent) */
+ ds = 0;
+ dbase = ((double) (D_MASK) + 1);
+ while (x >= 1.0) {
+ x /= dbase; /* "shift" right */
+ ds++;
+ }
+ sz = BIG_NEED_SIZE(ds); /* number of words including arity */
+
+ hp = heap;
+ res = make_big(hp);
+ xp = (ErtsDigit*) (hp + 1);
+
+ for (i = ds - 1; i >= 0; i--) {
+ ErtsDigit d;
+
+ x *= dbase; /* "shift" left */
+ d = x; /* trunc */
+ xp[i] = d; /* store digit */
+ x -= d; /* remove integer part */
+ }
+ while ((ds & (BIG_DIGITS_PER_WORD - 1)) != 0) {
+ xp[ds++] = 0;
+ }
+
+ if (is_negative) {
+ *hp = make_neg_bignum_header(sz-1);
+ } else {
+ *hp = make_pos_bignum_header(sz-1);
+ }
+ return res;
+}
+
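
double_to_big above unscales the double by repeated division by the digit base to count the digits, then multiplies back up, truncating off one digit per step. The same loop over illustrative base-2^16 digits (not the ERTS digit type):

#include <stdio.h>
#include <stdint.h>

#define DIGIT_BITS 16
#define DIGIT_BASE ((double) (1UL << DIGIT_BITS))

/* Decompose a non-negative double into base-2^16 digits, least significant
 * at index 0; returns the digit count, or -1 if 'max' digits do not suffice. */
static int double_to_digits(double x, uint16_t *digits, int max)
{
    int ds = 0, i;
    double tmp = x;

    while (tmp >= 1.0) {        /* count how many digits are needed */
        tmp /= DIGIT_BASE;
        ds++;
    }
    if (ds > max)
        return -1;
    for (i = ds - 1; i >= 0; i--) {
        uint16_t d;
        tmp *= DIGIT_BASE;      /* bring the next digit above the point */
        d = (uint16_t) tmp;     /* truncate to get that digit */
        digits[i] = d;
        tmp -= d;               /* drop the integer part and continue */
    }
    return ds;
}

int main(void)
{
    uint16_t d[8];
    int n = double_to_digits(123456789.0, d, 8), i;
    for (i = n - 1; i >= 0; i--)
        printf("%u ", (unsigned) d[i]);
    printf("\n");
    return 0;
}
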
/*
** Estimate the number of decimal digits (include sign)
@@ -1772,6 +1844,7 @@ dsize_t big_bytes(Eterm x)
/*
** Load a bignum from bytes
** xsz is the number of bytes in xp
+** *r is untouched if the number fits in a small
*/
Eterm bytes_to_big(byte *xp, dsize_t xsz, int xsgn, Eterm *r)
{
@@ -1780,7 +1853,7 @@ Eterm bytes_to_big(byte *xp, dsize_t xsz, int xsgn, Eterm *r)
ErtsDigit d;
int i;
- while(xsz >= sizeof(ErtsDigit)) {
+ while(xsz > sizeof(ErtsDigit)) {
d = 0;
for(i = sizeof(ErtsDigit); --i >= 0;)
d = (d << 8) | xp[i];
@@ -1795,11 +1868,20 @@ Eterm bytes_to_big(byte *xp, dsize_t xsz, int xsgn, Eterm *r)
d = 0;
for(i = xsz; --i >= 0;)
d = (d << 8) | xp[i];
+ if (++rsz == 1 && IS_USMALL(xsgn,d)) {
+ if (xsgn) d = -d;
+ return make_small(d);
+ }
*rwp = d;
rwp++;
- rsz++;
}
- return big_norm(r, rsz, (short) xsgn);
+ if (xsgn) {
+ *r = make_neg_bignum_header(rsz);
+ }
+ else {
+ *r = make_pos_bignum_header(rsz);
+ }
+ return make_big(r);
}
/*
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 2afc37004f..7eb1e5afe2 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -140,10 +140,12 @@ Eterm big_lshift(Eterm, Sint, Eterm*);
int big_comp (Wterm, Wterm);
int big_ucomp (Eterm, Eterm);
int big_to_double(Wterm x, double* resp);
+Eterm double_to_big(double, Eterm*);
Eterm small_to_big(Sint, Eterm*);
Eterm uint_to_big(Uint, Eterm*);
Eterm uword_to_big(UWord, Eterm*);
Eterm erts_make_integer(Uint, Process *);
+Eterm erts_make_integer_from_uword(UWord x, Process *p);
dsize_t big_bytes(Eterm);
Eterm bytes_to_big(byte*, dsize_t, int, Eterm*);
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 1fb39c6c67..3d2725e239 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -47,7 +47,7 @@ erts_init_binary(void)
away. If not, this test is not very expensive... */
erl_exit(ERTS_ABORT_EXIT,
"Internal error: Address of orig_bytes[0] of a Binary"
- "is *not* 8-byte aligned\n");
+ " is *not* 8-byte aligned\n");
}
}
@@ -356,8 +356,10 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
{
Eterm bin;
Uint size;
- int offset;
byte* bytes;
+#ifdef DEBUG
+ int offset;
+#endif
if (is_nil(arg)) {
BIF_RET(new_binary(p,(byte*)"",0));
@@ -372,7 +374,11 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
}
bin = new_binary(p, (byte *)NULL, size);
bytes = binary_bytes(bin);
- offset = io_list_to_buf(arg, (char*) bytes, size);
+#ifdef DEBUG
+ offset =
+#endif
+ io_list_to_buf(arg, (char*) bytes, size);
+
ASSERT(offset == 0);
BIF_RET(bin);
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index b8889e6206..6f5020dc14 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -37,6 +37,7 @@
#include "beam_load.h"
#include "erl_instrument.h"
#include "erl_bif_timer.h"
+#include "erl_thr_progress.h"
/* Forward declarations -- should really appear somewhere else */
static void process_killer(void);
@@ -94,7 +95,7 @@ process_killer(void)
erts_printf("(k)ill (n)ext (r)eturn:\n");
while(1) {
if ((j = sys_get_key(0)) <= 0)
- halt_0(0);
+ erl_exit(0, "");
switch(j) {
case 'k':
if (rp->status == P_WAITING) {
@@ -181,6 +182,7 @@ print_process_info(int to, void *to_arg, Process *p)
{
int garbing = 0;
int running = 0;
+ time_t tmp_t;
struct saved_calls *scb;
/* display the PID */
@@ -243,8 +245,8 @@ print_process_info(int to, void *to_arg, Process *p)
}
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
-
- erts_print(to, to_arg, "Started: %s", ctime((time_t*)&p->started.tv_sec));
+ tmp_t = p->started.tv_sec;
+ erts_print(to, to_arg, "Started: %s", ctime(&tmp_t));
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len);
@@ -626,7 +628,7 @@ bin_check(void)
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
bp->val,
bp->val->orig_size,
- erts_smp_atomic_read(&bp->val->refc));
+ erts_smp_atomic_read_nob(&bp->val->refc));
}
}
if (printed) {
@@ -644,30 +646,32 @@ bin_check(void)
void
erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
{
+#ifdef ERTS_SMP
+ ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
+#endif
int fd;
time_t now;
size_t dumpnamebufsize = MAXPATHLEN;
char dumpnamebuf[MAXPATHLEN];
char* dumpname;
- if (ERTS_IS_CRASH_DUMPING)
+ if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
- /* Wait for all threads to block. If all threads haven't blocked
+#ifdef ERTS_SMP
+ /*
+ * Wait for all managed threads to block. If all threads haven't blocked
* after a minute, we go anyway and hope for the best...
*
* We do not release system again. We expect an exit() or abort() after
* dump has been written.
- *
- * NOTE: We allow gc therefore it is important not to lock *any*
- * process locks.
*/
- erts_smp_emergency_block_system(60000, ERTS_BS_FLG_ALLOW_GC);
+ erts_thr_progress_fatal_error_block(60000, &tpd_buf);
/* Either worked or not... */
/* Allow us to pass certain places without locking... */
-#ifdef ERTS_SMP
- erts_smp_atomic_inc(&erts_writing_erl_crash_dump);
+ erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
+ erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
#else
erts_writing_erl_crash_dump = 1;
#endif
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 90201f3a90..d7345c2f54 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -30,6 +30,7 @@
#include "big.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "dtrace-wrapper.h"
#ifdef HYBRID
MA_STACK_DECLARE(src);
@@ -59,6 +60,14 @@ copy_object(Eterm obj, Process* to)
Eterm* hp = HAlloc(to, size);
Eterm res;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(copy_object)) {
+ DTRACE_CHARBUF(proc_name, 64);
+
+ erts_snprintf(proc_name, sizeof(proc_name), "%T", to->id);
+ DTRACE2(copy_object, proc_name, size);
+ }
+#endif
res = copy_struct(obj, size, &hp, &to->off_heap);
#ifdef DEBUG
if (eq(obj, res) == 0) {
@@ -134,7 +143,7 @@ Uint size_object(Eterm obj)
case SUB_BINARY_SUBTAG:
{
Eterm real_bin;
- Uint offset; /* Not used. */
+ ERTS_DECLARE_DUMMY(Uint offset); /* Not used. */
Uint bitsize;
Uint bitoffs;
Uint extra_bytes;
@@ -213,6 +222,8 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
if (IS_CONST(obj))
return obj;
+ DTRACE1(copy_struct, (int32_t)sz);
+
hp = htop = *hpp;
hbot = htop + sz;
hstart = (char *)htop;
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index b1cdd0660a..7c75c9fdb7 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -41,6 +41,8 @@
#include "bif.h"
#include "external.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
+#include "dtrace-wrapper.h"
/* Turn this on to get printouts of all distribution messages
* which go on the line
@@ -53,9 +55,9 @@
#endif
#if defined(ERTS_DIST_MSG_DBG) || defined(ERTS_RAW_DIST_MSG_DBG)
-static void bw(byte *buf, int sz)
+static void bw(byte *buf, ErlDrvSizeT sz)
{
- bin_write(ERTS_PRINT_STDERR,NULL,buf,sz);
+ bin_write(ERTS_PRINT_STDERR, NULL, buf, sz);
}
#endif
@@ -128,8 +130,8 @@ delete_cache(ErtsAtomCache *cache)
{
if (cache) {
erts_free(ERTS_ALC_T_DCACHE, (void *) cache);
- ASSERT(erts_smp_atomic_read(&no_caches) > 0);
- erts_smp_atomic_dec(&no_caches);
+ ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0);
+ erts_smp_atomic_dec_nob(&no_caches);
}
}
@@ -147,7 +149,7 @@ create_cache(DistEntry *dep)
dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE,
sizeof(ErtsAtomCache));
- erts_smp_atomic_inc(&no_caches);
+ erts_smp_atomic_inc_nob(&no_caches);
for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) {
cp->in_arr[i] = THE_NON_VALUE;
cp->out_arr[i] = THE_NON_VALUE;
@@ -156,7 +158,7 @@ create_cache(DistEntry *dep)
Uint erts_dist_cache_size(void)
{
- return (Uint) erts_smp_atomic_read(&no_caches)*sizeof(ErtsAtomCache);
+ return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache);
}
static ErtsProcList *
@@ -380,7 +382,11 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
Eterm tup;
Eterm *hp = erts_alloc_message_heap(3,&bp,&ohp,rp,&rp_locks);
tup = TUPLE2(hp, am_nodedown, name);
- erts_queue_message(rp, &rp_locks, bp, tup, NIL);
+ erts_queue_message(rp, &rp_locks, bp, tup, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
erts_smp_proc_unlock(rp, rp_locks);
}
@@ -430,11 +436,11 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
nodename = erts_this_dist_entry->sysname;
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_smp_thr_progress_block();
erts_set_this_node(am_Noname, 0);
erts_is_alive = 0;
send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nd_reason);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
else { /* recursive call via erts_do_exit_port() will end up here */
@@ -444,7 +450,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
ErtsMonitor *monitors;
Uint32 flags;
- erts_smp_atomic_set(&dep->dist_cmd_scheduled, 1);
+ erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1);
erts_smp_de_rwlock(dep);
ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid)
@@ -510,7 +516,7 @@ void init_dist(void)
{
init_nodes_monitors();
- erts_smp_atomic_init(&no_caches, 0);
+ erts_smp_atomic_init_nob(&no_caches, 0);
/* Lookup/Install all references to trap functions */
dsend2_trap = trap_function(am_dsend,2);
@@ -535,7 +541,7 @@ alloc_dist_obuf(Uint size)
Binary *bin = erts_bin_drv_alloc(obuf_size);
bin->flags = BIN_FLAG_DRV;
erts_refc_init(&bin->refc, 1);
- bin->orig_size = (long) obuf_size;
+ bin->orig_size = (SWord) obuf_size;
obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
#ifdef DEBUG
obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
@@ -596,7 +602,7 @@ static void clear_dist_entry(DistEntry *dep)
suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
erts_smp_mtx_unlock(&dep->qlock);
- erts_smp_atomic_set(&dep->dist_cmd_scheduled, 0);
+ erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0);
dep->send = NULL;
erts_smp_de_rwunlock(dep);
@@ -739,19 +745,50 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
Eterm token = NIL;
Process *sender = dsdp->proc;
int res;
+#ifdef USE_VM_PROBES
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Uint msize = 0;
+ DTRACE_CHARBUF(node_name, 64);
+ DTRACE_CHARBUF(sender_name, 64);
+ DTRACE_CHARBUF(receiver_name, 64);
+#endif
UseTmpHeapNoproc(5);
- if (SEQ_TRACE_TOKEN(sender) != NIL) {
+ if (SEQ_TRACE_TOKEN(sender) != NIL
+#ifdef USE_VM_PROBES
+ && SEQ_TRACE_TOKEN(sender) != am_have_dt_utag
+#endif
+ ) {
seq_trace_update_send(sender);
token = SEQ_TRACE_TOKEN(sender);
seq_trace_output(token, message, SEQ_TRACE_SEND, remote, sender);
}
+#ifdef USE_VM_PROBES
+ *node_name = *sender_name = *receiver_name = '\0';
+ if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
+ erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(receiver_name, sizeof(receiver_name), "%T", remote);
+ msize = size_object(message);
+ if (token != NIL && token != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
+ }
+ }
+#endif
if (token != NIL)
ctl = TUPLE4(&ctl_heap[0],
make_small(DOP_SEND_TT), am_Cookie, remote, token);
else
ctl = TUPLE3(&ctl_heap[0], make_small(DOP_SEND), am_Cookie, remote);
+ DTRACE6(message_send, sender_name, receiver_name,
+ msize, tok_label, tok_lastcnt, tok_serial);
+ DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
+ msize, tok_label, tok_lastcnt, tok_serial);
res = dsig_send(dsdp, ctl, message, 0);
UnUseTmpHeapNoproc(5);
return res;
@@ -765,13 +802,41 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
Eterm token = NIL;
Process *sender = dsdp->proc;
int res;
+#ifdef USE_VM_PROBES
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Uint32 msize = 0;
+ DTRACE_CHARBUF(node_name, 64);
+ DTRACE_CHARBUF(sender_name, 64);
+ DTRACE_CHARBUF(receiver_name, 128);
+#endif
UseTmpHeapNoproc(6);
- if (SEQ_TRACE_TOKEN(sender) != NIL) {
+ if (SEQ_TRACE_TOKEN(sender) != NIL
+#ifdef USE_VM_PROBES
+ && SEQ_TRACE_TOKEN(sender) != am_have_dt_utag
+#endif
+ ) {
seq_trace_update_send(sender);
token = SEQ_TRACE_TOKEN(sender);
seq_trace_output(token, message, SEQ_TRACE_SEND, remote_name, sender);
}
+#ifdef USE_VM_PROBES
+ *node_name = *sender_name = *receiver_name = '\0';
+ if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
+ erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(receiver_name, sizeof(receiver_name),
+ "{%T,%s}", remote_name, node_name);
+ msize = size_object(message);
+ if (token != NIL && token != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
+ }
+ }
+#endif
if (token != NIL)
ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT),
@@ -779,6 +844,10 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
else
ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
sender->id, am_Cookie, remote_name);
+ DTRACE6(message_send, sender_name, receiver_name,
+ msize, tok_label, tok_lastcnt, tok_serial);
+ DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
+ msize, tok_label, tok_lastcnt, tok_serial);
res = dsig_send(dsdp, ctl, message, 0);
UnUseTmpHeapNoproc(6);
return res;
@@ -792,9 +861,23 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
Eterm ctl;
DeclareTmpHeapNoproc(ctl_heap,6);
int res;
+#ifdef USE_VM_PROBES
+ Process *sender = dsdp->proc;
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ DTRACE_CHARBUF(node_name, 64);
+ DTRACE_CHARBUF(sender_name, 64);
+ DTRACE_CHARBUF(remote_name, 128);
+ DTRACE_CHARBUF(reason_str, 128);
+#endif
UseTmpHeapNoproc(6);
- if (token != NIL) {
+ if (token != NIL
+#ifdef USE_VM_PROBES
+ && token != am_have_dt_utag
+#endif
+ ) {
seq_trace_update_send(dsdp->proc);
seq_trace_output_exit(token, reason, SEQ_TRACE_SEND, remote, local);
ctl = TUPLE5(&ctl_heap[0],
@@ -802,6 +885,23 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
} else {
ctl = TUPLE4(&ctl_heap[0], make_small(DOP_EXIT), local, remote, reason);
}
+#ifdef USE_VM_PROBES
+ *node_name = *sender_name = *remote_name = *reason_str = '\0';
+ if (DTRACE_ENABLED(process_exit_signal_remote)) {
+ erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(remote_name, sizeof(remote_name),
+ "{%T,%s}", remote, node_name);
+ erts_snprintf(reason_str, sizeof(reason_str), "%T", reason);
+ if (token != NIL && token != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
+ }
+ }
+#endif
+ DTRACE7(process_exit_signal_remote, sender_name, node_name,
+ remote_name, reason_str, tok_label, tok_lastcnt, tok_serial);
/* forced, i.e ignore busy */
res = dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
UnUseTmpHeapNoproc(6);
@@ -896,9 +996,9 @@ erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
int erts_net_message(Port *prt,
DistEntry *dep,
byte *hbuf,
- int hlen,
+ ErlDrvSizeT hlen,
byte *buf,
- int len)
+ ErlDrvSizeT len)
{
#define DIST_CTL_DEFAULT_SIZE 64
ErtsDistExternal ede;
@@ -923,7 +1023,7 @@ int erts_net_message(Port *prt,
Uint tuple_arity;
int res;
#ifdef ERTS_DIST_MSG_DBG
- int orig_len = len;
+ ErlDrvSizeT orig_len = len;
#endif
UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
@@ -939,7 +1039,7 @@ int erts_net_message(Port *prt,
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
return 0;
}
- if (hlen > 0)
+ if (hlen != 0)
goto data_error;
if (len == 0) { /* HANDLE TICK !!! */
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
@@ -967,7 +1067,7 @@ int erts_net_message(Port *prt,
res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache);
if (res >= 0)
- res = ctl_len = erts_decode_dist_ext_size(&ede, 0);
+ res = ctl_len = erts_decode_dist_ext_size(&ede);
else {
#ifdef ERTS_DIST_MSG_DBG
erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
@@ -1618,6 +1718,18 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) {
if (suspended)
resume = 1; /* was busy when we started, but isn't now */
+#ifdef USE_VM_PROBES
+ if (resume && DTRACE_ENABLED(dist_port_not_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", cid);
+ erts_snprintf(remote_str, sizeof(remote_str),
+ "%T", dep->sysname);
+ DTRACE3(dist_port_not_busy, erts_this_node_sysname,
+ port_str, remote_str);
+ }
+#endif
}
else {
/* Enqueue suspended process on dist entry */
@@ -1667,6 +1779,19 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
}
if (suspended) {
+#ifdef USE_VM_PROBES
+ if (!resume && DTRACE_ENABLED(dist_port_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+ DTRACE_CHARBUF(pid_str, 16);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", cid);
+ erts_snprintf(remote_str, sizeof(remote_str), "%T", dep->sysname);
+ erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->id);
+ DTRACE4(dist_port_busy, erts_this_node_sysname,
+ port_str, remote_str, pid_str);
+ }
+#endif
if (!resume && erts_system_monitor_flags.busy_dist_port)
monitor_generic(c_p, am_busy_dist_port, cid);
return ERTS_DSIG_SEND_YIELD;
@@ -1690,6 +1815,18 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
"(%beu bytes) passed.\n",
size);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(dist_output)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(remote_str, sizeof(remote_str),
+ "%T", prt->dist_entry->sysname);
+ DTRACE4(dist_output, erts_this_node_sysname, port_str,
+ remote_str, size);
+ }
+#endif
prt->caller = NIL;
fpe_was_unmasked = erts_block_fpe();
(*prt->drv_ptr->output)((ErlDrvData) prt->drv_data,
@@ -1732,6 +1869,18 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
ASSERT(prt->drv_ptr->outputv);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(dist_outputv)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(remote_str, sizeof(remote_str),
+ "%T", prt->dist_entry->sysname);
+ DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
+ remote_str, size);
+ }
+#endif
prt->caller = NIL;
fpe_was_unmasked = erts_block_fpe();
(*prt->drv_ptr->outputv)((ErlDrvData) prt->drv_data, &eiov);
@@ -1775,7 +1924,7 @@ erts_dist_command(Port *prt, int reds_limit)
erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be
removed if port command fails */
- erts_smp_atomic_xchg(&dep->dist_cmd_scheduled, 0);
+ erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0);
erts_smp_de_rlock(dep);
flags = dep->flags;
@@ -2051,6 +2200,18 @@ erts_dist_command(Port *prt, int reds_limit)
void
erts_dist_port_not_busy(Port *prt)
{
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(dist_port_not_busy)) {
+ DTRACE_CHARBUF(port_str, 64);
+ DTRACE_CHARBUF(remote_str, 64);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(remote_str, sizeof(remote_str),
+ "%T", prt->dist_entry->sysname);
+ DTRACE3(dist_port_not_busy, erts_this_node_sysname,
+ port_str, remote_str);
+ }
+#endif
erts_schedule_dist_command(prt, NULL);
}
@@ -2330,11 +2491,11 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
#endif
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_smp_thr_progress_block();
erts_set_this_node(BIF_ARG_1, (Uint32) creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
@@ -2730,85 +2891,92 @@ BIF_RETTYPE is_alive_0(BIF_ALIST_0)
/**********************************************************************/
/* erlang:monitor_node(Node, Bool, Options) -> Bool */
-BIF_RETTYPE monitor_node_3(BIF_ALIST_3)
+static BIF_RETTYPE
+monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
{
DistEntry *dep;
ErtsLink *lnk;
Eterm l;
- for (l = BIF_ARG_3; l != NIL && is_list(l); l = CDR(list_val(l))) {
+ for (l = Options; l != NIL && is_list(l); l = CDR(list_val(l))) {
Eterm t = CAR(list_val(l));
/* allow_passive_connect the only available option right now */
if (t != am_allow_passive_connect) {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
}
if (l != NIL) {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- if (is_not_atom(BIF_ARG_1) ||
- ((BIF_ARG_2 != am_true) && (BIF_ARG_2 != am_false)) ||
+ if (is_not_atom(Node) ||
+ ((Bool != am_true) && (Bool != am_false)) ||
((erts_this_node->sysname == am_Noname)
- && (BIF_ARG_1 != erts_this_node->sysname))) {
- BIF_ERROR(BIF_P, BADARG);
+ && (Node != erts_this_node->sysname))) {
+ BIF_ERROR(p, BADARG);
}
- dep = erts_sysname_to_connected_dist_entry(BIF_ARG_1);
+ dep = erts_sysname_to_connected_dist_entry(Node);
if (!dep) {
do_trap:
- BIF_TRAP3(dmonitor_node_trap, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ BIF_TRAP3(dmonitor_node_trap, p, Node, Bool, Options);
}
if (dep == erts_this_dist_entry)
goto done;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
erts_smp_de_rlock(dep);
if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
erts_smp_de_runlock(dep);
goto do_trap;
}
erts_smp_de_links_lock(dep);
erts_smp_de_runlock(dep);
- if (BIF_ARG_2 == am_true) {
+ if (Bool == am_true) {
ASSERT(dep->cid != NIL);
lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE,
- BIF_P->id);
+ p->id);
++ERTS_LINK_REFC(lnk);
- lnk = erts_add_or_lookup_link(&(BIF_P->nlinks), LINK_NODE, BIF_ARG_1);
+ lnk = erts_add_or_lookup_link(&(p->nlinks), LINK_NODE, Node);
++ERTS_LINK_REFC(lnk);
}
else {
- lnk = erts_lookup_link(dep->node_links, BIF_P->id);
+ lnk = erts_lookup_link(dep->node_links, p->id);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
erts_destroy_link(erts_remove_link(&(dep->node_links),
- BIF_P->id));
+ p->id));
}
}
- lnk = erts_lookup_link(BIF_P->nlinks, BIF_ARG_1);
+ lnk = erts_lookup_link(p->nlinks, Node);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
- erts_destroy_link(erts_remove_link(&(BIF_P->nlinks),
- BIF_ARG_1));
+ erts_destroy_link(erts_remove_link(&(p->nlinks),
+ Node));
}
}
}
erts_smp_de_links_unlock(dep);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
done:
erts_deref_dist_entry(dep);
BIF_RET(am_true);
}
+BIF_RETTYPE monitor_node_3(BIF_ALIST_3)
+{
+ BIF_RET(monitor_node(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3));
+}
+
+
/* monitor_node(Node, Bool) -> Bool */
BIF_RETTYPE monitor_node_2(BIF_ALIST_2)
{
- BIF_RET(monitor_node_3(BIF_P,BIF_ARG_1,BIF_ARG_2,NIL));
+ BIF_RET(monitor_node(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL));
}
BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1)
@@ -2964,7 +3132,11 @@ send_nodes_mon_msg(Process *rp,
}
ASSERT(hend == hp);
- erts_queue_message(rp, rp_locksp, bp, msg, NIL);
+ erts_queue_message(rp, rp_locksp, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
static void
@@ -2977,6 +3149,21 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas
ASSERT(is_immed(what));
ASSERT(is_immed(node));
ASSERT(is_immed(type));
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(dist_monitor)) {
+ DTRACE_CHARBUF(what_str, 12);
+ DTRACE_CHARBUF(node_str, 64);
+ DTRACE_CHARBUF(type_str, 12);
+ DTRACE_CHARBUF(reason_str, 64);
+
+ erts_snprintf(what_str, sizeof(what_str), "%T", what);
+ erts_snprintf(node_str, sizeof(node_str), "%T", node);
+ erts_snprintf(type_str, sizeof(type_str), "%T", type);
+ erts_snprintf(reason_str, sizeof(reason_str), "%T", reason);
+ DTRACE5(dist_monitor, erts_this_node_sysname,
+ what_str, node_str, type_str, reason_str);
+ }
+#endif
ERTS_SMP_LC_ASSERT(!c_p
|| (erts_proc_lc_my_proc_locks(c_p)
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 695a4fc3fe..845151c895 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -203,7 +203,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
id = dep->cid;
}
- if (!erts_smp_atomic_xchg(&dep->dist_cmd_scheduled, 1)) {
+ if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) {
(void) erts_port_task_schedule(id,
&dep->dist_cmd,
ERTS_PORT_TASK_DIST_CMD,
diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h
new file mode 100644
index 0000000000..1aeb7f9221
--- /dev/null
+++ b/erts/emulator/beam/dtrace-wrapper.h
@@ -0,0 +1,109 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2012.
+ * All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __DTRACE_WRAPPER_H
+#define __DTRACE_WRAPPER_H
+
+#define DTRACE_TERM_BUF_SIZE 256
+
+/*
+ * Some varieties of SystemTap macros do not like statically-sized
+ * char[N] buffers. (For example, CentOS 6's macros.)
+ * So, we'll play a game to humor them.
+ *
+ * The code necessary to play nice with CentOS 6's SystemTap looks
+ * stupid to a C programmer's eyes, so we hide the ugliness with this
+ * macro, which expands:
+ *
+ * DTRACE_CHARBUF(proc_name, 64);
+ *
+ * to become:
+ *
+ * char proc_name_BUFFER[64], *proc_name = proc_name_BUFFER;
+ */
+
+#define DTRACE_CHARBUF(name, size) \
+ char name##_BUFFER[size], *name = name##_BUFFER
+
+#if defined(USE_DYNAMIC_TRACE) && defined(USE_VM_PROBES)
+
+#include "erlang_dtrace.h"
+
+#define DTRACE_ENABLED(name) \
+ erlang_##name##_enabled()
+#define DTRACE0(name) \
+ erlang_##name()
+#define DTRACE1(name, a0) \
+ erlang_##name(a0)
+#define DTRACE2(name, a0, a1) \
+ erlang_##name((a0), (a1))
+#define DTRACE3(name, a0, a1, a2) \
+ erlang_##name((a0), (a1), (a2))
+#define DTRACE4(name, a0, a1, a2, a3) \
+ erlang_##name((a0), (a1), (a2), (a3))
+#define DTRACE5(name, a0, a1, a2, a3, a4) \
+ erlang_##name((a0), (a1), (a2), (a3), (a4))
+#define DTRACE6(name, a0, a1, a2, a3, a4, a5) \
+ erlang_##name((a0), (a1), (a2), (a3), (a4), (a5))
+#define DTRACE7(name, a0, a1, a2, a3, a4, a5, a6) \
+ erlang_##name((a0), (a1), (a2), (a3), (a4), (a5), (a6))
+#define DTRACE10(name, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+ erlang_##name((a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8), (a9))
+#define DTRACE11(name, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
+ erlang_##name((a0), (a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8), (a9), (a10))
+
+#if defined(_SDT_PROBE) && !defined(STAP_PROBE11)
+/* SLF: This is Ubuntu 11-style SystemTap hackery */
+/* workaround for missing STAP macro */
+#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
+ _SDT_PROBE(provider, name, 11, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
+#define _SDT_ASM_OPERANDS_11(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
+ _SDT_ASM_OPERANDS_10(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9,arg10), \
+ _SDT_ARG(11, arg11)
+#endif
+
+#ifdef STAP_PROBE_ADDR
+/* SLF: This is CentOS 5-style SystemTap hackery */
+/* SystemTap compat mode cannot support 11 args. We'll ignore the 11th */
+#define STAP_PROBE11(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11) \
+ STAP_PROBE10(provider,probe,(parm1),(parm2),(parm3),(parm4),(parm5),(parm6),(parm7),(parm8),(parm9),(parm10))
+#endif /* STAP_PROBE_ADDR */
+
+#else /* USE_DYNAMIC_TRACE && USE_VM_PROBES */
+
+/* Render all macros to do nothing */
+#define DTRACE_ENABLED(name) 0
+#define DTRACE0(name) do {} while (0)
+#define DTRACE1(name, a0) do {} while (0)
+#define DTRACE2(name, a0, a1) do {} while (0)
+#define DTRACE3(name, a0, a1, a2) do {} while (0)
+#define DTRACE4(name, a0, a1, a2, a3) do {} while (0)
+#define DTRACE5(name, a0, a1, a2, a3, a4) do {} while (0)
+#define DTRACE6(name, a0, a1, a2, a3, a4, a5) do {} while (0)
+#define DTRACE7(name, a0, a1, a2, a3, a4, a5, a6) do {} while (0)
+#define DTRACE10(name, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+ do {} while (0)
+#define DTRACE11(name, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
+ do {} while (0)
+
+#endif /* USE_DYNAMIC_TRACE && USE_VM_PROBES */
+
+#endif /* __DTRACE_WRAPPER_H */
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index e8b594bb47..570cc59be2 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -43,9 +43,9 @@
/* Prototypes of callback functions */
static Block_t * get_free_block (Allctr_t *, Uint,
- Block_t *, Uint);
-static void link_free_block (Allctr_t *, Block_t *);
-static void unlink_free_block (Allctr_t *, Block_t *);
+ Block_t *, Uint, Uint32);
+static void link_free_block (Allctr_t *, Block_t *, Uint32);
+static void unlink_free_block (Allctr_t *, Block_t *, Uint32);
static Eterm info_options (Allctr_t *, char *, int *,
@@ -65,14 +65,20 @@ erts_afalc_start(AFAllctr_t *afallctr,
AFAllctrInit_t *afinit,
AllctrInit_t *init)
{
- AFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ AFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) afallctr;
- sys_memcpy((void *) afallctr, (void *) &nulled_state, sizeof(AFAllctr_t));
+ sys_memcpy((void *) afallctr, (void *) &zero.allctr, sizeof(AFAllctr_t));
+
+ init->sbmbct = 0; /* Small mbc not supported by afit */
allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
@@ -105,7 +111,8 @@ erts_afalc_start(AFAllctr_t *afallctr,
}
static Block_t *
-get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size)
+get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size,
+ Uint32 flags)
{
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
@@ -123,7 +130,7 @@ get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size)
}
static void
-link_free_block(Allctr_t *allctr, Block_t *block)
+link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
@@ -144,7 +151,7 @@ link_free_block(Allctr_t *allctr, Block_t *block)
}
static void
-unlink_free_block(Allctr_t *allctr, Block_t *block)
+unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index cda404af5e..8130d5c576 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,6 +40,8 @@
#include "erl_mseg.h"
#include "erl_monitors.h"
#include "erl_bif_timer.h"
+#include "erl_cpu_topology.h"
+#include "erl_thr_queue.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
#include "erl_check_io.h"
#endif
@@ -50,8 +52,18 @@
#include "erl_bestfit_alloc.h"
#define GET_ERL_AF_ALLOC_IMPL
#include "erl_afit_alloc.h"
+#define GET_ERL_AOFF_ALLOC_IMPL
+#include "erl_ao_firstfit_alloc.h"
-#define ERTS_ALC_DEFAULT_MAX_THR_PREF 16
+
+#if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_AU_MAX_PREF_ALLOC_INSTANCES
+# error "Too many schedulers; cannot create that many pref alloc instances"
+#endif
+
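+/* Index of a fixed-size allocation type within the fix_type_sizes array */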
+#define ERTS_ALC_FIX_TYPE_IX(T) \
+ (ERTS_ALC_T2N((T)) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
+
+#define ERTS_ALC_DEFAULT_MAX_THR_PREF ERTS_MAX_NO_OF_SCHEDULERS
#if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
#define AU_ALLOC_DEFAULT_ENABLE(X) 0
@@ -85,42 +97,66 @@ typedef union {
char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
AFAllctr_t afa;
char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
+ AOFFAllctr_t aoffa;
+ char align_aoffa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AOFFAllctr_t))];
} ErtsAllocatorState_t;
-static ErtsAllocatorState_t sl_alloc_state;
+static ErtsAllocatorState_t sbmbc_alloc_state;
static ErtsAllocatorState_t std_alloc_state;
static ErtsAllocatorState_t ll_alloc_state;
#if HALFWORD_HEAP
-static ErtsAllocatorState_t std_alloc_low_state;
-static ErtsAllocatorState_t ll_alloc_low_state;
+static ErtsAllocatorState_t sbmbc_low_alloc_state;
+static ErtsAllocatorState_t std_low_alloc_state;
+static ErtsAllocatorState_t ll_low_alloc_state;
#endif
+static ErtsAllocatorState_t sl_alloc_state;
static ErtsAllocatorState_t temp_alloc_state;
static ErtsAllocatorState_t eheap_alloc_state;
static ErtsAllocatorState_t binary_alloc_state;
static ErtsAllocatorState_t ets_alloc_state;
static ErtsAllocatorState_t driver_alloc_state;
+static ErtsAllocatorState_t fix_alloc_state;
-ErtsAlcType_t erts_fix_core_allocator_ix;
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-static void *(*fix_core_allocator)(ErtsAlcType_t, void *, Uint);
-static void *fix_core_extra;
-static void *fix_core_alloc(Uint size)
+typedef struct {
+ erts_smp_atomic32_t refc;
+ int only_sz;
+ Uint req_sched;
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ int allocs[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+1+2];
+} ErtsAllocInfoReq;
+
+#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1)
+#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2)
+#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_MSEG_ALLOC
+
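+/* ErtsAllocInfoReq structures: scheduler-preferred quick alloc when available; on halfword heap, fall back to plain erts_alloc/erts_free */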
+#if !HALFWORD_HEAP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
+ ErtsAllocInfoReq,
+ 5,
+ ERTS_ALC_T_AINFO_REQ)
+#else
+static ERTS_INLINE ErtsAllocInfoReq *
+aireq_alloc(void)
{
- void *res;
- res = (*fix_core_allocator)(ERTS_ALC_T_UNDEF, fix_core_extra, size);
- if (erts_mtrace_enabled)
- erts_mtrace_crr_alloc(res,
- ERTS_ALC_A_FIXED_SIZE,
- erts_fix_core_allocator_ix,
- size);
- return res;
+ return erts_alloc(ERTS_ALC_T_AINFO_REQ, sizeof(ErtsAllocInfoReq));
+}
+
+static ERTS_INLINE void
+aireq_free(ErtsAllocInfoReq *ptr)
+{
+ erts_free(ERTS_ALC_T_AINFO_REQ, ptr);
}
#endif
+ErtsAlcType_t erts_fix_core_allocator_ix;
+
enum allctr_type {
GOODFIT,
BESTFIT,
- AFIT
+ AFIT,
+ AOFIRSTFIT
};
struct au_init {
@@ -132,6 +168,7 @@ struct au_init {
GFAllctrInit_t gf;
BFAllctrInit_t bf;
AFAllctrInit_t af;
+ AOFFAllctrInit_t aoff;
} init;
struct {
int mmbcs;
@@ -145,7 +182,8 @@ struct au_init {
ERTS_DEFAULT_ALLCTR_INIT, \
ERTS_DEFAULT_GF_ALLCTR_INIT, \
ERTS_DEFAULT_BF_ALLCTR_INIT, \
- ERTS_DEFAULT_AF_ALLCTR_INIT \
+ ERTS_DEFAULT_AF_ALLCTR_INIT, \
+ ERTS_DEFAULT_AOFF_ALLCTR_INIT \
}
typedef struct {
@@ -162,6 +200,7 @@ typedef struct {
char *mtrace;
char *nodename;
} instr;
+ struct au_init sbmbc_alloc;
struct au_init sl_alloc;
struct au_init std_alloc;
struct au_init ll_alloc;
@@ -170,9 +209,11 @@ typedef struct {
struct au_init binary_alloc;
struct au_init ets_alloc;
struct au_init driver_alloc;
+ struct au_init fix_alloc;
#if HALFWORD_HEAP
- struct au_init std_alloc_low;
- struct au_init ll_alloc_low;
+ struct au_init sbmbc_low_alloc;
+ struct au_init std_low_alloc;
+ struct au_init ll_low_alloc;
#endif
} erts_alc_hndl_args_init_t;
@@ -185,6 +226,34 @@ do { \
} while (0)
static void
+set_default_sbmbc_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = 0;
+ ip->thr_spec = 0;
+ ip->atype = BESTFIT;
+ ip->init.bf.ao = 1;
+ ip->init.util.ramv = 0;
+ ip->init.util.mmsbc = 0;
+ ip->init.util.mmmbc = 500;
+ ip->init.util.sbct = ~((UWord) 0);
+ ip->init.util.name_prefix = "sbmbc_";
+ ip->init.util.alloc_no = ERTS_ALC_A_SBMBC;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_SBMBC;
+ ip->init.util.asbcst = 0;
+ ip->init.util.rsbcst = 0;
+ ip->init.util.rsbcmt = 0;
+ ip->init.util.rmbcmt = 0;
+ ip->init.util.sbmbct = 0;
+ ip->init.util.sbmbcs = 0;
+}
+
+static void
set_default_sl_alloc_opts(struct au_init *ip)
{
SET_DEFAULT_ALLOC_OPTS(ip);
@@ -202,6 +271,7 @@ set_default_sl_alloc_opts(struct au_init *ip)
ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
ip->init.util.rsbcst = 80;
#if HALFWORD_HEAP
+ ip->init.util.force = 1;
ip->init.util.low_mem = 1;
#endif
@@ -249,6 +319,8 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.util.rsbcst = 0;
ip->init.util.rsbcmt = 0;
ip->init.util.rmbcmt = 0;
+ ip->init.util.sbmbct = 0;
+ ip->init.util.sbmbcs = 0;
}
static void
@@ -269,6 +341,7 @@ set_default_temp_alloc_opts(struct au_init *ip)
ip->init.util.rsbcst = 90;
ip->init.util.rmbcmt = 100;
#if HALFWORD_HEAP
+ ip->init.util.force = 1;
ip->init.util.low_mem = 1;
#endif
}
@@ -291,6 +364,7 @@ set_default_eheap_alloc_opts(struct au_init *ip)
ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
ip->init.util.rsbcst = 50;
#if HALFWORD_HEAP
+ ip->init.util.force = 1;
ip->init.util.low_mem = 1;
#endif
}
@@ -348,46 +422,52 @@ set_default_driver_alloc_opts(struct au_init *ip)
ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
}
+static void
+set_default_fix_alloc_opts(struct au_init *ip,
+ size_t *fix_type_sizes)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = BESTFIT;
+ ip->init.bf.ao = 1;
+ ip->init.util.name_prefix = "fix_";
+ ip->init.util.fix_type_size = fix_type_sizes;
+ ip->init.util.alloc_no = ERTS_ALC_A_FIXED_SIZE;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_FIXED_SIZE;
+}
+
#ifdef ERTS_SMP
static void
adjust_tpref(struct au_init *ip, int no_sched)
{
if (ip->thr_spec) {
- Uint allocs;
- if (ip->thr_spec < 0) {/* User specified amount */
- allocs = abs(ip->thr_spec);
- if (allocs > no_sched)
- allocs = no_sched;
- }
- else if (no_sched > ERTS_ALC_DEFAULT_MAX_THR_PREF)
- allocs = ERTS_ALC_DEFAULT_MAX_THR_PREF;
- else
- allocs = no_sched;
- if (allocs <= 1)
- ip->thr_spec = 0;
- else {
- ip->thr_spec = (int) allocs;
- ip->thr_spec *= -1; /* thread preferred */
-
- /* If default ... */
-
- /* ... shrink main multi-block carrier size */
- if (ip->default_.mmbcs)
- ip->init.util.mmbcs /= ERTS_MIN(4, allocs);
- /* ... shrink largest multi-block carrier size */
- if (ip->default_.lmbcs)
- ip->init.util.lmbcs /= ERTS_MIN(2, allocs);
- /* ... shrink smallest multi-block carrier size */
- if (ip->default_.smbcs)
- ip->init.util.smbcs /= ERTS_MIN(4, allocs);
- /* ... and more than three allocators shrink
- max mseg multi-block carriers */
- if (ip->default_.mmmbc && allocs > 2) {
- ip->init.util.mmmbc /= ERTS_MIN(4, allocs - 1);
- if (ip->init.util.mmmbc < 3)
- ip->init.util.mmmbc = 3;
- }
+ ip->thr_spec = no_sched;
+ ip->thr_spec *= -1; /* thread preferred */
+
+ /* If default ... */
+
+ /* ... shrink main multi-block carrier size */
+ if (ip->default_.mmbcs)
+ ip->init.util.mmbcs /= ERTS_MIN(4, no_sched);
+ /* ... shrink largest multi-block carrier size */
+ if (ip->default_.lmbcs)
+ ip->init.util.lmbcs /= ERTS_MIN(2, no_sched);
+ /* ... shrink smallest multi-block carrier size */
+ if (ip->default_.smbcs)
+ ip->init.util.smbcs /= ERTS_MIN(4, no_sched);
+ /* ... and more than three allocators shrink
+ max mseg multi-block carriers */
+ if (ip->default_.mmmbc && no_sched > 2) {
+ ip->init.util.mmmbc /= ERTS_MIN(4, no_sched - 1);
+ if (ip->init.util.mmmbc < 3)
+ ip->init.util.mmmbc = 3;
}
}
}
@@ -397,7 +477,7 @@ adjust_tpref(struct au_init *ip, int no_sched)
static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
static void
-set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init);
+set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu);
static void
start_au_allocator(ErtsAlcType_t alctr_n,
@@ -411,8 +491,6 @@ refuse_af_strategy(struct au_init *init)
init->atype = GOODFIT;
}
-static void init_thr_ix(int static_ixs);
-
#ifdef HARD_DEBUG
static void hdbg_init(void);
#endif
@@ -421,7 +499,7 @@ void
erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
{
UWord extra_block_size = 0;
- int i;
+ int i, ncpu;
erts_alc_hndl_args_init_t init = {
0,
#if HAVE_ERTS_MSEG
@@ -429,17 +507,41 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#endif
ERTS_DEFAULT_TRIM_THRESHOLD,
ERTS_DEFAULT_TOP_PAD,
- ERTS_DEFAULT_ALCU_INIT
+ ERTS_DEFAULT_ALCU_INIT,
};
+ size_t fix_type_sizes[ERTS_ALC_NO_FIXED_SIZES] = {0};
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_PROC)]
+ = sizeof(Process);
+#if !HALFWORD_HEAP
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MONITOR_SH)]
+ = ERTS_MONITOR_SH_SIZE;
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)]
+ = ERTS_LINK_SH_SIZE;
+#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)]
+ = sizeof(ErtsDrvEventDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)]
+ = sizeof(ErtsDrvSelectDataState);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
+ = sizeof(ErlMessage);
+#ifdef ERTS_SMP
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
+ = sizeof(ErtsThrQElement_t);
+#endif
#ifdef HARD_DEBUG
hdbg_init();
#endif
+ erts_have_sbmbc_alloc = 0;
+ ncpu = eaiop->ncpu;
+ if (ncpu < 1)
+ ncpu = 1;
+
erts_sys_alloc_init();
- init_thr_ix(erts_no_schedulers);
erts_init_utils_mem();
+ set_default_sbmbc_alloc_opts(&init.sbmbc_alloc);
set_default_sl_alloc_opts(&init.sl_alloc);
set_default_std_alloc_opts(&init.std_alloc);
set_default_ll_alloc_opts(&init.ll_alloc);
@@ -448,22 +550,27 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
set_default_binary_alloc_opts(&init.binary_alloc);
set_default_ets_alloc_opts(&init.ets_alloc);
set_default_driver_alloc_opts(&init.driver_alloc);
+ set_default_fix_alloc_opts(&init.fix_alloc,
+ fix_type_sizes);
if (argc && argv)
handle_args(argc, argv, &init);
- if (erts_no_schedulers <= 1) {
- init.sl_alloc.thr_spec = 0;
- init.std_alloc.thr_spec = 0;
- init.ll_alloc.thr_spec = 0;
- init.eheap_alloc.thr_spec = 0;
- init.binary_alloc.thr_spec = 0;
- init.ets_alloc.thr_spec = 0;
- init.driver_alloc.thr_spec = 0;
- }
+#ifndef ERTS_SMP
+ init.sbmbc_alloc.thr_spec = 0;
+ init.sl_alloc.thr_spec = 0;
+ init.std_alloc.thr_spec = 0;
+ init.ll_alloc.thr_spec = 0;
+ init.eheap_alloc.thr_spec = 0;
+ init.binary_alloc.thr_spec = 0;
+ init.ets_alloc.thr_spec = 0;
+ init.driver_alloc.thr_spec = 0;
+ init.fix_alloc.thr_spec = 0;
+#endif
if (init.erts_alloc_config) {
/* Adjust flags that erts_alloc_config won't like */
+ init.sbmbc_alloc.thr_spec = 0;
init.temp_alloc.thr_spec = 0;
init.sl_alloc.thr_spec = 0;
init.std_alloc.thr_spec = 0;
@@ -472,6 +579,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.binary_alloc.thr_spec = 0;
init.ets_alloc.thr_spec = 0;
init.driver_alloc.thr_spec = 0;
+ init.fix_alloc.thr_spec = 0;
}
#ifdef ERTS_SMP
@@ -480,6 +588,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
init.temp_alloc.thr_spec = erts_no_schedulers;
/* Others must use thread preferred interface */
+ adjust_tpref(&init.sbmbc_alloc, erts_no_schedulers);
adjust_tpref(&init.sl_alloc, erts_no_schedulers);
adjust_tpref(&init.std_alloc, erts_no_schedulers);
adjust_tpref(&init.ll_alloc, erts_no_schedulers);
@@ -487,6 +596,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
adjust_tpref(&init.binary_alloc, erts_no_schedulers);
adjust_tpref(&init.ets_alloc, erts_no_schedulers);
adjust_tpref(&init.driver_alloc, erts_no_schedulers);
+ adjust_tpref(&init.fix_alloc, erts_no_schedulers);
#else
/* No thread specific if not smp */
@@ -497,6 +607,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
* The following allocators cannot be run with afit strategy.
* Make sure they don't...
*/
+ refuse_af_strategy(&init.sbmbc_alloc);
refuse_af_strategy(&init.sl_alloc);
refuse_af_strategy(&init.std_alloc);
refuse_af_strategy(&init.ll_alloc);
@@ -504,6 +615,7 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
refuse_af_strategy(&init.binary_alloc);
refuse_af_strategy(&init.ets_alloc);
refuse_af_strategy(&init.driver_alloc);
+ refuse_af_strategy(&init.fix_alloc);
#ifdef ERTS_SMP
if (!init.temp_alloc.thr_spec)
@@ -512,12 +624,14 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
+ init.mseg.nos = erts_no_schedulers;
erts_mseg_init(&init.mseg);
#endif
erts_alcu_init(&init.alloc_util);
erts_afalc_init();
erts_bfalc_init();
erts_gfalc_init();
+ erts_aoffalc_init();
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
erts_allctrs[i].alloc = NULL;
@@ -530,20 +644,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
erts_allctrs_info[i].extra = NULL;
}
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-#if !defined(PURIFY) && !defined(VALGRIND)
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_fix_alloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_fix_realloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_fix_free;
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 1;
-#else
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_sys_alloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_sys_realloc;
- erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_sys_free;
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 0;
-#endif
-#endif
-
erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
@@ -551,26 +651,38 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#if HALFWORD_HEAP
/* Init low memory variants by cloning */
- init.std_alloc_low = init.std_alloc;
- init.std_alloc_low.init.util.alloc_no = ERTS_ALC_A_STANDARD_LOW;
- init.std_alloc_low.init.util.low_mem = 1;
-
- init.ll_alloc_low = init.ll_alloc;
- init.ll_alloc_low.init.util.alloc_no = ERTS_ALC_A_LONG_LIVED_LOW;
- init.ll_alloc_low.init.util.low_mem = 1;
-
- set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_alloc_low);
- set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_alloc_low);
+ init.sbmbc_low_alloc = init.sbmbc_alloc;
+ init.sbmbc_low_alloc.init.util.name_prefix = "sbmbc_low_";
+ init.sbmbc_low_alloc.init.util.alloc_no = ERTS_ALC_A_SBMBC_LOW;
+ init.sbmbc_low_alloc.init.util.low_mem = 1;
+
+ init.std_low_alloc = init.std_alloc;
+ init.std_low_alloc.init.util.name_prefix = "std_low_";
+ init.std_low_alloc.init.util.alloc_no = ERTS_ALC_A_STANDARD_LOW;
+ init.std_low_alloc.init.util.force = 1;
+ init.std_low_alloc.init.util.low_mem = 1;
+
+ init.ll_low_alloc = init.ll_alloc;
+ init.ll_low_alloc.init.util.name_prefix = "ll_low_";
+ init.ll_low_alloc.init.util.alloc_no = ERTS_ALC_A_LONG_LIVED_LOW;
+ init.ll_low_alloc.init.util.force = 1;
+ init.ll_low_alloc.init.util.low_mem = 1;
+
+ set_au_allocator(ERTS_ALC_A_SBMBC_LOW, &init.sbmbc_low_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_STANDARD_LOW, &init.std_low_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW, &init.ll_low_alloc, ncpu);
#endif /* HALFWORD */
- set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc);
- set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc);
- set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc);
- set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc);
- set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc);
- set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc);
- set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc);
- set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc);
+ set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_SBMBC, &init.sbmbc_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc, ncpu);
+ set_au_allocator(ERTS_ALC_A_FIXED_SIZE, &init.fix_alloc, ncpu);
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
if (!erts_allctrs[i].alloc)
@@ -586,13 +698,23 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
- if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
- erts_fix_core_allocator_ix = ERTS_FIX_CORE_ALLOCATOR;
- else
- erts_fix_core_allocator_ix = ERTS_ALC_A_SYSTEM;
erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
+ /* sbmbc_alloc() needs to be started first */
+ start_au_allocator(ERTS_ALC_A_SBMBC,
+ &init.sbmbc_alloc,
+ &sbmbc_alloc_state);
+#if HALFWORD_HEAP
+ start_au_allocator(ERTS_ALC_A_SBMBC_LOW,
+ &init.sbmbc_low_alloc,
+ &sbmbc_low_alloc_state);
+ erts_have_sbmbc_alloc = (init.sbmbc_alloc.enable
+ && init.sbmbc_low_alloc.enable);
+#else
+ erts_have_sbmbc_alloc = init.sbmbc_alloc.enable;
+#endif
+
start_au_allocator(ERTS_ALC_A_TEMPORARY,
&init.temp_alloc,
&temp_alloc_state);
@@ -610,11 +732,11 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
&ll_alloc_state);
#if HALFWORD_HEAP
start_au_allocator(ERTS_ALC_A_LONG_LIVED_LOW,
- &init.ll_alloc_low,
- &ll_alloc_low_state);
+ &init.ll_low_alloc,
+ &ll_low_alloc_state);
start_au_allocator(ERTS_ALC_A_STANDARD_LOW,
- &init.std_alloc_low,
- &std_alloc_low_state);
+ &init.std_low_alloc,
+ &std_low_alloc_state);
#endif
start_au_allocator(ERTS_ALC_A_EHEAP,
&init.eheap_alloc,
@@ -632,62 +754,56 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
&init.driver_alloc,
&driver_alloc_state);
- fix_core_allocator = erts_allctrs[erts_fix_core_allocator_ix].alloc;
- fix_core_extra = erts_allctrs[erts_fix_core_allocator_ix].extra;
+ start_au_allocator(ERTS_ALC_A_FIXED_SIZE,
+ &init.fix_alloc,
+ &fix_alloc_state);
erts_mtrace_install_wrapper_functions();
extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
+#if !HALFWORD_HEAP
+ init_aireq_alloc();
+#endif
+
#ifdef DEBUG
extra_block_size += install_debug_functions();
#endif
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-
- erts_init_fix_alloc(extra_block_size, fix_core_alloc);
-
+}
-#if !defined(PURIFY) && !defined(VALGRIND)
- erts_set_fix_size(ERTS_ALC_T_PROC, sizeof(Process));
- erts_set_fix_size(ERTS_ALC_T_DB_TABLE, sizeof(DbTable));
- erts_set_fix_size(ERTS_ALC_T_ATOM, sizeof(Atom));
+void
+erts_alloc_late_init(void)
+{
- erts_set_fix_size(ERTS_ALC_T_MODULE, sizeof(Module));
- erts_set_fix_size(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
- erts_set_fix_size(ERTS_ALC_T_FUN_ENTRY, sizeof(ErlFunEntry));
-#ifdef ERTS_ALC_T_DRV_EV_D_STATE
- erts_set_fix_size(ERTS_ALC_T_DRV_EV_D_STATE,
- sizeof(ErtsDrvEventDataState));
-#endif
-#ifdef ERTS_ALC_T_DRV_SEL_D_STATE
- erts_set_fix_size(ERTS_ALC_T_DRV_SEL_D_STATE,
- sizeof(ErtsDrvSelectDataState));
-#endif
-#if !HALFWORD_HEAP
- erts_set_fix_size(ERTS_ALC_T_EXPORT, sizeof(Export));
- erts_set_fix_size(ERTS_ALC_T_MONITOR_SH, ERTS_MONITOR_SH_SIZE*sizeof(Uint));
- erts_set_fix_size(ERTS_ALC_T_NLINK_SH, ERTS_LINK_SH_SIZE*sizeof(Uint));
-#endif
-#endif
-#endif
+}
+static void *
+erts_realloc_fixed_size(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ erl_exit(ERTS_ABORT_EXIT,
+ "Attempt to reallocate a block of the fixed size type %s\n",
+ ERTS_ALC_T2TD(type));
}
+
static void
-set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
+set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
{
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
-#if HALFWORD_HEAP
- /* If halfword heap, silently ignore any disabling of internal
- * allocators for low memory
+ /*
+ * Some allocators are forced on if halfword heap is used.
*/
- if (init->init.util.low_mem) {
+ if (init->init.util.force)
init->enable = 1;
- }
-#endif
+
+ tspec->enabled = 0;
+ tspec->dd = 0;
+ tspec->aix = alctr_n;
+ tspec->size = 0;
+ ai->thr_spec = 0;
if (!init->enable) {
af->alloc = erts_sys_alloc;
@@ -700,14 +816,14 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
return;
}
- tspec->enabled = 0;
- tspec->all_thr_safe = 0;
- ai->thr_spec = 0;
#ifdef USE_THREADS
+#ifdef ERTS_SMP
if (init->thr_spec) {
if (init->thr_spec > 0) {
af->alloc = erts_alcu_alloc_thr_spec;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_thr_spec;
else
af->realloc = erts_alcu_realloc_thr_spec;
@@ -715,12 +831,14 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
}
else {
af->alloc = erts_alcu_alloc_thr_pref;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_thr_pref;
else
af->realloc = erts_alcu_realloc_thr_pref;
af->free = erts_alcu_free_thr_pref;
- tspec->all_thr_safe = 1;
+ tspec->dd = 1;
}
tspec->enabled = 1;
@@ -728,9 +846,13 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
ai->thr_spec = tspec->size;
}
- else if (init->init.util.ts) {
+ else
+#endif
+ if (init->init.util.ts) {
af->alloc = erts_alcu_alloc_ts;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv_ts;
else
af->realloc = erts_alcu_realloc_ts;
@@ -740,7 +862,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
#endif
{
af->alloc = erts_alcu_alloc;
- if (init->init.util.ramv)
+ if (init->init.util.fix_type_size)
+ af->realloc = erts_realloc_fixed_size;
+ else if (init->init.util.ramv)
af->realloc = erts_alcu_realloc_mv;
else
af->realloc = erts_alcu_realloc;
@@ -763,12 +887,14 @@ start_au_allocator(ErtsAlcType_t alctr_n,
ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
+ ErtsAlcFixList_t *fix_lists = NULL;
+ size_t fix_list_size = 0;
if (!init->enable)
return;
if (init->thr_spec) {
- void *states = erts_sys_alloc(0,
+ char *states = erts_sys_alloc(0,
NULL,
((sizeof(Allctr_t *)
* (tspec->size + 1))
@@ -780,18 +906,40 @@ start_au_allocator(ErtsAlcType_t alctr_n,
"Failed to allocate allocator states for %salloc\n",
init->init.util.name_prefix);
tspec->allctr = (Allctr_t **) states;
- states = ((char *) states) + sizeof(Allctr_t *) * (tspec->size + 1);
+ states += sizeof(Allctr_t *) * (tspec->size + 1);
states = ((((UWord) states) & ERTS_CACHE_LINE_MASK)
- ? (void *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
+ ? (char *) ((((UWord) states) & ~ERTS_CACHE_LINE_MASK)
+ ERTS_CACHE_LINE_SIZE)
- : (void *) states);
- tspec->allctr[0] = init->thr_spec > 0 ? (Allctr_t *) state : (Allctr_t *) NULL;
+ : (char *) states);
+ tspec->allctr[0] = (Allctr_t *) state;
size = tspec->size;
for (i = 1; i < size; i++)
tspec->allctr[i] = (Allctr_t *)
&((ErtsAllocatorState_t *) states)[i-1];
}
+ if (init->init.util.fix_type_size) {
+ size_t tot_fix_list_size;
+ fix_list_size = sizeof(ErtsAlcFixList_t)*ERTS_ALC_NO_FIXED_SIZES;
+ fix_list_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(fix_list_size);
+ tot_fix_list_size = fix_list_size;
+ if (init->thr_spec)
+ tot_fix_list_size *= tspec->size;
+ fix_lists = erts_sys_alloc(0,
+ NULL,
+ (tot_fix_list_size
+ + ERTS_CACHE_LINE_SIZE - 1));
+ if (!fix_lists)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to allocate fix lists for %salloc\n",
+ init->init.util.name_prefix);
+
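+ /* Align fix_lists on a cache line boundary */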
+ if (((UWord) fix_lists) & ERTS_CACHE_LINE_MASK)
+ fix_lists = ((ErtsAlcFixList_t *)
+ ((((UWord) fix_lists) & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ }
+
for (i = 0; i < size; i++) {
void *as;
atype = init->atype;
@@ -802,25 +950,32 @@ start_au_allocator(ErtsAlcType_t alctr_n,
as0 = (void *) tspec->allctr[i];
if (!as0)
continue;
- if (i == 0) {
- if (atype == AFIT)
- atype = GOODFIT;
- init->init.util.ts = 1;
+ if (init->thr_spec < 0) {
+ init->init.util.ts = i == 0;
+ init->init.util.tspec = 0;
+ init->init.util.tpref = -1*init->thr_spec + 1;
}
else {
- if (init->thr_spec < 0) {
+ if (i != 0)
+ init->init.util.ts = 0;
+ else {
+ if (atype == AFIT)
+ atype = GOODFIT;
init->init.util.ts = 1;
- init->init.util.tspec = 0;
- init->init.util.tpref = -1*init->thr_spec;
}
- else {
- init->init.util.ts = 0;
- init->init.util.tspec = init->thr_spec + 1;
- init->init.util.tpref = 0;
- }
- }
+ init->init.util.tspec = init->thr_spec + 1;
+ init->init.util.tpref = 0;
+ }
}
+ if (fix_lists) {
+ init->init.util.fix = fix_lists;
+ fix_lists = ((ErtsAlcFixList_t *)
+ (((char *) fix_lists) + fix_list_size));
+ }
+
+ init->init.util.ix = i;
+
switch (atype) {
case GOODFIT:
as = (void *) erts_gfalc_start((GFAllctr_t *) as0,
@@ -837,6 +992,12 @@ start_au_allocator(ErtsAlcType_t alctr_n,
&init->init.af,
&init->init.util);
break;
+ case AOFIRSTFIT:
+ as = (void *) erts_aoffalc_start((AOFFAllctr_t *) as0,
+ &init->init.aoff,
+ &init->init.util);
+ break;
+
default:
as = NULL;
ASSERT(0);
@@ -850,11 +1011,8 @@ start_au_allocator(ErtsAlcType_t alctr_n,
af->extra = as;
}
- if (init->thr_spec) {
+ if (init->thr_spec)
af->extra = tspec;
- init->init.util.ts = 1;
- }
-
ai->extra = af->extra;
}
@@ -937,7 +1095,7 @@ get_kb_value(char *param_end, char** argv, int* ip)
char *param = argv[*ip]+1;
char *value = get_value(param_end, argv, ip);
errno = 0;
- tmp = (Sint) strtol(value, &rest, 10);
+ tmp = (Sint) ErtsStrToSint(value, &rest, 10);
if (errno != 0 || rest == value || tmp < 0 || max < ((Uint) tmp))
bad_value(param, param_end, value);
if (max == (Uint) tmp)
@@ -947,45 +1105,31 @@ get_kb_value(char *param_end, char** argv, int* ip)
}
static Uint
-get_amount_value(char *param_end, char** argv, int* ip)
+get_byte_value(char *param_end, char** argv, int* ip)
{
Sint tmp;
char *rest;
char *param = argv[*ip]+1;
char *value = get_value(param_end, argv, ip);
errno = 0;
- tmp = (Sint) strtol(value, &rest, 10);
+ tmp = (Sint) ErtsStrToSint(value, &rest, 10);
if (errno != 0 || rest == value || tmp < 0)
bad_value(param, param_end, value);
return (Uint) tmp;
}
-static int
-get_bool_or_possitive_amount_value(int *bool, Uint *amount,
- char *param_end, char** argv, int* ip)
+static Uint
+get_amount_value(char *param_end, char** argv, int* ip)
{
+ Sint tmp;
+ char *rest;
char *param = argv[*ip]+1;
char *value = get_value(param_end, argv, ip);
- if (strcmp(value, "true") == 0) {
- *bool = 1;
- return 1;
- }
- else if (strcmp(value, "false") == 0) {
- *bool = 0;
- return 1;
- }
- else {
- Sint tmp;
- char *rest;
- errno = 0;
- tmp = (Sint) strtol(value, &rest, 10);
- if (errno != 0 || rest == value || tmp <= 0) {
- bad_value(param, param_end, value);
- return -1;
- }
- *amount = (Uint) tmp;
- return 0;
- }
+ errno = 0;
+ tmp = (Sint) ErtsStrToSint(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp < 0)
+ bad_value(param, param_end, value);
+ return (Uint) tmp;
}
static void
@@ -1017,6 +1161,9 @@ handle_au_arg(struct au_init *auip,
else if (strcmp("af", alg) == 0) {
auip->atype = AFIT;
}
+ else if (strcmp("aoff", alg) == 0) {
+ auip->atype = AOFIRSTFIT;
+ }
else {
bad_value(param, sub_param + 1, alg);
}
@@ -1085,6 +1232,12 @@ handle_au_arg(struct au_init *auip,
if(has_prefix("sbct", sub_param)) {
auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
}
+ else if (has_prefix("sbmbcs", sub_param)) {
+ auip->init.util.sbmbcs = get_byte_value(sub_param + 6, argv, ip);
+ }
+ else if (has_prefix("sbmbct", sub_param)) {
+ auip->init.util.sbmbct = get_byte_value(sub_param + 6, argv, ip);
+ }
else if (has_prefix("smbcs", sub_param)) {
auip->default_.smbcs = 0;
auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
@@ -1093,25 +1246,16 @@ handle_au_arg(struct au_init *auip,
goto bad_switch;
break;
case 't': {
- Uint no;
- int enable;
- int res = get_bool_or_possitive_amount_value(&enable,
- &no,
- sub_param+1,
- argv,
- ip);
- if (res > 0)
- auip->thr_spec = enable ? 1 : 0;
+ int res = get_bool_value(sub_param+1, argv, ip);
+ if (res > 0) {
+ auip->thr_spec = 1;
+ break;
+ }
else if (res == 0) {
- int allocs = (int) no;
- if (allocs < 0)
- allocs = INT_MIN;
- else {
- allocs *= -1;
- }
- auip->thr_spec = allocs;
+ auip->thr_spec = 0;
+ break;
}
- break;
+ goto bad_switch;
}
default:
bad_switch:
@@ -1123,12 +1267,14 @@ static void
handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
{
struct au_init *aui[] = {
+ &init->sbmbc_alloc,
&init->binary_alloc,
&init->std_alloc,
&init->ets_alloc,
&init->eheap_alloc,
&init->ll_alloc,
&init->driver_alloc,
+ &init->fix_alloc,
&init->sl_alloc,
&init->temp_alloc
};
@@ -1150,20 +1296,17 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
case 'B':
handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i);
break;
+ case 'C':
+ handle_au_arg(&init->sbmbc_alloc, &argv[i][3], argv, &i);
+ break;
case 'D':
handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i);
break;
case 'E':
handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i);
break;
- case 'F': /* fix_alloc */
- if (has_prefix("e", param+2)) {
- arg = get_value(param+3, argv, &i);
- if (strcmp("true", arg) != 0)
- bad_value(param, param+3, arg);
- }
- else
- bad_param(param, param+2);
+ case 'F':
+ handle_au_arg(&init->fix_alloc, &argv[i][3], argv, &i);
break;
case 'H':
handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i);
@@ -1190,12 +1333,6 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
#endif
get_amount_value(argv[i]+6, argv, &i);
}
- else if (has_prefix("cci", argv[i]+3)) {
-#if HAVE_ERTS_MSEG
- init->mseg.cci =
-#endif
- get_amount_value(argv[i]+6, argv, &i);
- }
else {
bad_param(param, param+2);
}
@@ -1281,6 +1418,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
set_default_binary_alloc_opts(&init->binary_alloc);
set_default_ets_alloc_opts(&init->ets_alloc);
set_default_driver_alloc_opts(&init->driver_alloc);
+ set_default_driver_alloc_opts(&init->fix_alloc);
init->driver_alloc.enable = 0;
if (strcmp("r9c", arg) == 0) {
@@ -1415,43 +1553,78 @@ static char *type_no_str(ErtsAlcType_t n)
#define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
-erts_tsd_key_t thr_ix_key;
-erts_spinlock_t alloc_thr_ix_lock;
-int last_thr_ix;
-int first_dyn_thr_ix;
-
-static void
-init_thr_ix(int static_ixs)
+void
+erts_alloc_register_scheduler(void *vesdp)
{
- erts_tsd_key_create(&thr_ix_key);
- erts_spinlock_init(&alloc_thr_ix_lock, "alloc_thr_ix_lock");
- last_thr_ix = -4711;
- first_dyn_thr_ix = static_ixs+1;
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ int ix = (int) esdp->no;
+ int aix;
+
+ for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
+ esdp->alloc_data.deallctr[aix] = NULL;
+ esdp->alloc_data.pref_ix[aix] = -1;
+ if (tspec->enabled) {
+ if (!tspec->dd)
+ esdp->alloc_data.pref_ix[aix] = ix;
+ else {
+ Allctr_t *allctr = tspec->allctr[ix];
+ ASSERT(allctr);
+ esdp->alloc_data.deallctr[aix] = allctr;
+ esdp->alloc_data.pref_ix[aix] = ix;
+ }
+ }
+ }
}
-int
-erts_alc_get_thr_ix(void)
+#ifdef ERTS_SMP
+void
+erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *more_work)
{
- int ix = (int)(long) erts_tsd_get(thr_ix_key);
- if (ix == 0) {
- erts_spin_lock(&alloc_thr_ix_lock);
- last_thr_ix++;
- if (last_thr_ix < 0)
- last_thr_ix = first_dyn_thr_ix;
- ix = last_thr_ix;
- erts_spin_unlock(&alloc_thr_ix_lock);
- erts_tsd_set(thr_ix_key, (void *)(long) ix);
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ int aix;
+ for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
+ Allctr_t *allctr;
+ if (esdp)
+ allctr = esdp->alloc_data.deallctr[aix];
+ else {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
+ if (tspec->enabled && tspec->dd)
+ allctr = tspec->allctr[0];
+ else
+ allctr = NULL;
+ }
+ if (allctr) {
+ erts_alcu_check_delayed_dealloc(allctr,
+ 1,
+ need_thr_progress,
+ thr_prgr_p,
+ more_work);
+ }
}
- ASSERT(ix > 0);
- return ix;
}
+#endif
-void erts_alloc_reg_scheduler_id(Uint id)
+erts_aint32_t
+erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
{
- int ix = (int) id;
- ASSERT(0 < ix && ix <= first_dyn_thr_ix);
- ASSERT(0 == (int) (long) erts_tsd_get(thr_ix_key));
- erts_tsd_set(thr_ix_key, (void *)(long) ix);
+#ifdef ERTS_SMP
+ ErtsAllocatorThrSpec_t *tspec;
+ tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
+ if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
+ return erts_alcu_fix_alloc_shrink(tspec->allctr[ix], flgs);
+ if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
+ return erts_alcu_fix_alloc_shrink(
+ erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
+#else
+ if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
+ return erts_alcu_fix_alloc_shrink(
+ erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
+#endif
+ return 0;
}
static void
@@ -1466,14 +1639,12 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
&& erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
ErtsAllocatorThrSpec_t *tspec;
+ int ix = ERTS_ALC_GET_THR_IX();
tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
- if (!tspec->all_thr_safe) {
- int ix = erts_alc_get_thr_ix();
- if (ix < tspec->size) {
- *allctr = tspec->allctr[ix];
- return erts_alcu_verify_unused;
- }
+ if (ix < tspec->size) {
+ *allctr = tspec->allctr[ix];
+ return erts_alcu_verify_unused;
}
}
@@ -1572,7 +1743,7 @@ erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
}
static ERTS_INLINE UWord
-alcu_size(ErtsAlcType_t ai)
+alcu_size(ErtsAlcType_t ai, ErtsAlcUFixInfo_t *fi, int fisz)
{
UWord res = 0;
@@ -1582,22 +1753,20 @@ alcu_size(ErtsAlcType_t ai)
if (!erts_allctrs_info[ai].thr_spec) {
Allctr_t *allctr = erts_allctrs_info[ai].extra;
AllctrSize_t asize;
- erts_alcu_current_size(allctr, &asize);
+ erts_alcu_current_size(allctr, &asize, fi, fisz);
res += asize.blocks;
}
else {
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai];
int i;
- ASSERT(tspec->all_thr_safe);
-
ASSERT(tspec->enabled);
for (i = tspec->size - 1; i >= 0; i--) {
Allctr_t *allctr = tspec->allctr[i];
AllctrSize_t asize;
if (allctr) {
- erts_alcu_current_size(allctr, &asize);
+ erts_alcu_current_size(allctr, &asize, fi, fisz);
res += asize.blocks;
}
}
@@ -1625,7 +1794,6 @@ alcu_is_low(ErtsAlcType_t ai)
int found_one = 0;
# endif
- ASSERT(tspec->all_thr_safe);
ASSERT(tspec->enabled);
for (i = tspec->size - 1; i >= 0; i--) {
@@ -1649,11 +1817,24 @@ alcu_is_low(ErtsAlcType_t ai)
}
#endif /* HALFWORD */
+static ERTS_INLINE void
+add_fix_values(UWord *ap, UWord *up, ErtsAlcUFixInfo_t *fi, ErtsAlcType_t type)
+{
+ int ix = ERTS_ALC_T2N(type) - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ASSERT(0 <= ix && ix < ERTS_ALC_NO_FIXED_SIZES);
+
+ *ap += (UWord) fi[ix].allocated;
+ *up += (UWord) fi[ix].used;
+}
+
Eterm
erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
{
+/*
+ * NOTE! When updating this function, make sure to also update
+ * erlang:memory/[0,1] in $ERL_TOP/erts/preloaded/src/erlang.erl
+ */
#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
- ErtsFixInfo efi;
struct {
int total;
int processes;
@@ -1692,6 +1873,9 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
Eterm res = THE_NON_VALUE;
ErtsAlcType_t ai;
int only_one_value = 0;
+ ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}};
+
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
/* Figure out whats wanted... */
@@ -1856,12 +2040,15 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
return am_badarg;
}
- /* All alloc_util allocators *have* to be enabled */
+ /* All alloc_util allocators except sbmbc_alloc *have* to be enabled */
for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
switch (ai) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_A_FIXED_SIZE:
+ case ERTS_ALC_A_SBMBC:
+#if HALFWORD_HEAP
+ case ERTS_ALC_A_SBMBC_LOW:
+#endif
break;
default:
if (!erts_allctrs_info[ai].enabled
@@ -1901,6 +2088,12 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
* Often not thread safe and usually never
* contain any allocated memory.
*/
+ case ERTS_ALC_A_SBMBC:
+ /* Included in other allocators */
+#if HALFWORD_HEAP
+ case ERTS_ALC_A_SBMBC_LOW:
+ /* Included in other allocators */
+#endif
continue;
case ERTS_ALC_A_EHEAP:
save = &size.processes;
@@ -1911,11 +2104,15 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
case ERTS_ALC_A_BINARY:
save = &size.binary;
break;
+ case ERTS_ALC_A_FIXED_SIZE:
+ asz = alcu_size(ai, fi, ERTS_ALC_NO_FIXED_SIZES);
+ size.total += asz;
+ continue;
default:
save = NULL;
break;
}
- asz = alcu_size(ai);
+ asz = alcu_size(ai, NULL, 0);
if (save)
*save = asz;
size.total += asz;
@@ -1935,8 +2132,11 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (ERTS_MEM_NEED_ALL_ALCU)
tmp = size.processes;
- else
- tmp = alcu_size(ERTS_ALC_A_EHEAP);
+ else {
+ alcu_size(ERTS_ALC_A_FIXED_SIZE,
+ fi, ERTS_ALC_NO_FIXED_SIZES);
+ tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
+ }
tmp += erts_max_processes*sizeof(Process*);
#ifdef HYBRID
tmp += erts_max_processes*sizeof(Process*);
@@ -1946,69 +2146,54 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
size.processes = size.processes_used = tmp;
-#if HALFWORD_HEAP
- /* BUG: We ignore link and monitor memory */
-#else
- erts_fix_info(ERTS_ALC_T_NLINK_SH, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_PROC);
+#if !HALFWORD_HEAP
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_MONITOR_SH);
- erts_fix_info(ERTS_ALC_T_MONITOR_SH, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_NLINK_SH);
#endif
-
- erts_fix_info(ERTS_ALC_T_PROC, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
-
- erts_fix_info(ERTS_ALC_T_REG_PROC, &efi);
- size.processes += efi.total;
- size.processes_used += efi.used;
-
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_MSG_REF);
}
if (want.atom || want.atom_used) {
Uint reserved_atom_space, atom_space;
erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
size.atom = size.atom_used = atom_table_sz();
- erts_fix_info(ERTS_ALC_T_ATOM, &efi);
- if (want.atom) {
+ if (want.atom)
size.atom += reserved_atom_space;
- size.atom += efi.total;
- }
- if (want.atom_used) {
+ if (want.atom_used)
size.atom_used += atom_space;
- size.atom_used += efi.used;
- }
}
if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
- size.binary = alcu_size(ERTS_ALC_A_BINARY);
+ size.binary = alcu_size(ERTS_ALC_A_BINARY, NULL, 0);
if (want.code) {
size.code = module_table_sz();
- erts_fix_info(ERTS_ALC_T_MODULE, &efi);
- size.code += efi.used;
size.code += export_table_sz();
-#if HALFWORD_HEAP
size.code += export_list_size() * sizeof(Export);
-#else
- erts_fix_info(ERTS_ALC_T_EXPORT, &efi);
- size.code += efi.used;
-#endif
size.code += erts_fun_table_sz();
- erts_fix_info(ERTS_ALC_T_FUN_ENTRY, &efi);
- size.code += efi.used;
size.code += allocated_modules*sizeof(Range);
size.code += erts_total_code_size;
}
if (want.ets) {
if (!ERTS_MEM_NEED_ALL_ALCU)
- size.ets = alcu_size(ERTS_ALC_A_ETS);
+ size.ets = alcu_size(ERTS_ALC_A_ETS, NULL, 0);
size.ets += erts_get_ets_misc_mem_size();
}
@@ -2081,13 +2266,10 @@ struct aa_values {
Eterm
erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
{
-#define MAX_AA_VALUES \
- (20 + (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1))
-
+#define MAX_AA_VALUES (23)
struct aa_values values[MAX_AA_VALUES];
Eterm res = THE_NON_VALUE;
int i, length;
- ErtsFixInfo efi;
Uint reserved_atom_space, atom_space;
if (proc) {
@@ -2152,6 +2334,11 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
i++;
values[i].arity = 2;
+ values[i].name = "export_list";
+ values[i].ui[0] = export_list_size() * sizeof(Export);
+ i++;
+
+ values[i].arity = 2;
values[i].name = "register_table";
values[i].ui[0] = process_reg_sz();
i++;
@@ -2196,22 +2383,15 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].ui[0] = erts_tot_link_lh_size();
i++;
- {
- Uint n;
-
- for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
- n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
- n++) {
- erts_fix_info(ERTS_ALC_N2T(n), &efi);
-
- values[i].arity = 3;
- values[i].name = ERTS_ALC_N2TD(n);
- values[i].ui[0] = efi.total;
- values[i].ui[1] = efi.used;
- i++;
- }
+ values[i].arity = 2;
+ values[i].name = "process_table";
+ values[i].ui[0] = erts_max_processes*sizeof(Process*);
+ i++;
- }
+ values[i].arity = 2;
+ values[i].name = "ets_misc";
+ values[i].ui[0] = erts_get_ets_misc_mem_size();
+ i++;
length = i;
ASSERT(length <= MAX_AA_VALUES);
@@ -2305,17 +2485,16 @@ erts_alloc_util_allocators(void *proc)
Uint sz;
int i;
/*
- * Currently all allocators except sys_alloc and fix_alloc are
+ * Currently all allocators except sys_alloc are
* alloc_util allocators.
*/
- sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
+ sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 1)*2;
ASSERT(sz > 0);
hp = HAlloc((Process *) proc, sz);
res = NIL;
for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
switch (i) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_A_FIXED_SIZE:
break;
default: {
char *alc_str = (char *) ERTS_ALC_A2AD(i);
@@ -2329,267 +2508,12 @@ erts_alloc_util_allocators(void *proc)
return res;
}
-Eterm
-erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
-{
-#define ERTS_AIT_RET(R) \
- do { res = (R); goto done; } while (0)
-#define ERTS_AIT_HALLOC(P, S) \
- do { hp = HAlloc((P), (S)); hp_end = hp + (S); } while (0)
-
- ErtsAlcType_t i;
- Uint sz = 0;
- Uint *hp = NULL;
- Uint *hp_end = NULL;
- Eterm res = am_undefined;
-
- if (is_not_atom(which_alloc))
- goto done;
-
- for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
- if (erts_is_atom_str((char *) ERTS_ALC_A2AD(i), which_alloc)) {
- if (!erts_allctrs_info[i].enabled)
- ERTS_AIT_RET(am_false);
- else {
- if (erts_allctrs_info[i].alloc_util) {
- Eterm ires, tmp;
- Eterm **hpp;
- Uint *szp;
- Eterm (*info_func)(Allctr_t *,
- int,
- int *,
- void *,
- Uint **,
- Uint *);
-
- info_func = (only_sz
- ? erts_alcu_sz_info
- : erts_alcu_info);
-
- if (erts_allctrs_info[i].thr_spec) {
- ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[i];
- int j;
- int block_system = !tspec->all_thr_safe;
-
- if (block_system) {
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
- }
- ASSERT(tspec->enabled);
-
- szp = &sz;
- hpp = NULL;
-
- while (1) {
- ires = NIL;
- for (j = tspec->size - 1; j >= 0; j--) {
- Allctr_t *allctr = tspec->allctr[j];
- if (allctr) {
- tmp = erts_bld_tuple(hpp,
- szp,
- 3,
- erts_bld_atom(hpp,
- szp,
- "instance"),
- make_small((Uint) j),
- (*info_func)(allctr,
- hpp != NULL,
- NULL,
- NULL,
- hpp,
- szp));
- ires = erts_bld_cons(hpp, szp, tmp, ires);
- }
- }
- if (hpp)
- break;
- ERTS_AIT_HALLOC((Process *) proc, sz);
- hpp = &hp;
- szp = NULL;
- }
-
- if (block_system) {
- erts_smp_release_system();
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
- }
- }
- else {
- Allctr_t *allctr = erts_allctrs_info[i].extra;
- szp = &sz;
- hpp = NULL;
- while (1) {
- ires = NIL;
- tmp = erts_bld_tuple(hpp,
- szp,
- 3,
- erts_bld_atom(hpp,
- szp,
- "instance"),
- make_small((Uint) 0),
- (*info_func)(allctr,
- hpp != NULL,
- NULL,
- NULL,
- hpp,
- szp));
- ires = erts_bld_cons(hpp, szp, tmp, ires);
- if (hpp)
- break;
- ERTS_AIT_HALLOC((Process *) proc, sz);
- hpp = &hp;
- szp = NULL;
- }
- }
- ERTS_AIT_RET(ires);
- }
- else {
- Eterm *szp, **hpp;
-
- switch (i) {
- case ERTS_ALC_A_SYSTEM: {
- SysAllocStat sas;
- Eterm opts_am;
- Eterm opts;
- Eterm as[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
- Eterm ts[4]; /* Ok even if !HEAP_ON_C_STACK, not really heap data on stack */
- int l;
-
- if (only_sz)
- ERTS_AIT_RET(NIL);
-
- sys_alloc_stat(&sas);
- opts_am = am_atom_put("options", 7);
-
- szp = &sz;
- hpp = NULL;
-
- restart_sys_alloc:
- l = 0;
- as[l] = am_atom_put("e", 1);
- ts[l++] = am_true;
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("libc", 4);
- if(sas.trim_threshold >= 0) {
- as[l] = am_atom_put("tt", 2);
- ts[l++] = erts_bld_uint(hpp, szp,
- (Uint) sas.trim_threshold);
- }
- if(sas.top_pad >= 0) {
- as[l] = am_atom_put("tp", 2);
- ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
- }
-
- opts = erts_bld_2tup_list(hpp, szp, l, as, ts);
- res = erts_bld_2tup_list(hpp, szp, 1, &opts_am, &opts);
-
- if (szp) {
- ERTS_AIT_HALLOC((Process *) proc, sz);
- szp = NULL;
- hpp = &hp;
- goto restart_sys_alloc;
- }
- ERTS_AIT_RET(res);
- }
- case ERTS_ALC_A_FIXED_SIZE: {
- ErtsAlcType_t n;
- Eterm as[2], vs[2];
-
- if (only_sz)
- ERTS_AIT_RET(NIL);
-
- as[0] = am_atom_put("options", 7);
- as[1] = am_atom_put("pools", 5);
-
- szp = &sz;
- hpp = NULL;
-
- restart_fix_alloc:
-
- vs[0] = erts_bld_cons(hpp, szp,
- erts_bld_tuple(hpp, szp, 2,
- am_atom_put("e",
- 1),
- am_true),
- NIL);
-
- vs[1] = NIL;
- for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
- n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
- n++) {
- ErtsFixInfo efi;
- erts_fix_info(ERTS_ALC_N2T(n), &efi);
-
- vs[1] = erts_bld_cons(
- hpp, szp,
- erts_bld_tuple(
- hpp, szp, 3,
- am_atom_put((char *) ERTS_ALC_N2TD(n),
- strlen(ERTS_ALC_N2TD(n))),
- erts_bld_uint(hpp, szp, efi.total),
- erts_bld_uint(hpp, szp, efi.used)),
- vs[1]);
-
- }
-
- res = erts_bld_2tup_list(hpp, szp, 2, as, vs);
- if (szp) {
- ERTS_AIT_HALLOC((Process *) proc, sz);
- szp = NULL;
- hpp = &hp;
- goto restart_fix_alloc;
- }
- ERTS_AIT_RET(res);
- }
- default:
- ASSERT(0);
- goto done;
- }
- }
- }
- }
- }
-
- if (ERTS_IS_ATOM_STR("mseg_alloc", which_alloc)) {
-#if HAVE_ERTS_MSEG
- if (only_sz)
- ERTS_AIT_RET(NIL);
- erts_mseg_info(NULL, NULL, 0, NULL, &sz);
- if (sz)
- ERTS_AIT_HALLOC((Process *) proc, sz);
- ERTS_AIT_RET(erts_mseg_info(NULL, NULL, 1, &hp, NULL));
-#else
- ERTS_AIT_RET(am_false);
-#endif
-
- }
- else if (ERTS_IS_ATOM_STR("alloc_util", which_alloc)) {
- if (only_sz)
- ERTS_AIT_RET(NIL);
- erts_alcu_au_info_options(NULL, NULL, NULL, &sz);
- if (sz)
- ERTS_AIT_HALLOC((Process *) proc, sz);
- ERTS_AIT_RET(erts_alcu_au_info_options(NULL, NULL, &hp, NULL));
- }
-
- done:
- if (hp) {
- ASSERT(hp_end >= hp);
- HRelease((Process *) proc, hp_end, hp);
- }
- return res;
-
-#undef ERTS_AIT_RET
-#undef ERTS_AIT_HALLOC
-}
-
void
erts_allocator_info(int to, void *arg)
{
ErtsAlcType_t a;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_smp_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
int ai;
@@ -2630,22 +2554,6 @@ erts_allocator_info(int to, void *arg)
erts_print(to, arg, "option tp: %d\n", sas.top_pad);
break;
}
- case ERTS_ALC_A_FIXED_SIZE: {
- ErtsAlcType_t n;
- erts_print(to, arg, "option e: true\n");
-
- for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
- n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
- n++) {
- ErtsFixInfo efi;
- erts_fix_info(ERTS_ALC_N2T(n), &efi);
- erts_print(to, arg, "%s: %lu %lu\n",
- ERTS_ALC_N2TD(n),
- efi.total,
- efi.used);
- }
- break;
- }
default:
ASSERT(0);
break;
@@ -2656,8 +2564,18 @@ erts_allocator_info(int to, void *arg)
}
#if HAVE_ERTS_MSEG
- erts_print(to, arg, "=allocator:mseg_alloc\n");
- erts_mseg_info(&to, arg, 0, NULL, NULL);
+ {
+#ifdef ERTS_SMP
+ int max = (int) erts_no_schedulers;
+#else
+ int max = 0;
+#endif
+ int i;
+ for (i = 0; i <= max; i++) {
+ erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
+ erts_mseg_info(i, &to, arg, 0, NULL, NULL);
+ }
+ }
#endif
erts_print(to, arg, "=allocator:alloc_util\n");
@@ -2711,7 +2629,7 @@ erts_allocator_options(void *proc)
use_mseg++;
#endif
if (erts_allctr_thr_spec[a].enabled)
- allctr = erts_allctr_thr_spec[a].allctr[1];
+ allctr = erts_allctr_thr_spec[a].allctr[0];
else
allctr = erts_allctrs_info[a].extra;
tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
@@ -2760,7 +2678,7 @@ erts_allocator_options(void *proc)
#if HAVE_ERTS_MSEG
if (use_mseg) {
atoms[length] = am_atom_put("mseg_alloc", 10);
- terms[length++] = erts_mseg_info_options(NULL, NULL, hpp, szp);
+ terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
}
#endif
@@ -2842,6 +2760,343 @@ erts_allocator_options(void *proc)
return res;
}
+void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
+{
+ UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1)
+#ifdef VALGRIND
+ + sizeof(UWord)
+#endif
+ );
+
+#ifdef VALGRIND
+ { /* Link them to avoid Leak_PossiblyLost */
+ static UWord* first_in_list = NULL;
+ *(UWord**)v = first_in_list;
+ first_in_list = (UWord*) v;
+ v += sizeof(UWord);
+ }
+#endif
+
+ if (v & ERTS_CACHE_LINE_MASK) {
+ v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ }
+ ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
+ return (void*)v;
+}
+
+static void
+reply_alloc_info(void *vair)
+{
+ ErtsAllocInfoReq *air = (ErtsAllocInfoReq *) vair;
+ Uint sched_id = erts_get_scheduler_id();
+ int global_instances = air->req_sched == sched_id;
+ ErtsProcLocks rp_locks;
+ Process *rp = air->proc;
+ Eterm ref_copy = NIL, ai_list, msg;
+ Eterm *hp = NULL, *hp_end = NULL, *hp_start = NULL;
+ Eterm **hpp;
+ Uint sz, *szp;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+ int i;
+ Eterm (*info_func)(Allctr_t *,
+ int,
+ int *,
+ void *,
+ Uint **,
+ Uint *) = (air->only_sz
+ ? erts_alcu_sz_info
+ : erts_alcu_info);
+
+ rp_locks = air->req_sched == sched_id ? ERTS_PROC_LOCK_MAIN : 0;
+
+ sz = 0;
+ hpp = NULL;
+ szp = &sz;
+
+ while (1) {
+
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, air->ref);
+ else
+ *szp += REF_THING_SIZE;
+
+ ai_list = NIL;
+ for (i = 0; air->allocs[i] != ERTS_ALC_A_INVALID; i++);
+ for (i--; i >= 0; i--) {
+ int ai = air->allocs[i];
+ Allctr_t *allctr;
+ Eterm ainfo;
+ Eterm alloc_atom;
+ if (global_instances) {
+ switch (ai) {
+ case ERTS_ALC_A_SYSTEM: {
+ alloc_atom = erts_bld_atom(hpp, szp, "sys_alloc");
+ ainfo = NIL;
+ if (!air->only_sz) {
+ SysAllocStat sas;
+ if (hpp)
+ sys_alloc_stat(&sas);
+ if (szp) {
+ /* ensure enough heap */
+ sas.top_pad = INT_MAX;
+ sas.trim_threshold = INT_MAX;
+ }
+ if (sas.top_pad >= 0) {
+ ainfo = erts_bld_cons(
+ hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp, "tp"),
+ erts_bld_uint(
+ hpp, szp,
+ (Uint) sas.top_pad)),
+ ainfo);
+ }
+ if (sas.trim_threshold >= 0) {
+ ainfo = erts_bld_cons(
+ hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp, "tt"),
+ erts_bld_uint(
+ hpp, szp,
+ (Uint) sas.trim_threshold)),
+ ainfo);
+ }
+ ainfo = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp,
+ "m"),
+ erts_bld_atom(hpp, szp,
+ "libc")),
+ ainfo);
+ ainfo = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 2,
+ erts_bld_atom(hpp, szp,
+ "e"),
+ am_true),
+ ainfo);
+ ainfo = erts_bld_tuple(hpp, szp, 2,
+ erts_bld_atom(hpp, szp,
+ "options"),
+ ainfo);
+ ainfo = erts_bld_cons(hpp, szp,ainfo,NIL);
+ }
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+ break;
+ }
+ case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ alloc_atom = erts_bld_atom(hpp, szp, "alloc_util");
+ ainfo = (air->only_sz
+ ? NIL
+ : erts_alcu_au_info_options(NULL, NULL,
+ hpp, szp));
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+ break;
+ case ERTS_ALC_INFO_A_MSEG_ALLOC:
+ alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
+#if HAVE_ERTS_MSEG
+ ainfo = (air->only_sz
+ ? NIL
+ : erts_mseg_info(0, NULL, NULL, hpp != NULL,
+ hpp, szp));
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(0),
+ ainfo);
+#else
+ ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
+ am_false);
+#endif
+ break;
+ default:
+ alloc_atom = erts_bld_atom(hpp, szp,
+ (char *) ERTS_ALC_A2AD(ai));
+ if (!erts_allctrs_info[ai].enabled)
+ ainfo = erts_bld_tuple(hpp, szp, 2, alloc_atom,
+ am_false);
+ else if (erts_allctrs_info[ai].alloc_util) {
+ if (erts_allctrs_info[ai].thr_spec)
+ allctr = erts_allctr_thr_spec[ai].allctr[0];
+ else
+ allctr = erts_allctrs_info[ai].extra;
+ ainfo = info_func(allctr, hpp != NULL, NULL,
+ NULL, hpp, szp);
+ ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom,
+ make_small(0), ainfo);
+ }
+ else {
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: internal error\n",
+ __FILE__, __LINE__);
+ }
+ }
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ }
+ switch (ai) {
+ case ERTS_ALC_A_SYSTEM:
+ case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ break;
+ case ERTS_ALC_INFO_A_MSEG_ALLOC:
+#if HAVE_ERTS_MSEG && defined(ERTS_SMP)
+ alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
+ ainfo = (air->only_sz
+ ? NIL
+ : erts_mseg_info(sched_id, NULL, NULL,
+ hpp != NULL, hpp, szp));
+ ainfo = erts_bld_tuple(hpp, szp, 3,
+ alloc_atom,
+ make_small(sched_id),
+ ainfo);
+ ai_list = erts_bld_cons(hpp, szp, ainfo, ai_list);
+#endif
+ break;
+ default:
+ if (erts_allctrs_info[ai].thr_spec) {
+ alloc_atom = erts_bld_atom(hpp, szp,
+ (char *) ERTS_ALC_A2AD(ai));
+ allctr = erts_allctr_thr_spec[ai].allctr[sched_id];
+ ainfo = info_func(allctr, hpp != NULL, NULL,
+ NULL, hpp, szp);
+ ai_list = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(
+ hpp, szp,
+ 3,
+ alloc_atom,
+ make_small(sched_id),
+ ainfo),
+ ai_list);
+ }
+ break;
+ }
+ msg = erts_bld_tuple(hpp, szp,
+ 3,
+ ref_copy,
+ make_small(sched_id),
+ ai_list);
+
+ }
+ if (hpp)
+ break;
+
+ hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ hp_start = hp;
+ hp_end = hp + sz;
+ szp = NULL;
+ hpp = &hp;
+ }
+ if (bp)
+ bp = erts_resize_message_buffer(bp, hp - hp_start, &msg, 1);
+ else {
+ ASSERT(hp);
+ HRelease(rp, hp_end, hp);
+ }
+
+ erts_queue_message(rp, &rp_locks, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+
+ if (air->req_sched == sched_id)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+
+ if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
+ aireq_free(air);
+}
+
+int
+erts_request_alloc_info(struct process *c_p,
+ Eterm ref,
+ Eterm allocs,
+ int only_sz)
+{
+ ErtsAllocInfoReq *air = aireq_alloc();
+ Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0};
+ Eterm alist;
+ Eterm *hp;
+ int airix = 0, ai;
+
+ air->req_sched = erts_get_scheduler_id();
+
+ air->only_sz = only_sz;
+
+ air->proc = c_p;
+
+ if (is_not_internal_ref(ref))
+ return 0;
+
+ hp = &air->ref_heap[0];
+ air->ref = STORE_NC(&hp, NULL, ref);
+
+ if (is_not_list(allocs))
+ return 0;
+
+ alist = allocs;
+
+ while (is_list(alist)) {
+ int saved = 0;
+ Eterm* consp = list_val(alist);
+ Eterm alloc = CAR(consp);
+
+ for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++)
+ if (erts_is_atom_str((char *) erts_alc_a2ad[ai], alloc))
+ goto save_alloc;
+ if (erts_is_atom_str("mseg_alloc", alloc)) {
+ ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
+ goto save_alloc;
+ }
+ if (erts_is_atom_str("alloc_util", alloc)) {
+ ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
+ save_alloc:
+ if (req_ai[ai])
+ return 0;
+ air->allocs[airix++] = ai;
+ req_ai[ai] = 1;
+ saved = 1;
+ }
+
+ if (!saved)
+ return 0;
+
+ alist = CDR(consp);
+ }
+
+ if (is_not_nil(alist))
+ return 0;
+
+ air->allocs[airix] = ERTS_ALC_A_INVALID;
+
+ erts_smp_atomic32_init_nob(&air->refc,
+ (erts_aint32_t) erts_no_schedulers);
+
+ erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+
+#ifdef ERTS_SMP
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_alloc_info,
+ (void *) air);
+#endif
+
+ reply_alloc_info((void *) air);
+
+ return 1;
+}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Deprecated functions *
* *
@@ -2871,10 +3126,10 @@ void *safe_realloc(void *ptr, Uint sz)
\* */
#define ERTS_ALC_TEST_ABORT erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n")
-unsigned long erts_alc_test(unsigned long op,
- unsigned long a1,
- unsigned long a2,
- unsigned long a3)
+UWord erts_alc_test(UWord op,
+ UWord a1,
+ UWord a2,
+ UWord a3)
{
switch (op >> 8) {
case 0x0: return erts_alcu_test(op, a1, a2);
@@ -2882,29 +3137,30 @@ unsigned long erts_alc_test(unsigned long op,
case 0x2: return erts_bfalc_test(op, a1, a2);
case 0x3: return erts_afalc_test(op, a1, a2);
case 0x4: return erts_mseg_test(op, a1, a2, a3);
+ case 0x5: return erts_aoffalc_test(op, a1, a2);
case 0xf:
switch (op) {
case 0xf00:
#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
- return (unsigned long) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
else
#endif
- return (unsigned long) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
case 0xf01:
#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
- return (unsigned long) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
else
#endif
- return (unsigned long) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
+ return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
@@ -2925,6 +3181,7 @@ unsigned long erts_alc_test(unsigned long op,
init.atype = GOODFIT;
init.init.util.name_prefix = (char *) a1;
init.init.util.ts = a2 ? 1 : 0;
+ init.init.util.sbmbct = 0;
if ((char **) a3) {
char **argv = (char **) a3;
@@ -2933,7 +3190,7 @@ unsigned long erts_alc_test(unsigned long op,
if (argv[i][0] == '-' && argv[i][1] == 't')
handle_au_arg(&init, &argv[i][2], argv, &i);
else
- return (unsigned long) NULL;
+ return (UWord) NULL;
i++;
}
}
@@ -2960,31 +3217,39 @@ unsigned long erts_alc_test(unsigned long op,
&init.init.af,
&init.init.util);
break;
+ case AOFIRSTFIT:
+ allctr = erts_aoffalc_start((AOFFAllctr_t *)
+ erts_alloc(ERTS_ALC_T_UNDEF,
+ sizeof(AOFFAllctr_t)),
+ &init.init.aoff,
+ &init.init.util);
+ break;
+
default:
ASSERT(0);
allctr = NULL;
break;
}
- return (unsigned long) allctr;
+ return (UWord) allctr;
}
case 0xf04:
erts_alcu_stop((Allctr_t *) a1);
erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
break;
#ifdef USE_THREADS
- case 0xf05: return (unsigned long) 1;
- case 0xf06: return (unsigned long) ((Allctr_t *) a1)->thread_safe;
+ case 0xf05: return (UWord) 1;
+ case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
#ifdef ETHR_NO_FORKSAFETY
- case 0xf07: return (unsigned long) 0;
+ case 0xf07: return (UWord) 0;
#else
- case 0xf07: return (unsigned long) ((Allctr_t *) a1)->thread_safe;
+ case 0xf07: return (UWord) ((Allctr_t *) a1)->thread_safe;
#endif
case 0xf08: {
ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_mutex));
if (ethr_mutex_init(mtx) != 0)
ERTS_ALC_TEST_ABORT;
- return (unsigned long) mtx;
+ return (UWord) mtx;
}
case 0xf09: {
ethr_mutex *mtx = (ethr_mutex *) a1;
@@ -3003,7 +3268,7 @@ unsigned long erts_alc_test(unsigned long op,
ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
if (ethr_cond_init(cnd) != 0)
ERTS_ALC_TEST_ABORT;
- return (unsigned long) cnd;
+ return (UWord) cnd;
}
case 0xf0d: {
ethr_cond *cnd = (ethr_cond *) a1;
@@ -3029,7 +3294,7 @@ unsigned long erts_alc_test(unsigned long op,
(void *) a2,
NULL) != 0)
ERTS_ALC_TEST_ABORT;
- return (unsigned long) tid;
+ return (UWord) tid;
}
case 0xf11: {
ethr_tid *tid = (ethr_tid *) a1;
@@ -3046,13 +3311,13 @@ unsigned long erts_alc_test(unsigned long op,
default:
break;
}
- return (unsigned long) 0;
+ return (UWord) 0;
default:
break;
}
ASSERT(0);
- return ~((unsigned long) 0);
+ return ~((UWord) 0);
}
#ifdef DEBUG
@@ -3288,7 +3553,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
erl_exit(ERTS_ABORT_EXIT,
"ERROR: Fence at beginning of memory block (p=0x%u) "
"clobbered.\n",
- (unsigned long) ptr);
+ (UWord) ptr);
}
memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(UWord));
@@ -3305,12 +3570,12 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
erl_exit(ERTS_ABORT_EXIT,
"ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
"clobbered.\n",
- (unsigned long) ptr, (unsigned long) sz);
+ (UWord) ptr, (UWord) sz);
if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
erl_exit(ERTS_ABORT_EXIT,
"ERROR: Fence around memory block (p=0x%u, sz=%u) "
"clobbered.\n",
- (unsigned long) ptr, (unsigned long) sz);
+ (UWord) ptr, (UWord) sz);
ftype = type_no_str(found_type);
if (!ftype) {
@@ -3333,7 +3598,7 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
erl_exit(ERTS_ABORT_EXIT,
"ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
" but %s as type \"%s\".\n",
- (unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
+ (UWord) ptr, (UWord) sz, ftype, op_str, otype);
}
#ifdef HARD_DEBUG
@@ -3455,6 +3720,4 @@ install_debug_functions(void)
return FENCE_SZ;
}
-
-
#endif /* #ifdef DEBUG */
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index ce792d4d17..e475f9d8a2 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -21,6 +21,10 @@
#define ERL_ALLOC_H__
#include "erl_alloc_types.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "erl_alloc_util.h"
#ifdef USE_THREADS
#include "erl_threads.h"
@@ -43,50 +47,47 @@
# define ERTS_ALC_INLINE
#endif
-#define ERTS_FIX_CORE_ALLOCATOR ERTS_ALC_A_LONG_LIVED
-extern ErtsAlcType_t erts_fix_core_allocator_ix;
-
-typedef struct {
- Uint total;
- Uint used;
-} ErtsFixInfo;
+#define ERTS_ALC_NO_FIXED_SIZES \
+ (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1)
void erts_sys_alloc_init(void);
void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);
void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint);
void erts_sys_free(ErtsAlcType_t, void *, void *);
-
-void erts_init_fix_alloc(Uint, void *(*)(Uint));
-Uint erts_get_fix_size(ErtsAlcType_t);
-void erts_set_fix_size(ErtsAlcType_t, Uint);
-void erts_fix_info(ErtsAlcType_t, ErtsFixInfo *);
-void *erts_fix_alloc(ErtsAlcType_t, void *, Uint);
-void *erts_fix_realloc(ErtsAlcType_t, void *, void*, Uint);
-void erts_fix_free(ErtsAlcType_t, void *, void*);
-
-
Eterm erts_memory(int *, void *, void *, Eterm);
Eterm erts_allocated_areas(int *, void *, void *);
Eterm erts_alloc_util_allocators(void *proc);
void erts_allocator_info(int, void *);
-Eterm erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz);
Eterm erts_allocator_options(void *proc);
+struct process;
+
+int erts_request_alloc_info(struct process *c_p, Eterm ref, Eterm allocs,
+ int only_sz);
+
#define ERTS_ALLOC_INIT_DEF_OPTS_INITER {0}
typedef struct {
- int dummy;
+ int ncpu;
} ErtsAllocInitOpts;
+typedef struct {
+ Allctr_t *deallctr[ERTS_ALC_A_MAX+1];
+ int pref_ix[ERTS_ALC_A_MAX+1];
+ int flist_ix[ERTS_ALC_A_MAX+1];
+ int pre_alc_ix;
+} ErtsSchedAllocData;
+
void erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop);
+void erts_alloc_late_init(void);
#if defined(GET_ERTS_ALC_TEST) || defined(ERTS_ALC_INTERNAL__)
/* Only for testing */
-unsigned long erts_alc_test(unsigned long,
- unsigned long,
- unsigned long,
- unsigned long);
+UWord erts_alc_test(UWord,
+ UWord,
+ UWord,
+ UWord);
#endif
#define ERTS_ALC_O_ALLOC 0
@@ -99,6 +100,14 @@ unsigned long erts_alc_test(unsigned long,
#define ERTS_ALC_MIN_LONG_LIVED_TIME (10*60*1000)
+#if HALFWORD_HEAP
+#define ERTS_IS_SBMBC_ALLOCATOR_NO__(NO) \
+ ((NO) == ERTS_ALC_A_SBMBC || (NO) == ERTS_ALC_A_SBMBC_LOW)
+#else
+#define ERTS_IS_SBMBC_ALLOCATOR_NO__(NO) \
+ ((NO) == ERTS_ALC_A_SBMBC)
+#endif
+
typedef struct {
int alloc_util;
int enabled;
@@ -118,15 +127,22 @@ extern ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
typedef struct {
int enabled;
- int all_thr_safe;
+ int dd;
+ int aix;
int size;
Allctr_t **allctr;
} ErtsAllocatorThrSpec_t;
extern ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
-int erts_alc_get_thr_ix(void);
-void erts_alloc_reg_scheduler_id(Uint id);
+void erts_alloc_register_scheduler(void *vesdp);
+#ifdef ERTS_SMP
+void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *more_work);
+#endif
+erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs);
__decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint)
__noreturn;
@@ -172,11 +188,11 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size);
void erts_free(ErtsAlcType_t type, void *ptr);
void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
-void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
-
#endif /* #if !ERTS_ALC_DO_INLINE */
+void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
+
#ifndef ERTS_CACHE_LINE_SIZE
/* Assume a cache line size of 64 bytes */
# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
@@ -242,20 +258,10 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
size);
}
-ERTS_ALC_INLINE
-void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
-{
- UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1));
-
- if (v & ERTS_CACHE_LINE_MASK) {
- v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
- }
- ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
- return (void*)v;
-}
-
#endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
+#define ERTS_ALC_GET_THR_IX() ((int) erts_get_scheduler_id())
+
typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
erts_alloc_verify_func_t
@@ -440,136 +446,41 @@ NAME##_free(TYPE *p) \
} \
}
-typedef struct {
- void *start;
- void *end;
- int chunks_mem_size;
-} erts_sched_pref_quick_alloc_data_t;
-
-#ifdef DEBUG
-#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P) \
-do { \
- ASSERT((void *) (C) < (void *) (P)); \
- ASSERT((void *) (P) \
- < (void *) (((char *) (C)) + (A)->chunks_mem_size)); \
-} while (0)
-#else
-#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P)
-#endif
+#include "erl_sched_spec_pre_alloc.h"
#define ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \
-union erts_qa_##NAME##__ { \
+union erts_sspa_##NAME##__ { \
+ erts_sspa_blk_t next; \
TYPE type; \
- union erts_qa_##NAME##__ *next; \
}; \
-typedef struct { \
- erts_smp_spinlock_t lock; \
- union erts_qa_##NAME##__ *freelist; \
- union erts_qa_##NAME##__ pre_alloced[1]; \
-} erts_qa_##NAME##_chunk__; \
-static erts_sched_pref_quick_alloc_data_t *qa_data_##NAME##__; \
-static ERTS_INLINE erts_qa_##NAME##_chunk__ * \
-get_##NAME##_chunk_ix(int cix) \
-{ \
- char *ptr = (char *) qa_data_##NAME##__->start; \
- ptr += cix*qa_data_##NAME##__->chunks_mem_size; \
- return (erts_qa_##NAME##_chunk__ *) ptr; \
-} \
-static ERTS_INLINE erts_qa_##NAME##_chunk__ * \
-get_##NAME##_chunk_ptr(void *ptr) \
-{ \
- int cix; \
- size_t diff; \
- if (ptr < qa_data_##NAME##__->start || qa_data_##NAME##__->end <= ptr)\
- return NULL; \
- diff = ((char *) ptr) - ((char *) qa_data_##NAME##__->start); \
- cix = diff / qa_data_##NAME##__->chunks_mem_size; \
- return get_##NAME##_chunk_ix(cix); \
-} \
+ \
+static erts_sspa_data_t *sspa_data_##NAME##__; \
+ \
static void \
init_##NAME##_alloc(void) \
{ \
- size_t tot_size; \
- size_t chunk_mem_size; \
- char *chunk_start; \
- int cix; \
- int no_blocks = ERTS_PRE_ALLOC_SIZE((PASZ)); \
- int no_blocks_per_chunk = 2*((no_blocks-1)/erts_no_schedulers + 1); \
- no_blocks = no_blocks_per_chunk * erts_no_schedulers; \
- chunk_mem_size = sizeof(erts_qa_##NAME##_chunk__); \
- chunk_mem_size += (sizeof(union erts_qa_##NAME##__) \
- * (no_blocks_per_chunk - 1)); \
- chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size); \
- tot_size = sizeof(erts_sched_pref_quick_alloc_data_t); \
- tot_size += ERTS_CACHE_LINE_SIZE - 1; \
- tot_size += chunk_mem_size*erts_no_schedulers; \
- qa_data_##NAME##__ = erts_alloc(ERTS_ALC_T_PRE_ALLOC_DATA,tot_size);\
- chunk_start = (((char *) qa_data_##NAME##__) \
- + sizeof(erts_sched_pref_quick_alloc_data_t)); \
- if ((((UWord) chunk_start) & ERTS_CACHE_LINE_MASK) != ((UWord) 0)) \
- chunk_start = ((char *) \
- ((((UWord) chunk_start) & ~ERTS_CACHE_LINE_MASK) \
- + ERTS_CACHE_LINE_SIZE)); \
- qa_data_##NAME##__->chunks_mem_size = chunk_mem_size; \
- qa_data_##NAME##__->start = (void *) chunk_start; \
- qa_data_##NAME##__->end = (chunk_start \
- + chunk_mem_size*erts_no_schedulers); \
- for (cix = 0; cix < erts_no_schedulers; cix++) { \
- int i; \
- erts_qa_##NAME##_chunk__ *chunk = get_##NAME##_chunk_ix(cix); \
- erts_smp_spinlock_init(&chunk->lock, #NAME "_alloc_lock"); \
- chunk->freelist = &chunk->pre_alloced[0]; \
- for (i = 1; i < no_blocks_per_chunk; i++) { \
- ERTS_PRE_ALLOC_CLOBBER(&chunk->pre_alloced[i-1], \
- union erts_qa_##NAME##__); \
- chunk->pre_alloced[i-1].next = &chunk->pre_alloced[i]; \
- } \
- ERTS_PRE_ALLOC_CLOBBER(&chunk->pre_alloced[no_blocks_per_chunk-1],\
- union erts_qa_##NAME##__); \
- chunk->pre_alloced[no_blocks_per_chunk-1].next = NULL; \
- } \
+ sspa_data_##NAME##__ = \
+ erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \
+ ERTS_PRE_ALLOC_SIZE((PASZ))); \
} \
-static ERTS_INLINE TYPE * \
+ \
+static TYPE * \
NAME##_alloc(void) \
{ \
- int cix = ((int) erts_get_scheduler_id()) - 1; \
- TYPE *res; \
- if (cix < 0) \
- res = NULL; \
- else { \
- erts_qa_##NAME##_chunk__ *chunk = get_##NAME##_chunk_ix(cix); \
- erts_smp_spin_lock(&chunk->lock); \
- if (!chunk->freelist) \
- res = NULL; \
- else { \
- res = &chunk->freelist->type; \
- chunk->freelist = chunk->freelist->next; \
- ERTS_SPPA_DBG_CHK_IN_CHNK(qa_data_##NAME##__, chunk, res); \
- } \
- erts_smp_spin_unlock(&chunk->lock); \
- } \
- return res; \
+ ErtsSchedulerData *esdp = erts_get_scheduler_data(); \
+ if (!esdp) \
+ return NULL; \
+ return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \
+ (int) esdp->no - 1); \
} \
-static ERTS_INLINE int \
+ \
+static int \
NAME##_free(TYPE *p) \
{ \
- erts_qa_##NAME##_chunk__ *chunk; \
- chunk = get_##NAME##_chunk_ptr((void *) p); \
- if (!chunk) \
- return 0; \
- else { \
- union erts_qa_##NAME##__ *up; \
- ERTS_SPPA_DBG_CHK_IN_CHNK(qa_data_##NAME##__, chunk, p); \
- up = ((union erts_qa_##NAME##__ *) \
- (((char *) p) \
- - ((char *) &((union erts_qa_##NAME##__ *) 0)->type))); \
- erts_smp_spin_lock(&chunk->lock); \
- ERTS_PRE_ALLOC_CLOBBER(up, union erts_qa_##NAME##__); \
- up->next = chunk->freelist; \
- chunk->freelist = up; \
- erts_smp_spin_unlock(&chunk->lock); \
- return 1; \
- } \
+ ErtsSchedulerData *esdp = erts_get_scheduler_data(); \
+ return erts_sspa_free(sspa_data_##NAME##__, \
+ esdp ? (int) esdp->no - 1 : -1, \
+ (char *) p); \
}
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index c6cc0e1fac..bba6b83ac6 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2011. All Rights Reserved.
+# Copyright Ericsson AB 2003-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -50,6 +50,15 @@
# command line argument to make_alloc_types. The variable X is false
# after a "+disable X" statement or if it has never been mentioned.
++if smp
++disable threads_no_smp
++else
++if threads
++enable threads_no_smp
++else
++disable threads_no_smp
++endif
++endif
# --- Allocator declarations -------------------------------------------------
#
@@ -65,6 +74,11 @@
allocator SYSTEM true sys_alloc
+allocator SBMBC true sbmbc_alloc
++if halfword
+allocator SBMBC_LOW true sbmbc_low_alloc
++endif
+
+if smp
allocator TEMPORARY true temp_alloc
@@ -76,8 +90,8 @@ allocator ETS true ets_alloc
allocator FIXED_SIZE true fix_alloc
+if halfword
-allocator LONG_LIVED_LOW true ll_alloc_low
-allocator STANDARD_LOW true std_alloc_low
+allocator LONG_LIVED_LOW true ll_low_alloc
+allocator STANDARD_LOW true std_low_alloc
+endif
+else # Non smp build
@@ -91,8 +105,8 @@ allocator ETS false ets_alloc
allocator FIXED_SIZE false fix_alloc
+if halfword
-allocator LONG_LIVED_LOW false ll_alloc_low
-allocator STANDARD_LOW false std_alloc_low
+allocator LONG_LIVED_LOW false ll_low_alloc
+allocator STANDARD_LOW false std_low_alloc
+endif
+endif
@@ -128,28 +142,25 @@ class SYSTEM system_data
# should be deallocated before the emulator starts executing Erlang
# code again.
#
-# NOTE: When adding or removing a type which uses the FIXED_SIZE allocator,
-# also add or remove initialization of the type in erts_alloc_init()
-# (erl_alloc.c).
-#
# <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION>
+type SBMBC SBMBC SYSTEM small_block_mbc
type PROC FIXED_SIZE PROCESSES proc
-type ATOM FIXED_SIZE ATOM atom_entry
-type MODULE FIXED_SIZE CODE module_entry
-type REG_PROC FIXED_SIZE PROCESSES reg_proc
+type ATOM LONG_LIVED ATOM atom_entry
+type MODULE LONG_LIVED CODE module_entry
+type REG_PROC STANDARD PROCESSES reg_proc
type LINK_LH STANDARD PROCESSES link_lh
type SUSPEND_MON STANDARD PROCESSES suspend_monitor
type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
-type FUN_ENTRY FIXED_SIZE CODE fun_entry
+type FUN_ENTRY LONG_LIVED CODE fun_entry
type ATOM_TXT LONG_LIVED ATOM atom_text
type BEAM_REGISTER EHEAP PROCESSES beam_register
type HEAP EHEAP PROCESSES heap
type OLD_HEAP EHEAP PROCESSES old_heap
type HEAP_FRAG EHEAP PROCESSES heap_frag
type TMP_HEAP TEMPORARY PROCESSES tmp_heap
-type MSG_REF SHORT_LIVED PROCESSES msg_ref
+type MSG_REF FIXED_SIZE PROCESSES msg_ref
type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
@@ -190,10 +201,10 @@ type LINEBUF STANDARD SYSTEM line_buf
type IOQ STANDARD SYSTEM io_queue
type BITS_BUF STANDARD SYSTEM bits_buf
type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
-type ASYNC_Q LONG_LIVED SYSTEM async_queue
+type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data
type ESTACK TEMPORARY SYSTEM estack
type PORT_CALL_BUF TEMPORARY SYSTEM port_call_buf
-type DB_TABLE FIXED_SIZE ETS db_tab
+type DB_TABLE ETS ETS db_tab
type DB_FIXATION SHORT_LIVED ETS db_fixation
type DB_FIX_DEL SHORT_LIVED ETS fixed_del
type DB_TABLES LONG_LIVED ETS db_tabs
@@ -250,6 +261,23 @@ type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type ZLIB STANDARD SYSTEM zlib
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
+type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
+type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
+
++if threads_no_smp
+# Need thread safe allocs, but std_alloc and fix_alloc are not;
+# use driver_alloc which is...
+type THR_Q_EL DRIVER SYSTEM thr_q_element
+type THR_Q_EL_SL DRIVER SYSTEM sl_thr_q_element
+type MISC_AUX_WORK DRIVER SYSTEM misc_aux_work
++else
+type THR_Q_EL STANDARD SYSTEM thr_q_element
+type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element
+type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
++endif
+type THR_Q STANDARD SYSTEM thr_queue
+type THR_Q_SL SHORT_LIVED SYSTEM short_lived_thr_queue
+type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue
+if smp
type ASYNC SHORT_LIVED SYSTEM async
@@ -265,8 +293,9 @@ type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
type PROC_LCK_QS LONG_LIVED SYSTEM proc_lock_queues
type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
-type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
-type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
+type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data
+type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data
+type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data
+endif
#
@@ -279,12 +308,6 @@ type ETHR_STD STANDARD SYSTEM ethread_standard
type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived
type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived
-+ifnot smp
-
-type ARCALLBACK LONG_LIVED SYSTEM async_ready_callback
-
-+endif
-
+endif
+if shared_heap
@@ -330,6 +353,7 @@ type SSB SHORT_LIVED PROCESSES ssb
+if halfword
+type SBMBC_LOW SBMBC_LOW SYSTEM small_block_mbc_low
type DDLL_PROCESS STANDARD_LOW SYSTEM ddll_processes
type MONITOR_LH STANDARD_LOW PROCESSES monitor_lh
type NLINK_LH STANDARD_LOW PROCESSES nlink_lh
@@ -339,10 +363,11 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
-# no FIXED_SIZE for low memory
-type EXPORT STANDARD_LOW CODE export_entry
+type EXPORT LONG_LIVED_LOW CODE export_entry
type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
+type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
+type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request
+else # "fullword"
@@ -355,9 +380,11 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
-type EXPORT FIXED_SIZE CODE export_entry
+type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
+type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
+type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
+endif
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index cc04ef65bf..e0d525bdde 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -46,6 +46,7 @@
#include "erl_alloc_util.h"
#include "erl_mseg.h"
#include "erl_threads.h"
+#include "erl_thr_progress.h"
#ifdef ERTS_ENABLE_LOCK_COUNT
#include "erl_lock_count.h"
@@ -61,11 +62,19 @@
#warning "* * * * * * * * * *"
#endif
+#define ERTS_ALCU_DD_OPS_LIM_HIGH 20
+#define ERTS_ALCU_DD_OPS_LIM_LOW 2
+
+/* Fix alloc limit */
+#define ERTS_ALCU_FIX_MAX_LIST_SZ 1000
+#define ERTS_ALC_FIX_MAX_SHRINK_OPS 30
+
#define ALLOC_ZERO_EQ_NULL 0
static int atoms_initialized = 0;
static int initialized = 0;
+int erts_have_sbmbc_alloc;
#if HAVE_ERTS_MSEG
@@ -85,8 +94,6 @@ static int initialized = 0;
#undef ASSERT
#define ASSERT ASSERT_EXPR
-#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE ((UWord) 1)
-
#if 0
/* Can be useful for debugging */
#define MBC_REALLOC_ALWAYS_MOVES
@@ -270,19 +277,30 @@ static void check_blk_carrier(Allctr_t *, Block_t *);
#define HARD_CHECK_BLK_CARRIER(A, B)
#endif
-
/* Statistics updating ... */
#ifdef DEBUG
#define DEBUG_CHECK_CARRIER_NO_SZ(AP) \
- ASSERT(((AP)->sbcs.curr_mseg.no && (AP)->sbcs.curr_mseg.size) \
- || (!(AP)->sbcs.curr_mseg.no && !(AP)->sbcs.curr_mseg.size));\
- ASSERT(((AP)->sbcs.curr_sys_alloc.no && (AP)->sbcs.curr_sys_alloc.size)\
- || (!(AP)->sbcs.curr_sys_alloc.no && !(AP)->sbcs.curr_sys_alloc.size));\
- ASSERT(((AP)->mbcs.curr_mseg.no && (AP)->mbcs.curr_mseg.size) \
- || (!(AP)->mbcs.curr_mseg.no && !(AP)->mbcs.curr_mseg.size));\
- ASSERT(((AP)->mbcs.curr_sys_alloc.no && (AP)->mbcs.curr_sys_alloc.size)\
- || (!(AP)->mbcs.curr_sys_alloc.no && !(AP)->mbcs.curr_sys_alloc.size))
+ ASSERT(((AP)->sbcs.curr.norm.mseg.no \
+ && (AP)->sbcs.curr.norm.mseg.size) \
+ || (!(AP)->sbcs.curr.norm.mseg.no \
+ && !(AP)->sbcs.curr.norm.mseg.size)); \
+ ASSERT(((AP)->sbcs.curr.norm.sys_alloc.no \
+ && (AP)->sbcs.curr.norm.sys_alloc.size) \
+ || (!(AP)->sbcs.curr.norm.sys_alloc.no \
+ && !(AP)->sbcs.curr.norm.sys_alloc.size)); \
+ ASSERT(((AP)->mbcs.curr.norm.mseg.no \
+ && (AP)->mbcs.curr.norm.mseg.size) \
+ || (!(AP)->mbcs.curr.norm.mseg.no \
+ && !(AP)->mbcs.curr.norm.mseg.size)); \
+ ASSERT(((AP)->mbcs.curr.norm.sys_alloc.no \
+ && (AP)->mbcs.curr.norm.sys_alloc.size) \
+ || (!(AP)->mbcs.curr.norm.sys_alloc.no \
+ && !(AP)->mbcs.curr.norm.sys_alloc.size)); \
+ ASSERT(((AP)->sbmbcs.curr.small_block.no \
+ && (AP)->sbmbcs.curr.small_block.size) \
+ || (!(AP)->sbmbcs.curr.small_block.no \
+ && !(AP)->sbmbcs.curr.small_block.size))
#else
#define DEBUG_CHECK_CARRIER_NO_SZ(AP)
@@ -292,27 +310,27 @@ static void check_blk_carrier(Allctr_t *, Block_t *);
(AP)->sbcs.blocks.curr.size += (BSZ); \
if ((AP)->sbcs.blocks.max.size < (AP)->sbcs.blocks.curr.size) \
(AP)->sbcs.blocks.max.size = (AP)->sbcs.blocks.curr.size; \
- if ((AP)->sbcs.max.no < ((AP)->sbcs.curr_mseg.no \
- + (AP)->sbcs.curr_sys_alloc.no)) \
- (AP)->sbcs.max.no = ((AP)->sbcs.curr_mseg.no \
- + (AP)->sbcs.curr_sys_alloc.no); \
- if ((AP)->sbcs.max.size < ((AP)->sbcs.curr_mseg.size \
- + (AP)->sbcs.curr_sys_alloc.size)) \
- (AP)->sbcs.max.size = ((AP)->sbcs.curr_mseg.size \
- + (AP)->sbcs.curr_sys_alloc.size)
+ if ((AP)->sbcs.max.no < ((AP)->sbcs.curr.norm.mseg.no \
+ + (AP)->sbcs.curr.norm.sys_alloc.no)) \
+ (AP)->sbcs.max.no = ((AP)->sbcs.curr.norm.mseg.no \
+ + (AP)->sbcs.curr.norm.sys_alloc.no); \
+ if ((AP)->sbcs.max.size < ((AP)->sbcs.curr.norm.mseg.size \
+ + (AP)->sbcs.curr.norm.sys_alloc.size)) \
+ (AP)->sbcs.max.size = ((AP)->sbcs.curr.norm.mseg.size \
+ + (AP)->sbcs.curr.norm.sys_alloc.size)
#define STAT_MSEG_SBC_ALLOC(AP, CSZ, BSZ) \
do { \
- (AP)->sbcs.curr_mseg.no++; \
- (AP)->sbcs.curr_mseg.size += (CSZ); \
+ (AP)->sbcs.curr.norm.mseg.no++; \
+ (AP)->sbcs.curr.norm.mseg.size += (CSZ); \
STAT_SBC_ALLOC((AP), (BSZ)); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
#define STAT_SYS_ALLOC_SBC_ALLOC(AP, CSZ, BSZ) \
do { \
- (AP)->sbcs.curr_sys_alloc.no++; \
- (AP)->sbcs.curr_sys_alloc.size += (CSZ); \
+ (AP)->sbcs.curr.norm.sys_alloc.no++; \
+ (AP)->sbcs.curr.norm.sys_alloc.size += (CSZ); \
STAT_SBC_ALLOC((AP), (BSZ)); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
@@ -324,85 +342,111 @@ do { \
#define STAT_MSEG_SBC_FREE(AP, CSZ, BSZ) \
do { \
- ASSERT((AP)->sbcs.curr_mseg.no > 0); \
- (AP)->sbcs.curr_mseg.no--; \
- ASSERT((AP)->sbcs.curr_mseg.size >= (CSZ)); \
- (AP)->sbcs.curr_mseg.size -= (CSZ); \
+ ASSERT((AP)->sbcs.curr.norm.mseg.no > 0); \
+ (AP)->sbcs.curr.norm.mseg.no--; \
+ ASSERT((AP)->sbcs.curr.norm.mseg.size >= (CSZ)); \
+ (AP)->sbcs.curr.norm.mseg.size -= (CSZ); \
STAT_SBC_FREE((AP), (BSZ)); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
#define STAT_SYS_ALLOC_SBC_FREE(AP, CSZ, BSZ) \
do { \
- ASSERT((AP)->sbcs.curr_sys_alloc.no > 0); \
- (AP)->sbcs.curr_sys_alloc.no--; \
- ASSERT((AP)->sbcs.curr_sys_alloc.size >= (CSZ)); \
- (AP)->sbcs.curr_sys_alloc.size -= (CSZ); \
+ ASSERT((AP)->sbcs.curr.norm.sys_alloc.no > 0); \
+ (AP)->sbcs.curr.norm.sys_alloc.no--; \
+ ASSERT((AP)->sbcs.curr.norm.sys_alloc.size >= (CSZ)); \
+ (AP)->sbcs.curr.norm.sys_alloc.size -= (CSZ); \
STAT_SBC_FREE((AP), (BSZ)); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
#define STAT_MBC_ALLOC(AP) \
- if ((AP)->mbcs.max.no < ((AP)->mbcs.curr_mseg.no \
- + (AP)->mbcs.curr_sys_alloc.no)) \
- (AP)->mbcs.max.no = ((AP)->mbcs.curr_mseg.no \
- + (AP)->mbcs.curr_sys_alloc.no); \
- if ((AP)->mbcs.max.size < ((AP)->mbcs.curr_mseg.size \
- + (AP)->mbcs.curr_sys_alloc.size)) \
- (AP)->mbcs.max.size = ((AP)->mbcs.curr_mseg.size \
- + (AP)->mbcs.curr_sys_alloc.size)
+ if ((AP)->mbcs.max.no < ((AP)->mbcs.curr.norm.mseg.no \
+ + (AP)->mbcs.curr.norm.sys_alloc.no)) \
+ (AP)->mbcs.max.no = ((AP)->mbcs.curr.norm.mseg.no \
+ + (AP)->mbcs.curr.norm.sys_alloc.no); \
+ if ((AP)->mbcs.max.size < ((AP)->mbcs.curr.norm.mseg.size \
+ + (AP)->mbcs.curr.norm.sys_alloc.size)) \
+ (AP)->mbcs.max.size = ((AP)->mbcs.curr.norm.mseg.size \
+ + (AP)->mbcs.curr.norm.sys_alloc.size)
+#define STAT_SBMBC_ALLOC(AP, CSZ) \
+do { \
+ (AP)->sbmbcs.curr.small_block.no++; \
+ (AP)->sbmbcs.curr.small_block.size += (CSZ); \
+ if ((AP)->sbmbcs.max.no < (AP)->sbmbcs.curr.small_block.no) \
+ (AP)->sbmbcs.max.no = (AP)->sbmbcs.curr.small_block.no; \
+ if ((AP)->sbmbcs.max.size < (AP)->sbmbcs.curr.small_block.size) \
+ (AP)->sbmbcs.max.size = (AP)->sbmbcs.curr.small_block.size; \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
#define STAT_MSEG_MBC_ALLOC(AP, CSZ) \
do { \
- (AP)->mbcs.curr_mseg.no++; \
- (AP)->mbcs.curr_mseg.size += (CSZ); \
+ (AP)->mbcs.curr.norm.mseg.no++; \
+ (AP)->mbcs.curr.norm.mseg.size += (CSZ); \
STAT_MBC_ALLOC((AP)); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
#define STAT_SYS_ALLOC_MBC_ALLOC(AP, CSZ) \
do { \
- (AP)->mbcs.curr_sys_alloc.no++; \
- (AP)->mbcs.curr_sys_alloc.size += (CSZ); \
+ (AP)->mbcs.curr.norm.sys_alloc.no++; \
+ (AP)->mbcs.curr.norm.sys_alloc.size += (CSZ); \
STAT_MBC_ALLOC((AP)); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
+#define STAT_SBMBC_FREE(AP, CSZ) \
+do { \
+ ASSERT((AP)->sbmbcs.curr.small_block.no > 0); \
+ (AP)->sbmbcs.curr.small_block.no--; \
+ ASSERT((AP)->sbmbcs.curr.small_block.size >= (CSZ)); \
+ (AP)->sbmbcs.curr.small_block.size -= (CSZ); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
#define STAT_MSEG_MBC_FREE(AP, CSZ) \
do { \
- ASSERT((AP)->mbcs.curr_mseg.no > 0); \
- (AP)->mbcs.curr_mseg.no--; \
- ASSERT((AP)->mbcs.curr_mseg.size >= (CSZ)); \
- (AP)->mbcs.curr_mseg.size -= (CSZ); \
+ ASSERT((AP)->mbcs.curr.norm.mseg.no > 0); \
+ (AP)->mbcs.curr.norm.mseg.no--; \
+ ASSERT((AP)->mbcs.curr.norm.mseg.size >= (CSZ)); \
+ (AP)->mbcs.curr.norm.mseg.size -= (CSZ); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
#define STAT_SYS_ALLOC_MBC_FREE(AP, CSZ) \
do { \
- ASSERT((AP)->mbcs.curr_sys_alloc.no > 0); \
- (AP)->mbcs.curr_sys_alloc.no--; \
- ASSERT((AP)->mbcs.curr_sys_alloc.size >= (CSZ)); \
- (AP)->mbcs.curr_sys_alloc.size -= (CSZ); \
+ ASSERT((AP)->mbcs.curr.norm.sys_alloc.no > 0); \
+ (AP)->mbcs.curr.norm.sys_alloc.no--; \
+ ASSERT((AP)->mbcs.curr.norm.sys_alloc.size >= (CSZ)); \
+ (AP)->mbcs.curr.norm.sys_alloc.size -= (CSZ); \
DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
} while (0)
-#define STAT_MBC_BLK_ALLOC(AP, BSZ) \
+#define STAT_MBC_BLK_ALLOC(AP, BSZ, FLGS) \
do { \
- (AP)->mbcs.blocks.curr.no++; \
- if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \
- (AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \
- (AP)->mbcs.blocks.curr.size += (BSZ); \
- if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \
- (AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \
+ CarriersStats_t *cstats__ = (((FLGS) & ERTS_ALCU_FLG_SBMBC) \
+ ? &(AP)->sbmbcs \
+ : &(AP)->mbcs); \
+ cstats__->blocks.curr.no++; \
+ if (cstats__->blocks.max.no < cstats__->blocks.curr.no) \
+ cstats__->blocks.max.no = cstats__->blocks.curr.no; \
+ cstats__->blocks.curr.size += (BSZ); \
+ if (cstats__->blocks.max.size < cstats__->blocks.curr.size) \
+ cstats__->blocks.max.size = cstats__->blocks.curr.size; \
} while (0)
-#define STAT_MBC_BLK_FREE(AP, BSZ) \
+#define STAT_MBC_BLK_FREE(AP, BSZ, FLGS) \
do { \
- ASSERT((AP)->mbcs.blocks.curr.no > 0); \
- (AP)->mbcs.blocks.curr.no--; \
- ASSERT((AP)->mbcs.blocks.curr.size >= (BSZ)); \
- (AP)->mbcs.blocks.curr.size -= (BSZ); \
+ CarriersStats_t *cstats__ = (((FLGS) & ERTS_ALCU_FLG_SBMBC) \
+ ? &(AP)->sbmbcs \
+ : &(AP)->mbcs); \
+ ASSERT(cstats__->blocks.curr.no > 0); \
+ cstats__->blocks.curr.no--; \
+ ASSERT(cstats__->blocks.curr.size >= (BSZ)); \
+ cstats__->blocks.curr.size -= (BSZ); \
} while (0)
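+/*
+ * Block counters are kept either in the normal mbc statistics or in the
+ * sbmbc (small block multi block carrier) statistics; the ERTS_ALCU_FLG_SBMBC
+ * flag passed by the caller selects which set is updated.
+ */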
/* Debug stuff... */
@@ -410,7 +454,7 @@ do { \
static UWord carrier_alignment;
#define DEBUG_SAVE_ALIGNMENT(C) \
do { \
- UWord algnmnt__ = sizeof(Unit_t) - (((UWord) (C)) % sizeof(Unit_t)); \
+ UWord algnmnt__ = sizeof(Unit_t) - (((UWord) (C)) % sizeof(Unit_t));\
carrier_alignment = MIN(carrier_alignment, algnmnt__); \
ASSERT(((UWord) (C)) % sizeof(UWord) == 0); \
} while (0)
@@ -428,26 +472,34 @@ do { \
#ifdef DEBUG
#ifdef USE_THREADS
-#define ERTS_ALCU_DBG_CHK_THR_SPEC(A) \
+#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
do { \
if (!(A)->thread_safe) { \
- if (!(A)->debug.saved_tid) \
+ if (!(A)->debug.saved_tid) { \
(A)->debug.tid = erts_thr_self(); \
+ (A)->debug.saved_tid = 1; \
+ } \
else { \
- ASSERT(ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
+ ERTS_SMP_LC_ASSERT( \
+ ethr_equal_tids((A)->debug.tid, erts_thr_self()) \
+ || erts_thr_progress_is_blocking()); \
} \
} \
} while (0)
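+/*
+ * A non thread safe allocator instance may only be accessed by the thread
+ * that first used it, or by any thread while thread progress is blocked.
+ */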
#else
-#define ERTS_ALCU_DBG_CHK_THR_SPEC(A)
+#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
#else
-#define ERTS_ALCU_DBG_CHK_THR_SPEC(A)
+#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
static void make_name_atoms(Allctr_t *allctr);
+static Block_t *create_carrier(Allctr_t *, Uint, UWord);
+static void destroy_carrier(Allctr_t *, Block_t *);
+static void mbc_free(Allctr_t *allctr, void *p);
+
/* mseg ... */
@@ -524,8 +576,8 @@ static Uint
get_next_mbc_size(Allctr_t *allctr)
{
Uint size;
- int cs = (allctr->mbcs.curr_mseg.no
- + allctr->mbcs.curr_sys_alloc.no
+ int cs = (allctr->mbcs.curr.norm.mseg.no
+ + allctr->mbcs.curr.norm.sys_alloc.no
- (allctr->main_carrier ? 1 : 0));
ASSERT(cs >= 0);
@@ -609,49 +661,540 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
}
}
-
+static Block_t *create_sbmbc(Allctr_t *allctr, Uint umem_sz);
+static void destroy_sbmbc(Allctr_t *allctr, Block_t *blk);
static Block_t *create_carrier(Allctr_t *, Uint, UWord);
static void destroy_carrier(Allctr_t *, Block_t *);
+#if 0
+#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
+ do { if ((FIX)) chk_fix_list((A), (FIX), (IX), (B)); } while (0)
+static void
+chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
+{
+ void *p;
+ int n;
+ for (n = 0, p = fix[ix].list; p; p = *((void **) p))
+ n++;
+ if (n != fix[ix].list_size) {
+ erts_fprintf(stderr, "FOUND IT ts=%d, sched=%d, ix=%d, n=%d, ls=%d %s!\n",
+ allctr->thread_safe, allctr->ix, ix, n, fix[ix].list_size, before ? "before" : "after");
+ abort();
+ }
+}
+#else
+#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B)
+#endif
+
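+/*
+ * Shrink the cached free lists of fixed size allocations. Each list is
+ * trimmed towards its limit, at most ERTS_ALC_FIX_MAX_SHRINK_OPS blocks
+ * per call, or flushed completely when called without flags. The aux work
+ * timeout is disabled once all lists are empty.
+ */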
+erts_aint32_t
+erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
+{
+ int all_empty = 1;
+ erts_aint32_t res = 0;
+ int ix, o;
+ ErtsAlcFixList_t *fix = allctr->fix;
+ int flush = flgs == 0;
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ if (flgs & ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM) {
+ fix[ix].limit = fix[ix].max_used;
+ if (fix[ix].limit < fix[ix].used)
+ fix[ix].limit = fix[ix].used;
+ fix[ix].max_used = fix[ix].used;
+ ASSERT(fix[ix].limit >= 0);
+
+ }
+ if (flush) {
+ fix[ix].limit = 0;
+ fix[ix].max_used = fix[ix].used;
+ ASSERT(fix[ix].limit >= 0);
+ }
+ for (o = 0; o < ERTS_ALC_FIX_MAX_SHRINK_OPS || flush; o++) {
+ Block_t *blk;
+ void *ptr;
+
+ if (!flush && fix[ix].limit >= fix[ix].allocated)
+ break;
+ if (fix[ix].list_size == 0)
+ break;
+ ptr = fix[ix].list;
+ fix[ix].list = *((void **) ptr);
+ fix[ix].list_size--;
+
+ blk = UMEM2BLK(ptr);
+
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, ptr);
+
+ fix[ix].allocated--;
+ }
+ if (fix[ix].list_size != 0) {
+ if (fix[ix].limit < fix[ix].allocated)
+ res |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
+ all_empty = 0;
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ }
+
+ if (all_empty && allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 0;
+ erts_set_aux_work_timeout(allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 0);
+ }
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+
+ return res;
+}
+
+#ifdef ERTS_SMP
+
+#define ERTS_ALCU_DD_FIX_TYPE_OFFS \
+ ((sizeof(ErtsAllctrDDBlock_t)-1)/sizeof(UWord) + 1)
+
+#define ERTS_AU_PREF_ALLOC_IX_MASK \
+ ((((UWord) 1) << ERTS_AU_PREF_ALLOC_BITS) - 1)
+#define ERTS_AU_PREF_ALLOC_SIZE_MASK \
+ ((((UWord) 1) << (sizeof(UWord)*8 - ERTS_AU_PREF_ALLOC_BITS)) - 1)
+
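+/*
+ * Thread preferred allocation prefixes each user block with one UWord
+ * holding the index of the allocating instance in the low
+ * ERTS_AU_PREF_ALLOC_BITS bits and the block size, saturated at
+ * ERTS_AU_PREF_ALLOC_SIZE_MASK, in the remaining bits. The word is
+ * decoded by get_used_allctr() when the block is freed or reallocated.
+ */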
+static ERTS_INLINE int
+get_pref_allctr(void *extra, Allctr_t **allctr)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int pref_ix;
+
+ pref_ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
+ ASSERT(0 <= pref_ix && pref_ix < tspec->size);
+
+ *allctr = tspec->allctr[pref_ix];
+ return pref_ix;
+}
+
+static ERTS_INLINE void *
+get_used_allctr(void *extra, void *p, Allctr_t **allctr, UWord *sizep)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ void *ptr = (void *) (((char *) p) - sizeof(UWord));
+ UWord ainfo = *((UWord *) ptr);
+ int aix = (int) (ainfo & ERTS_AU_PREF_ALLOC_IX_MASK);
+ *allctr = tspec->allctr[aix];
+ if (sizep)
+ *sizep = ((ainfo >> ERTS_AU_PREF_ALLOC_BITS)
+ & ERTS_AU_PREF_ALLOC_SIZE_MASK);
+ return ptr;
+}
+
+static ERTS_INLINE void *
+put_used_allctr(void *p, int ix, UWord size)
+{
+ UWord ainfo = (size >= ERTS_AU_PREF_ALLOC_SIZE_MASK
+ ? ERTS_AU_PREF_ALLOC_SIZE_MASK
+ : size);
+ ainfo <<= ERTS_AU_PREF_ALLOC_BITS;
+ ainfo |= (UWord) ix;
+ *((UWord *) p) = ainfo;
+ return (void *) (((char *) p) + sizeof(UWord));
+}
+
+static void
+init_dd_queue(ErtsAllctrDDQueue_t *ddq)
+{
+ erts_atomic_init_nob(&ddq->tail.data.marker.atmc_next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&ddq->tail.data.last,
+ (erts_aint_t) &ddq->tail.data.marker);
+ erts_atomic_init_nob(&ddq->tail.data.um_refc[0], 0);
+ erts_atomic_init_nob(&ddq->tail.data.um_refc[1], 0);
+ erts_atomic32_init_nob(&ddq->tail.data.um_refc_ix, 0);
+ ddq->head.first = &ddq->tail.data.marker;
+ ddq->head.unref_end = &ddq->tail.data.marker;
+ ddq->head.next.thr_progress = erts_thr_progress_current();
+ ddq->head.next.thr_progress_reached = 1;
+ ddq->head.next.um_refc_ix = 1;
+ ddq->head.next.unref_end = &ddq->tail.data.marker;
+ ddq->head.used_marker = 1;
+}
+
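+/*
+ * Lock free enqueue: the block is linked after the current last element
+ * with a cmpxchg on its next pointer, after which the tail pointer is
+ * moved forward. If another enqueuer has already linked a block after
+ * ours, that thread takes over moving the tail pointer.
+ */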
+static ERTS_INLINE erts_aint_t
+ddq_managed_thread_enqueue(ErtsAllctrDDQueue_t *ddq, void *ptr)
+{
+ erts_aint_t ilast, itmp;
+ ErtsAllctrDDBlock_t *this = ptr;
+
+ erts_atomic_init_nob(&this->atmc_next, ERTS_AINT_NULL);
+
+ /* Enqueue at end of list... */
+
+ ilast = erts_atomic_read_nob(&ddq->tail.data.last);
+ while (1) {
+ ErtsAllctrDDBlock_t *last = (ErtsAllctrDDBlock_t *) ilast;
+ itmp = erts_atomic_cmpxchg_mb(&last->atmc_next,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL)
+ break;
+ ilast = itmp;
+ }
+
+ /* Move last pointer forward... */
+ while (1) {
+ if (erts_atomic_read_rb(&this->atmc_next) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return erts_atomic_read_rb(&ddq->tail.data.last);
+ }
+ itmp = erts_atomic_cmpxchg_mb(&ddq->tail.data.last,
+ (erts_aint_t) this,
+ ilast);
+ if (ilast == itmp)
+ return (erts_aint_t) this;
+ ilast = itmp;
+ }
+}
+
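+/*
+ * Unmanaged threads are not tracked by thread progress, so they hold a
+ * reference count (um_refc) around the enqueue; the consumer waits for
+ * the counter of the previous epoch to drop to zero before it moves its
+ * unreferenced end pointer forward.
+ */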
+static ERTS_INLINE int
+ddq_enqueue(ErtsAlcType_t type, ErtsAllctrDDQueue_t *ddq, void *ptr)
+{
+ erts_aint_t ilast;
+ int um_refc_ix = 0;
+ int managed_thread = erts_thr_progress_is_managed_thread();
+ if (!managed_thread) {
+ um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
+ while (1) {
+ int tmp_um_refc_ix;
+ erts_atomic_inc_acqb(&ddq->tail.data.um_refc[um_refc_ix]);
+ tmp_um_refc_ix = erts_atomic32_read_acqb(&ddq->tail.data.um_refc_ix);
+ if (tmp_um_refc_ix == um_refc_ix)
+ break;
+ erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
+ um_refc_ix = tmp_um_refc_ix;
+ }
+ }
+
+ ilast = ddq_managed_thread_enqueue(ddq, ptr);
+
+ if (!managed_thread)
+ erts_atomic_dec_relb(&ddq->tail.data.um_refc[um_refc_ix]);
+ return ilast == (erts_aint_t) ptr;
+}
+
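+/*
+ * Dequeue is only performed by the owning consumer thread and never goes
+ * past head.unref_end. The statically allocated marker element keeps the
+ * queue from ever becoming completely empty.
+ */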
+static ERTS_INLINE void *
+ddq_dequeue(ErtsAllctrDDQueue_t *ddq)
+{
+ ErtsAllctrDDBlock_t *blk;
+
+ if (ddq->head.first == ddq->head.unref_end)
+ return NULL;
+
+ blk = ddq->head.first;
+ if (blk == &ddq->tail.data.marker) {
+ ASSERT(ddq->head.used_marker);
+ ddq->head.used_marker = 0;
+ blk = ((ErtsAllctrDDBlock_t *)
+ erts_atomic_read_nob(&blk->atmc_next));
+ if (blk == ddq->head.unref_end) {
+ ddq->head.first = blk;
+ return NULL;
+ }
+ }
+
+ ddq->head.first = ((ErtsAllctrDDBlock_t *)
+ erts_atomic_read_nob(&blk->atmc_next));
+
+ ASSERT(ddq->head.first);
+
+ return (void *) blk;
+}
+
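+/*
+ * Make newly enqueued blocks available for dequeue. The unreferenced end
+ * pointer is only moved forward once all managed threads have passed the
+ * recorded thread progress point and no unmanaged enqueuer still holds a
+ * reference for the previous epoch.
+ */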
+static int
+ddq_check_incoming(ErtsAllctrDDQueue_t *ddq)
+{
+ erts_aint_t ilast = erts_atomic_read_nob(&ddq->tail.data.last);
+ if (((ErtsAllctrDDBlock_t *) ilast) == &ddq->tail.data.marker
+ && ddq->head.first == &ddq->tail.data.marker) {
+ /* Nothing more to do... */
+ return 0;
+ }
+
+ if (ddq->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(ddq->head.next.thr_progress)) {
+ int um_refc_ix;
+ ddq->head.next.thr_progress_reached = 1;
+ um_refc_ix = ddq->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&ddq->tail.data.um_refc[um_refc_ix]) == 0) {
+ /* Move unreferenced end pointer forward... */
+
+ ddq->head.unref_end = ddq->head.next.unref_end;
+
+ if (!ddq->head.used_marker
+ && ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast) {
+ ddq->head.used_marker = 1;
+ ilast = ddq_managed_thread_enqueue(ddq, &ddq->tail.data.marker);
+ }
+
+ if (ddq->head.unref_end == (ErtsAllctrDDBlock_t *) ilast)
+ ERTS_THR_MEMORY_BARRIER;
+ else {
+ ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast;
+ ddq->head.next.thr_progress = erts_thr_progress_later();
+ erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix,
+ um_refc_ix);
+ ddq->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
+ ddq->head.next.thr_progress_reached = 0;
+ }
+ }
+ }
+ return 1;
+}
+
+static ERTS_INLINE void
+store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsAllctrDDQueue_t *ddq)
+{
+ if (!ddq->head.next.thr_progress_reached
+ && (*prev_val == ERTS_THR_PRGR_INVALID
+ || erts_thr_progress_cmp(ddq->head.next.thr_progress,
+ *prev_val) < 0)) {
+ *prev_val = ddq->head.next.thr_progress;
+ }
+}
+
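+/*
+ * Handle blocks that other threads have freed into this allocator
+ * instance: dequeue them and free them locally, or put them back on the
+ * fix free lists for allocators with fixed size types. When use_limit is
+ * set, at most ops_limit operations are performed per call and the
+ * need_thr_progress/need_more_work flags tell the caller to come back.
+ */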
+static ERTS_INLINE int
+handle_delayed_dealloc(Allctr_t *allctr,
+ int allctr_locked,
+ int use_limit,
+ int ops_limit,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work)
+{
+ int need_thr_prgr = 0;
+ int need_mr_wrk = 0;
+ int have_checked_incoming = 0;
+ int ops = 0;
+ ErtsAlcFixList_t *fix;
+ int res;
+ ErtsAllctrDDQueue_t *ddq;
+
+ if (allctr->thread_safe && !allctr_locked)
+ erts_mtx_lock(&allctr->mutex);
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+
+ fix = allctr->fix;
+
+ ddq = &allctr->dd.q;
+
+ res = 0;
+
+ while (1) {
+ Block_t *blk;
+ void *ptr;
+ int ix;
+
+ if (use_limit && ++ops > ops_limit) {
+ if (ddq->head.first != ddq->head.unref_end) {
+ need_mr_wrk = 1;
+ if (need_more_work)
+ *need_more_work |= 1;
+ }
+ break;
+ }
+
+ dequeue:
+ ptr = ddq_dequeue(ddq);
+ if (!ptr) {
+ if (have_checked_incoming)
+ break;
+ need_thr_prgr = ddq_check_incoming(ddq);
+ if (need_thr_progress) {
+ *need_thr_progress |= need_thr_prgr;
+ if (need_thr_prgr)
+ store_earliest_thr_prgr(thr_prgr_p, ddq);
+
+ }
+ have_checked_incoming = 1;
+ goto dequeue;
+ }
+
+ res = 1;
+
+ INC_CC(allctr->calls.this_free);
+
+ if (fix) {
+ ErtsAlcType_t type;
+
+ type = (ErtsAlcType_t) ((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS];
+ ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix[ix].used--;
+ if (fix[ix].allocated < fix[ix].limit
+ && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
+ *((void **) ptr) = fix[ix].list;
+ fix[ix].list = ptr;
+ fix[ix].list_size++;
+ if (!allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 1;
+ erts_set_aux_work_timeout(
+ allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 1);
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ continue;
+ }
+ fix[ix].allocated--;
+ if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
+ blk = UMEM2BLK(ptr);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, ptr);
+ ptr = fix[ix].list;
+ fix[ix].list = *((void **) ptr);
+ fix[ix].list_size--;
+ fix[ix].allocated--;
+ }
+ }
+
+ blk = UMEM2BLK(ptr);
+
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, ptr);
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ }
+
+ if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
+ need_thr_prgr = ddq_check_incoming(ddq);
+ *need_thr_progress |= need_thr_prgr;
+ if (need_thr_prgr)
+ store_earliest_thr_prgr(thr_prgr_p, ddq);
+ }
+
+ if (allctr->thread_safe && !allctr_locked)
+ erts_mtx_unlock(&allctr->mutex);
+ return res;
+}
+
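+/*
+ * Free a block belonging to another allocator instance by enqueueing it
+ * on that instance's delayed dealloc queue. For fixed size types the type
+ * is stored in the block so the owner can update its fix lists; the owner
+ * is notified so that the delayed dealloc work gets handled.
+ */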
+static ERTS_INLINE void
+enqueue_dealloc_other_instance(ErtsAlcType_t type, Allctr_t *allctr, void *ptr)
+{
+ if (allctr->fix)
+ ((UWord *) ptr)[ERTS_ALCU_DD_FIX_TYPE_OFFS] = (UWord) type;
+
+ if (ddq_enqueue(type, &allctr->dd.q, ptr))
+ erts_alloc_notify_delayed_dealloc(allctr->ix);
+}
+
+#endif
+
+#ifdef ERTS_SMP
+void
+erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
+ int limit,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *more_work)
+{
+ handle_delayed_dealloc(allctr,
+ 0,
+ limit,
+ ERTS_ALCU_DD_OPS_LIM_HIGH,
+ need_thr_progress,
+ thr_prgr_p,
+ more_work);
+}
+#endif
+
+#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
+ handle_delayed_dealloc((Allctr), (Locked), 1, \
+ ERTS_ALCU_DD_OPS_LIM_LOW, NULL, NULL, NULL)
+
/* Multi block carrier alloc/realloc/free ... */
/* NOTE! mbc_alloc() may, in case of memory shortage, place the requested
 * block in an sbc.
*/
static ERTS_INLINE void *
-mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
+mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp)
{
Block_t *blk;
+ Uint get_blk_sz;
+ Uint sbmbct;
ASSERT(size);
ASSERT(size < allctr->sbc_threshold);
- *blk_szp = UMEMSZ2BLKSZ(allctr, size);
+ *blk_szp = get_blk_sz = UMEMSZ2BLKSZ(allctr, size);
- blk = (*allctr->get_free_block)(allctr, *blk_szp, NULL, 0);
+ sbmbct = allctr->sbmbc_threshold;
+ if (sbmbct) {
+ if (get_blk_sz < sbmbct) {
+ *alcu_flgsp |= ERTS_ALCU_FLG_SBMBC;
+ if (get_blk_sz + allctr->min_block_size > sbmbct) {
+ /* Since we use block size to determine if blocks are
+ located in sbmbc or not... */
+ get_blk_sz += allctr->min_block_size;
+ }
+ }
+ }
-#if HALFWORD_HEAP
- if (!blk) {
- blk = create_carrier(allctr, *blk_szp, CFLG_MBC|CFLG_FORCE_MSEG);
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
+
+ blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0, *alcu_flgsp);
+
+#ifdef ERTS_SMP
+ if (!blk && allctr->dd.use) {
+ if (ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1))
+ blk = (*allctr->get_free_block)(allctr, get_blk_sz, NULL, 0,
+ *alcu_flgsp);
}
-#else
+#endif
+
if (!blk) {
- blk = create_carrier(allctr, *blk_szp, CFLG_MBC);
- if (!blk) {
- /* Emergency! We couldn't create the carrier as we wanted.
- Try to place it in a sys_alloced sbc. */
- blk = create_carrier(allctr,
- size,
- CFLG_SBC|CFLG_FORCE_SIZE|CFLG_FORCE_SYS_ALLOC);
+ if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC)
+ blk = create_sbmbc(allctr, get_blk_sz);
+ else {
+#if HALFWORD_HEAP
+ blk = create_carrier(allctr, get_blk_sz, CFLG_MBC|CFLG_FORCE_MSEG);
+#else
+ blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
+ if (!blk) {
+ /* Emergency! We couldn't create the carrier as we wanted.
+ Try to place it in a sys_alloced sbc. */
+ blk = create_carrier(allctr,
+ size,
+ (CFLG_SBC
+ | CFLG_FORCE_SIZE
+ | CFLG_FORCE_SYS_ALLOC));
+ }
+#endif
}
}
-#endif
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
if (IS_MBC_BLK(blk)) {
- (*allctr->link_free_block)(allctr, blk);
+ (*allctr->link_free_block)(allctr, blk, *alcu_flgsp);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, blk);
+ (*allctr->unlink_free_block)(allctr, blk, *alcu_flgsp);
}
#endif
@@ -664,7 +1207,8 @@ mbc_alloc_finalize(Allctr_t *allctr,
Uint org_blk_sz,
UWord flags,
Uint want_blk_sz,
- int valid_blk_info)
+ int valid_blk_info,
+ Uint32 alcu_flgs)
{
Uint blk_sz;
Uint nxt_blk_sz;
@@ -700,7 +1244,7 @@ mbc_alloc_finalize(Allctr_t *allctr,
SET_PREV_BLK_FREE(nxt_nxt_blk);
}
}
- (*allctr->link_free_block)(allctr, nxt_blk);
+ (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_NOT_LAST_BLK(blk));
ASSERT(IS_FREE_BLK(nxt_blk));
@@ -741,7 +1285,7 @@ mbc_alloc_finalize(Allctr_t *allctr,
: IS_NOT_LAST_BLK(blk));
}
- STAT_MBC_BLK_ALLOC(allctr, blk_sz);
+ STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
ASSERT(blk_sz == BLK_SZ(blk));
@@ -761,7 +1305,8 @@ mbc_alloc(Allctr_t *allctr, Uint size)
{
Block_t *blk;
Uint blk_sz;
- blk = mbc_alloc_block(allctr, size, &blk_sz);
+ Uint32 alcu_flgs = 0;
+ blk = mbc_alloc_block(allctr, size, &blk_sz, &alcu_flgs);
if (!blk)
return NULL;
if (IS_MBC_BLK(blk))
@@ -770,7 +1315,8 @@ mbc_alloc(Allctr_t *allctr, Uint size)
BLK_SZ(blk),
GET_BLK_HDR_FLGS(blk),
blk_sz,
- 1);
+ 1,
+ alcu_flgs);
return BLK2UMEM(blk);
}
@@ -779,6 +1325,7 @@ mbc_free(Allctr_t *allctr, void *p)
{
Uint is_first_blk;
Uint is_last_blk;
+ Uint32 alcu_flgs = 0;
Uint blk_sz;
Block_t *blk;
Block_t *nxt_blk;
@@ -788,13 +1335,15 @@ mbc_free(Allctr_t *allctr, void *p)
blk = UMEM2BLK(p);
blk_sz = BLK_SZ(blk);
+ if (blk_sz < allctr->sbmbc_threshold)
+ alcu_flgs |= ERTS_ALCU_FLG_SBMBC;
ASSERT(IS_MBC_BLK(blk));
ASSERT(blk_sz >= allctr->min_block_size);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- STAT_MBC_BLK_FREE(allctr, blk_sz);
+ STAT_MBC_BLK_FREE(allctr, blk_sz, alcu_flgs);
is_first_blk = IS_FIRST_BLK(blk);
is_last_blk = IS_LAST_BLK(blk);
@@ -802,7 +1351,7 @@ mbc_free(Allctr_t *allctr, void *p)
if (!is_first_blk && IS_PREV_BLK_FREE(blk)) {
/* Coalesce with previous block... */
blk = PREV_BLK(blk);
- (*allctr->unlink_free_block)(allctr, blk);
+ (*allctr->unlink_free_block)(allctr, blk, alcu_flgs);
blk_sz += BLK_SZ(blk);
is_first_blk = IS_FIRST_BLK(blk);
@@ -818,7 +1367,7 @@ mbc_free(Allctr_t *allctr, void *p)
nxt_blk = NXT_BLK(blk);
if (IS_FREE_BLK(nxt_blk)) {
/* Coalesce with next block... */
- (*allctr->unlink_free_block)(allctr, nxt_blk);
+ (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
blk_sz += BLK_SZ(nxt_blk);
SET_BLK_SZ(blk, blk_sz);
@@ -850,16 +1399,20 @@ mbc_free(Allctr_t *allctr, void *p)
if (is_first_blk
&& is_last_blk
- && allctr->main_carrier != FBLK2MBC(allctr, blk))
- destroy_carrier(allctr, blk);
+ && allctr->main_carrier != FBLK2MBC(allctr, blk)) {
+ if (alcu_flgs & ERTS_ALCU_FLG_SBMBC)
+ destroy_sbmbc(allctr, blk);
+ else
+ destroy_carrier(allctr, blk);
+ }
else {
- (*allctr->link_free_block)(allctr, blk);
+ (*allctr->link_free_block)(allctr, blk, alcu_flgs);
HARD_CHECK_BLK_CARRIER(allctr, blk);
}
}
static void *
-mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
+mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
{
void *new_p;
Uint old_blk_sz;
@@ -867,12 +1420,17 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
#ifndef MBC_REALLOC_ALWAYS_MOVES
Block_t *new_blk, *cand_blk;
Uint cand_blk_sz;
- Uint blk_sz;
+ Uint blk_sz, get_blk_sz;
Block_t *nxt_blk;
Uint nxt_blk_sz;
Uint is_last_blk;
#endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
+
ASSERT(p);
ASSERT(size);
ASSERT(size < allctr->sbc_threshold);
@@ -883,10 +1441,16 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
ASSERT(old_blk_sz >= allctr->min_block_size);
#ifdef MBC_REALLOC_ALWAYS_MOVES
- if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
#else /* !MBC_REALLOC_ALWAYS_MOVES */
- blk_sz = UMEMSZ2BLKSZ(allctr, size);
+ get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
+ if ((alcu_flgs & ERTS_ALCU_FLG_SBMBC)
+ && (blk_sz + allctr->min_block_size > allctr->sbmbc_threshold)) {
+ /* Since we use block size to determine if blocks are
+ located in sbmbc or not... */
+ get_blk_sz = blk_sz + allctr->min_block_size;
+ }
ASSERT(IS_ALLOCED_BLK(blk));
ASSERT(IS_MBC_BLK(blk));
@@ -901,6 +1465,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
Uint diff_sz_val = old_blk_sz - blk_sz;
Uint old_blk_sz_val = old_blk_sz;
+ if (get_blk_sz >= old_blk_sz)
+ return p;
+
if (diff_sz_val >= (~((Uint) 0) / 100)) {
/* div both by 128 */
old_blk_sz_val >>= 7;
@@ -909,7 +1476,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
/* Avoid fragmentation by moving the block if it is shrunk much */
if (100*diff_sz_val > allctr->mbc_move_threshold*old_blk_sz_val) {
- if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
cand_blk_sz = old_blk_sz;
@@ -926,10 +1493,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
}
new_blk = (*allctr->get_free_block)(allctr,
- blk_sz,
+ get_blk_sz,
cand_blk,
- cand_blk_sz);
-
+ cand_blk_sz,
+ alcu_flgs);
if (new_blk || cand_blk != blk)
goto move_into_new_blk;
}
@@ -952,8 +1519,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
nxt_blk_sz,
SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
- STAT_MBC_BLK_FREE(allctr, old_blk_sz);
- STAT_MBC_BLK_ALLOC(allctr, blk_sz);
+ STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
ASSERT(BLK_SZ(blk) >= allctr->min_block_size);
@@ -964,7 +1531,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
if (IS_FREE_BLK(nxt_nxt_blk)) {
/* Coalesce with next free block... */
nxt_blk_sz += BLK_SZ(nxt_nxt_blk);
- (*allctr->unlink_free_block)(allctr, nxt_nxt_blk);
+ (*allctr->unlink_free_block)(allctr, nxt_nxt_blk, alcu_flgs);
SET_BLK_SZ(nxt_blk, nxt_blk_sz);
is_last_blk = IS_LAST_BLK(nxt_nxt_blk);
@@ -979,7 +1546,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
}
}
- (*allctr->link_free_block)(allctr, nxt_blk);
+ (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
@@ -1009,12 +1576,12 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
if (!is_last_blk) {
nxt_blk = NXT_BLK(blk);
nxt_blk_sz = BLK_SZ(nxt_blk);
- if (IS_FREE_BLK(nxt_blk) && blk_sz <= old_blk_sz + nxt_blk_sz) {
+ if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) {
/* Grow into next block... */
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, nxt_blk);
+ (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
nxt_blk_sz -= blk_sz - old_blk_sz;
is_last_blk = IS_LAST_BLK(nxt_blk);
@@ -1051,13 +1618,13 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
else
SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
- (*allctr->link_free_block)(allctr, nxt_blk);
+ (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_FREE_BLK(nxt_blk));
}
- STAT_MBC_BLK_FREE(allctr, old_blk_sz);
- STAT_MBC_BLK_ALLOC(allctr, blk_sz);
+ STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
+ STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
@@ -1088,7 +1655,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
}
}
- if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
/* Need to grow in another block */
@@ -1108,7 +1675,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
}
}
- if (cand_blk_sz < blk_sz) {
+ if (cand_blk_sz < get_blk_sz) {
/* We won't fit in cand_blk; get a new one */
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
@@ -1127,9 +1694,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
/* We will at least fit in cand_blk */
new_blk = (*allctr->get_free_block)(allctr,
- blk_sz,
+ get_blk_sz,
cand_blk,
- cand_blk_sz);
+ cand_blk_sz,
+ alcu_flgs);
move_into_new_blk:
/*
* new_blk, and cand_blk have to be correctly set
@@ -1142,7 +1710,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
BLK_SZ(new_blk),
GET_BLK_HDR_FLGS(new_blk),
blk_sz,
- 1);
+ 1,
+ alcu_flgs);
new_p = BLK2UMEM(new_blk);
sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
mbc_free(allctr, p);
@@ -1164,7 +1733,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, new_blk); /* prev */
+ (*allctr->unlink_free_block)(allctr, new_blk, alcu_flgs); /* prev */
if (is_last_blk)
new_blk_flgs |= LAST_BLK_HDR_FLG;
@@ -1173,7 +1742,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
if (IS_FREE_BLK(nxt_blk)) {
new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
new_blk_sz += BLK_SZ(nxt_blk);
- (*allctr->unlink_free_block)(allctr, nxt_blk);
+ (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
}
}
@@ -1196,9 +1765,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, UWord flgs)
new_blk_sz,
new_blk_flgs,
blk_sz,
- 0);
+ 0,
+ alcu_flgs);
- STAT_MBC_BLK_FREE(allctr, old_blk_sz);
+ STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
return new_p;
}
@@ -1243,6 +1813,100 @@ do { \
#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
#endif
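+/*
+ * Small block multi block carriers (sbmbc) hold blocks smaller than the
+ * sbmbc_threshold. They are plain erts_alloc()'ed carriers of sbmbc_size
+ * bytes and are accounted for in the sbmbcs statistics rather than in the
+ * mseg/sys_alloc carrier statistics.
+ */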
+static Block_t *
+create_sbmbc(Allctr_t *allctr, Uint umem_sz)
+{
+ Block_t *blk;
+ Uint blk_sz;
+ Uint crr_sz = allctr->sbmbc_size;
+ Carrier_t *crr;
+
+#if HALFWORD_HEAP
+ if (allctr->mseg_opt.low_mem)
+ crr = erts_alloc(ERTS_ALC_T_SBMBC_LOW, crr_sz);
+ else
+#endif
+ crr = erts_alloc(ERTS_ALC_T_SBMBC, crr_sz);
+
+ INC_CC(allctr->calls.sbmbc_alloc);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC);
+
+ blk = MBC2FBLK(allctr, crr);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
+ crr_sz -= sizeof(UWord);
+#endif
+
+ blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
+
+ SET_MBC_BLK_FTR(((UWord *) blk)[-1]);
+ SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ *((Carrier_t **) NXT_BLK(blk)) = crr;
+#endif
+
+ link_carrier(&allctr->sbmbc_list, crr);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
+ crr_sz += sizeof(UWord);
+#endif
+
+ STAT_SBMBC_ALLOC(allctr, crr_sz);
+ CHECK_1BLK_CARRIER(allctr, 0, 0, crr, crr_sz, blk, blk_sz);
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
+ crr_sz -= sizeof(UWord);
+#endif
+ if (allctr->creating_mbc)
+ (*allctr->creating_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC);
+
+ DEBUG_SAVE_ALIGNMENT(crr);
+ return blk;
+}
+
+static void
+destroy_sbmbc(Allctr_t *allctr, Block_t *blk)
+{
+ Uint crr_sz;
+ Carrier_t *crr;
+
+ ASSERT(IS_FIRST_BLK(blk));
+
+ ASSERT(IS_MBC_BLK(blk));
+
+ crr = FBLK2MBC(allctr, blk);
+ crr_sz = CARRIER_SZ(crr);
+
+#ifdef DEBUG
+ if (!allctr->stopped) {
+ ASSERT(IS_LAST_BLK(blk));
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ (*allctr->link_free_block)(allctr, blk, ERTS_ALCU_FLG_SBMBC);
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+ (*allctr->unlink_free_block)(allctr, blk, ERTS_ALCU_FLG_SBMBC);
+#endif
+ }
+#endif
+
+ STAT_SBMBC_FREE(allctr, crr_sz);
+
+ unlink_carrier(&allctr->sbmbc_list, crr);
+ if (allctr->destroying_mbc)
+ (*allctr->destroying_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC);
+
+ INC_CC(allctr->calls.sbmbc_free);
+
+#if HALFWORD_HEAP
+ if (allctr->mseg_opt.low_mem)
+ erts_free(ERTS_ALC_T_SBMBC_LOW, crr);
+ else
+#endif
+ erts_free(ERTS_ALC_T_SBMBC, crr);
+}
static Block_t *
create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
@@ -1268,14 +1932,14 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
goto try_sys_alloc;
if (flags & CFLG_FORCE_MSEG)
goto try_mseg;
- if (erts_mseg_no() >= max_mseg_carriers)
+ if (erts_mseg_no(&allctr->mseg_opt) >= max_mseg_carriers)
goto try_sys_alloc;
if (flags & CFLG_SBC) {
- if (allctr->sbcs.curr_mseg.no >= allctr->max_mseg_sbcs)
+ if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
goto try_sys_alloc;
}
else {
- if (allctr->mbcs.curr_mseg.no >= allctr->max_mseg_mbcs)
+ if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
goto try_sys_alloc;
}
@@ -1289,7 +1953,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
if (crr_sz < allctr->mbc_header_size + blk_sz)
crr_sz = allctr->mbc_header_size + blk_sz;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(UWord))
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
crr_sz += sizeof(UWord);
#endif
}
@@ -1330,7 +1994,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
&& bcrr_sz < allctr->smallest_mbc_size)
bcrr_sz = allctr->smallest_mbc_size;
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(UWord))
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
bcrr_sz += sizeof(UWord);
#endif
@@ -1385,7 +2049,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
blk = MBC2FBLK(allctr, crr);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(UWord))
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
crr_sz -= sizeof(UWord);
#endif
@@ -1406,16 +2070,16 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
link_carrier(&allctr->mbc_list, crr);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(UWord))
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
crr_sz += sizeof(UWord);
#endif
CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (sizeof(Unit_t) == sizeof(UWord))
+ if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
crr_sz -= sizeof(UWord);
#endif
if (allctr->creating_mbc)
- (*allctr->creating_mbc)(allctr, crr);
+ (*allctr->creating_mbc)(allctr, crr, 0);
}
@@ -1595,9 +2259,9 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
ASSERT(IS_LAST_BLK(blk));
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- (*allctr->link_free_block)(allctr, blk);
+ (*allctr->link_free_block)(allctr, blk, 0);
HARD_CHECK_BLK_CARRIER(allctr, blk);
- (*allctr->unlink_free_block)(allctr, blk);
+ (*allctr->unlink_free_block)(allctr, blk, 0);
#endif
}
#endif
@@ -1614,7 +2278,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
unlink_carrier(&allctr->mbc_list, crr);
if (allctr->destroying_mbc)
- (*allctr->destroying_mbc)(allctr, crr);
+ (*allctr->destroying_mbc)(allctr, crr, 0);
}
@@ -1658,14 +2322,21 @@ static struct {
Eterm lmbcs;
Eterm smbcs;
Eterm mbcgs;
+ Eterm sbmbcs;
+ Eterm sbmbct;
#if HAVE_ERTS_MSEG
Eterm mmc;
#endif
Eterm ycs;
+ /* Eterm sbmbcs; */
+
+ Eterm fix_types;
+
Eterm mbcs;
Eterm sbcs;
+
Eterm sys_alloc_carriers_size;
#if HAVE_ERTS_MSEG
Eterm mseg_alloc_carriers_size;
@@ -1688,11 +2359,15 @@ static struct {
Eterm mseg_dealloc;
Eterm mseg_realloc;
#endif
+ Eterm sbmbc_alloc;
+ Eterm sbmbc_free;
#ifdef DEBUG
Eterm end_of_atoms;
#endif
} am;
+static Eterm fix_type_atoms[ERTS_ALC_NO_FIXED_SIZES];
+
static ERTS_INLINE void atom_init(Eterm *atom, char *name)
{
*atom = am_atom_put(name, strlen(name));
@@ -1713,6 +2388,7 @@ init_atoms(Allctr_t *allctr)
erts_mtx_lock(&init_atoms_mtx);
if (!atoms_initialized) {
+ int ix;
#ifdef DEBUG
Eterm *atom;
@@ -1746,14 +2422,21 @@ init_atoms(Allctr_t *allctr)
AM_INIT(lmbcs);
AM_INIT(smbcs);
AM_INIT(mbcgs);
+ AM_INIT(sbmbcs);
+ AM_INIT(sbmbct);
#if HAVE_ERTS_MSEG
AM_INIT(mmc);
#endif
AM_INIT(ycs);
+ /*AM_INIT(sbmbcs);*/
+
+ AM_INIT(fix_types);
+
AM_INIT(mbcs);
AM_INIT(sbcs);
+
AM_INIT(sys_alloc_carriers_size);
#if HAVE_ERTS_MSEG
AM_INIT(mseg_alloc_carriers_size);
@@ -1776,12 +2459,21 @@ init_atoms(Allctr_t *allctr)
AM_INIT(mseg_dealloc);
AM_INIT(mseg_realloc);
#endif
+ AM_INIT(sbmbc_free);
+ AM_INIT(sbmbc_alloc);
#ifdef DEBUG
for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
ASSERT(*atom != THE_NON_VALUE);
}
#endif
+
+ for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
+ ErtsAlcType_t n = ERTS_ALC_N_MIN_A_FIXED_SIZE + ix;
+ char *name = (char *) ERTS_ALC_N2TD(n);
+ size_t len = strlen(name);
+ fix_type_atoms[ix] = am_atom_put(name, len);
+ }
}
@@ -1860,6 +2552,48 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
}
static Eterm
+sz_info_fix(Allctr_t *allctr,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res;
+ int ix;
+ ErtsAlcFixList_t *fix = allctr->fix;
+
+ ASSERT(fix);
+
+ res = NIL;
+
+ for (ix = ERTS_ALC_NO_FIXED_SIZES-1; ix >= 0; ix--) {
+ ErtsAlcType_t n = ix + ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ Uint alloced = (fix[ix].type_size * fix[ix].allocated);
+ Uint used = fix[ix].type_size*fix[ix].used;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to,
+ arg,
+ "fix type: %s %bpu %bpu\n",
+ (char *) ERTS_ALC_N2TD(n),
+ alloced,
+ used);
+ }
+
+ if (hpp || szp) {
+ add_3tup(hpp, szp, &res,
+ fix_type_atoms[ix],
+ bld_unstable_uint(hpp, szp, alloced),
+ bld_unstable_uint(hpp, szp, used));
+ }
+ }
+
+ return res;
+}
+
+static Eterm
sz_info_carriers(Allctr_t *allctr,
CarriersStats_t *cs,
char *prefix,
@@ -1869,7 +2603,9 @@ sz_info_carriers(Allctr_t *allctr,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
- Uint curr_size = cs->curr_mseg.size + cs->curr_sys_alloc.size;
+ Uint curr_size = (cs == &allctr->sbmbcs
+ ? cs->curr.small_block.size
+ : cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size);
if (print_to_p) {
int to = *print_to_p;
@@ -1917,8 +2653,17 @@ info_carriers(Allctr_t *allctr,
Uint *szp)
{
Eterm res = THE_NON_VALUE;
- Uint curr_no = cs->curr_mseg.no + cs->curr_sys_alloc.no;
- Uint curr_size = cs->curr_mseg.size + cs->curr_sys_alloc.size;
+ Uint curr_no, curr_size;
+ int small_block = cs == &allctr->sbmbcs;
+
+ if (small_block) {
+ curr_no = cs->curr.small_block.no;
+ curr_size = cs->curr.small_block.size;
+ }
+ else {
+ curr_no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
+ curr_size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
+ }
if (print_to_p) {
int to = *print_to_p;
@@ -1944,18 +2689,20 @@ info_carriers(Allctr_t *allctr,
curr_no,
cs->max.no,
cs->max_ever.no);
+ if (!small_block) {
#if HAVE_ERTS_MSEG
- erts_print(to,
- arg,
- "%smseg carriers: %bpu\n",
- prefix,
- cs->curr_mseg.no);
+ erts_print(to,
+ arg,
+ "%smseg carriers: %bpu\n",
+ prefix,
+ cs->curr.norm.mseg.no);
#endif
- erts_print(to,
- arg,
- "%ssys_alloc carriers: %bpu\n",
- prefix,
- cs->curr_sys_alloc.no);
+ erts_print(to,
+ arg,
+ "%ssys_alloc carriers: %bpu\n",
+ prefix,
+ cs->curr.norm.sys_alloc.no);
+ }
erts_print(to,
arg,
"%scarriers size: %beu %bpu %bpu\n",
@@ -1963,43 +2710,49 @@ info_carriers(Allctr_t *allctr,
curr_size,
cs->max.size,
cs->max_ever.size);
+ if (!small_block) {
#if HAVE_ERTS_MSEG
- erts_print(to,
- arg,
- "%smseg carriers size: %bpu\n",
- prefix,
- cs->curr_mseg.size);
+ erts_print(to,
+ arg,
+ "%smseg carriers size: %bpu\n",
+ prefix,
+ cs->curr.norm.mseg.size);
#endif
- erts_print(to,
- arg,
- "%ssys_alloc carriers size: %bpu\n",
- prefix,
- cs->curr_sys_alloc.size);
+ erts_print(to,
+ arg,
+ "%ssys_alloc carriers size: %bpu\n",
+ prefix,
+ cs->curr.norm.sys_alloc.size);
+ }
}
if (hpp || szp) {
res = NIL;
- add_2tup(hpp, szp, &res,
- am.sys_alloc_carriers_size,
- bld_unstable_uint(hpp, szp, cs->curr_sys_alloc.size));
+ if (!small_block) {
+ add_2tup(hpp, szp, &res,
+ am.sys_alloc_carriers_size,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.size));
#if HAVE_ERTS_MSEG
- add_2tup(hpp, szp, &res,
- am.mseg_alloc_carriers_size,
- bld_unstable_uint(hpp, szp, cs->curr_mseg.size));
+ add_2tup(hpp, szp, &res,
+ am.mseg_alloc_carriers_size,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.size));
#endif
+ }
add_4tup(hpp, szp, &res,
am.carriers_size,
bld_unstable_uint(hpp, szp, curr_size),
bld_unstable_uint(hpp, szp, cs->max.size),
bld_unstable_uint(hpp, szp, cs->max_ever.size));
- add_2tup(hpp, szp, &res,
- am.sys_alloc_carriers,
- bld_unstable_uint(hpp, szp, cs->curr_sys_alloc.no));
+ if (!small_block) {
+ add_2tup(hpp, szp, &res,
+ am.sys_alloc_carriers,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.sys_alloc.no));
#if HAVE_ERTS_MSEG
- add_2tup(hpp, szp, &res,
- am.mseg_alloc_carriers,
- bld_unstable_uint(hpp, szp, cs->curr_mseg.no));
+ add_2tup(hpp, szp, &res,
+ am.mseg_alloc_carriers,
+ bld_unstable_uint(hpp, szp, cs->curr.norm.mseg.no));
#endif
+ }
add_4tup(hpp, szp, &res,
am.carriers,
bld_unstable_uint(hpp, szp, curr_no),
@@ -2077,6 +2830,9 @@ info_calls(Allctr_t *allctr,
PRINT_CC_5(to, arg, prefix, "free", allctr->calls.this_free);
PRINT_CC_5(to, arg, prefix, "realloc", allctr->calls.this_realloc);
+ PRINT_CC_4(to, arg, "sbmbc_alloc", allctr->calls.sbmbc_alloc);
+ PRINT_CC_4(to, arg, "sbmbc_free", allctr->calls.sbmbc_free);
+
#if HAVE_ERTS_MSEG
PRINT_CC_4(to, arg, "mseg_alloc", allctr->calls.mseg_alloc);
PRINT_CC_4(to, arg, "mseg_dealloc", allctr->calls.mseg_dealloc);
@@ -2128,6 +2884,14 @@ info_calls(Allctr_t *allctr,
bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.no));
#endif
add_3tup(hpp, szp, &res,
+ am.sbmbc_free,
+ bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_free.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_free.no));
+ add_3tup(hpp, szp, &res,
+ am.sbmbc_alloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_alloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.sbmbc_alloc.no));
+ add_3tup(hpp, szp, &res,
allctr->name.realloc,
bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.giga_no),
bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.no));
@@ -2191,7 +2955,9 @@ info_options(Allctr_t *allctr,
#endif
"option lmbcs: %beu\n"
"option smbcs: %beu\n"
- "option mbcgs: %beu\n",
+ "option mbcgs: %beu\n"
+ "option sbmbcs: %beu\n"
+ "option sbmbct: %beu\n",
topt,
allctr->ramv ? "true" : "false",
#if HALFWORD_HEAP
@@ -2211,7 +2977,9 @@ info_options(Allctr_t *allctr,
#endif
allctr->largest_mbc_size,
allctr->smallest_mbc_size,
- allctr->mbc_growth_stages);
+ allctr->mbc_growth_stages,
+ allctr->sbmbc_size,
+ allctr->sbmbc_threshold);
}
res = (*allctr->info_options)(allctr, "option ", print_to_p, print_to_arg,
@@ -2219,6 +2987,12 @@ info_options(Allctr_t *allctr,
if (hpp || szp) {
add_2tup(hpp, szp, &res,
+ am.sbmbct,
+ bld_uint(hpp, szp, allctr->sbmbc_threshold));
+ add_2tup(hpp, szp, &res,
+ am.sbmbcs,
+ bld_uint(hpp, szp, allctr->sbmbc_size));
+ add_2tup(hpp, szp, &res,
am.mbcgs,
bld_uint(hpp, szp, allctr->mbc_growth_stages));
add_2tup(hpp, szp, &res,
@@ -2259,9 +3033,7 @@ info_options(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.low, allctr->mseg_opt.low_mem ? am_true : am_false);
#endif
add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
- add_2tup(hpp, szp, &res, am.t, (allctr->t
- ? bld_uint(hpp, szp, (Uint) allctr->t)
- : am_false));
+ add_2tup(hpp, szp, &res, am.t, (allctr->t ? am_true : am_false));
add_2tup(hpp, szp, &res, am.e, am_true);
}
@@ -2285,10 +3057,10 @@ update_max_ever_values(CarriersStats_t *cs)
static ERTS_INLINE void
reset_max_values(CarriersStats_t *cs)
{
- cs->max.no = cs->curr_mseg.no + cs->curr_sys_alloc.no;
- cs->max.size = cs->curr_mseg.size + cs->curr_sys_alloc.size;
- cs->blocks.max.no = cs->blocks.curr.no;
- cs->blocks.max.size = cs->blocks.curr.size;
+ cs->max.no = cs->curr.norm.mseg.no + cs->curr.norm.sys_alloc.no;
+ cs->max.size = cs->curr.norm.mseg.size + cs->curr.norm.sys_alloc.size;
+ cs->blocks.max.no = cs->blocks.curr.no;
+ cs->blocks.max.size = cs->blocks.curr.size;
}
@@ -2367,7 +3139,7 @@ erts_alcu_sz_info(Allctr_t *allctr,
Uint **hpp,
Uint *szp)
{
- Eterm res, mbcs, sbcs;
+ Eterm res, sbmbcs, mbcs, sbcs, fix = THE_NON_VALUE;
res = THE_NON_VALUE;
@@ -2384,29 +3156,40 @@ erts_alcu_sz_info(Allctr_t *allctr,
erts_mtx_lock(&allctr->mutex);
#endif
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+
if (hpp || szp)
ensure_atoms_initialized(allctr);
/* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
- = allctr->sbcs.curr_mseg.no + allctr->sbcs.curr_sys_alloc.no;
+ = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
+ update_max_ever_values(&allctr->sbmbcs);
update_max_ever_values(&allctr->mbcs);
update_max_ever_values(&allctr->sbcs);
- mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
- print_to_arg, hpp, szp);
- sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
- print_to_arg, hpp, szp);
+ if (allctr->fix)
+ fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp);
+ sbmbcs = sz_info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
+ print_to_arg, hpp, szp);
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
+ add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs);
+ if (allctr->fix)
+ add_2tup(hpp, szp, &res, am.fix_types, fix);
}
if (begin_max_period) {
+ reset_max_values(&allctr->sbmbcs);
reset_max_values(&allctr->mbcs);
reset_max_values(&allctr->sbcs);
}
@@ -2428,7 +3211,7 @@ erts_alcu_info(Allctr_t *allctr,
Uint **hpp,
Uint *szp)
{
- Eterm res, sett, mbcs, sbcs, calls;
+ Eterm res, sett, sbmbcs, mbcs, sbcs, calls, fix = THE_NON_VALUE;
res = THE_NON_VALUE;
@@ -2445,14 +3228,17 @@ erts_alcu_info(Allctr_t *allctr,
erts_mtx_lock(&allctr->mutex);
#endif
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
+
if (hpp || szp)
ensure_atoms_initialized(allctr);
/* Update sbc values not continuously updated */
allctr->sbcs.blocks.curr.no
- = allctr->sbcs.curr_mseg.no + allctr->sbcs.curr_sys_alloc.no;
+ = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
+ update_max_ever_values(&allctr->sbmbcs);
update_max_ever_values(&allctr->mbcs);
update_max_ever_values(&allctr->sbcs);
@@ -2464,11 +3250,15 @@ erts_alcu_info(Allctr_t *allctr,
ERTS_ALCU_VSN_STR);
}
- sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
- mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
- print_to_arg, hpp, szp);
- sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
- print_to_arg, hpp, szp);
+ sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
+ if (allctr->fix)
+ fix = sz_info_fix(allctr, print_to_p, print_to_arg, hpp, szp);
+ sbmbcs = info_carriers(allctr, &allctr->sbmbcs, "sbmbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
+ print_to_arg, hpp, szp);
calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
if (hpp || szp) {
@@ -2477,6 +3267,9 @@ erts_alcu_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.calls, calls);
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
+ add_2tup(hpp, szp, &res, am.sbmbcs, sbmbcs);
+ if (allctr->fix)
+ add_2tup(hpp, szp, &res, am.fix_types, fix);
add_2tup(hpp, szp, &res, am.options, sett);
add_3tup(hpp, szp, &res,
am.versions,
@@ -2485,6 +3278,7 @@ erts_alcu_info(Allctr_t *allctr,
}
if (begin_max_period) {
+ reset_max_values(&allctr->sbmbcs);
reset_max_values(&allctr->mbcs);
reset_max_values(&allctr->sbcs);
}
@@ -2500,7 +3294,7 @@ erts_alcu_info(Allctr_t *allctr,
void
-erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size)
+erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
{
#ifdef USE_THREADS
@@ -2508,14 +3302,28 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size)
erts_mtx_lock(&allctr->mutex);
#endif
- size->carriers = allctr->mbcs.curr_mseg.size;
- size->carriers += allctr->mbcs.curr_sys_alloc.size;
- size->carriers += allctr->sbcs.curr_mseg.size;
- size->carriers += allctr->sbcs.curr_sys_alloc.size;
+ size->carriers = allctr->mbcs.curr.norm.mseg.size;
+ size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
+ size->carriers += allctr->sbmbcs.curr.small_block.size;
+ size->carriers += allctr->sbcs.curr.norm.mseg.size;
+ size->carriers += allctr->sbcs.curr.norm.sys_alloc.size;
size->blocks = allctr->mbcs.blocks.curr.size;
+ size->blocks += allctr->sbmbcs.blocks.curr.size;
size->blocks += allctr->sbcs.blocks.curr.size;
+ if (fi) {
+ int ix;
+ for (ix = 0; ix < fisz; ix++) {
+ if (allctr->fix) {
+ fi[ix].allocated += (allctr->fix[ix].type_size
+ * allctr->fix[ix].allocated);
+ fi[ix].used += (allctr->fix[ix].type_size
+ * allctr->fix[ix].used);
+ }
+ }
+ }
+
#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
@@ -2529,12 +3337,16 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
Allctr_t *allctr = (Allctr_t *) extra;
void *res;
+ ErtsAlcFixList_t *fix;
ASSERT(initialized);
ASSERT(allctr);
- ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
#if ALLOC_ZERO_EQ_NULL
if (!size)
@@ -2543,18 +3355,61 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
INC_CC(allctr->calls.this_alloc);
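+ /*
+  * Fixed size allocation: reuse a block from the per type free list when
+  * one is available. On a miss the request is, when needed, padded so the
+  * user area can hold the free list link and the fix type word, and then
+  * falls through to an ordinary allocation.
+  */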
+ fix = allctr->fix;
+ if (fix) {
+ int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix[ix].used++;
+ res = fix[ix].list;
+ if (res) {
+ fix[ix].list_size--;
+ fix[ix].list = *((void **) res);
+ if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
+ void *p = fix[ix].list;
+ Block_t *blk;
+ fix[ix].list = *((void **) p);
+ fix[ix].list_size--;
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, p);
+ fix[ix].allocated--;
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ return res;
+ }
+ if (size < 2*sizeof(UWord))
+ size += sizeof(UWord);
+ if (fix[ix].limit < fix[ix].used)
+ fix[ix].limit = fix[ix].used;
+ if (fix[ix].max_used < fix[ix].used)
+ fix[ix].max_used = fix[ix].used;
+ fix[ix].allocated++;
+ }
+
if (size >= allctr->sbc_threshold) {
+ Block_t *blk;
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
#if HALFWORD_HEAP
- Block_t *blk = create_carrier(allctr, size,
- CFLG_SBC | CFLG_FORCE_MSEG);
+ blk = create_carrier(allctr, size,
+ CFLG_SBC | CFLG_FORCE_MSEG);
#else
- Block_t *blk = create_carrier(allctr, size, CFLG_SBC);
+ blk = create_carrier(allctr, size, CFLG_SBC);
#endif
res = blk ? BLK2UMEM(blk) : NULL;
}
else
res = mbc_alloc(allctr, size);
+ if (!res && fix) {
+ int ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ fix[ix].allocated--;
+ fix[ix].used--;
+ }
return res;
}
@@ -2583,29 +3438,28 @@ erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
return res;
}
+#ifdef ERTS_SMP
+
void *
erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
+ int ix;
Allctr_t *allctr;
- int unlock;
void *res;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
+ ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- }
res = do_erts_alcu_alloc(type, allctr, size);
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
@@ -2616,51 +3470,96 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
- Allctr_t *allctr;
+ int pref_ix;
+ Allctr_t *pref_allctr;
void *res;
- ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
- ASSERT(ix > 0);
- if (ix >= tspec->size)
- ix = (ix % (tspec->size - 1)) + 1;
- allctr = tspec->allctr[ix];
- erts_mtx_lock(&allctr->mutex);
- res = do_erts_alcu_alloc(type, allctr, size + sizeof(UWord));
- if (res) {
- *((Allctr_t **) res) = allctr;
- res = (void *) (((char *) res) + sizeof(UWord));
- }
- erts_mtx_unlock(&allctr->mutex);
+ pref_ix = get_pref_allctr(extra, &pref_allctr);
+
+ if (pref_allctr->thread_safe)
+ erts_mtx_lock(&pref_allctr->mutex);
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
+
+ res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
+
+ if (res)
+ res = put_used_allctr(res, pref_ix, size);
+
DEBUG_CHECK_ALIGNMENT(res);
+
+
return res;
}
#endif
+#endif
+
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void
do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
{
+ int ix;
Allctr_t *allctr = (Allctr_t *) extra;
ASSERT(initialized);
ASSERT(allctr);
- ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (p) {
+ ErtsAlcFixList_t *fix = allctr->fix;
Block_t *blk;
INC_CC(allctr->calls.this_free);
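+ /*
+  * Fixed size deallocation: cache the block on the per type free list
+  * when below the limits and schedule aux work to later shrink the list;
+  * otherwise free the block for real, possibly also releasing one cached
+  * block when the list is over its limit.
+  */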
+ if (fix) {
+ ix = type - ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 1);
+ fix[ix].used--;
+ if (fix[ix].allocated < fix[ix].limit
+ && fix[ix].list_size < ERTS_ALCU_FIX_MAX_LIST_SZ) {
+ *((void **) p) = fix[ix].list;
+ fix[ix].list = p;
+ fix[ix].list_size++;
+ if (!allctr->fix_shrink_scheduled) {
+ allctr->fix_shrink_scheduled = 1;
+ erts_set_aux_work_timeout(
+ allctr->ix,
+ (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ 1);
+ }
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
+ return;
+ }
+ fix[ix].allocated--;
+ if (fix[ix].list && fix[ix].allocated > fix[ix].limit) {
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, p);
+ p = fix[ix].list;
+ fix[ix].list = *((void **) p);
+ fix[ix].list_size--;
+ fix[ix].allocated--;
+ }
+ }
+
blk = UMEM2BLK(p);
if (IS_SBC_BLK(blk))
destroy_carrier(allctr, blk);
else
mbc_free(allctr, p);
+ ERTS_DBG_CHK_FIX_LIST(allctr, fix, ix, 0);
}
}
@@ -2680,44 +3579,56 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
erts_mtx_unlock(&allctr->mutex);
}
+#ifdef ERTS_SMP
+
void
erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
- int unlock;
+ int ix;
Allctr_t *allctr;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
+ ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- }
do_erts_alcu_free(type, allctr, p);
- if (unlock)
+
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
}
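+/*
+ * A thread preferred block is freed by the instance that allocated it. If
+ * that is not the calling thread's preferred instance, the block is put
+ * on the owner's delayed dealloc queue instead of taking the owner's lock.
+ */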
void
-erts_alcu_free_thr_pref(ErtsAlcType_t type, void *unused, void *p)
+erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
{
if (p) {
- void *ptr = (void *) (((char *) p) - sizeof(UWord));
- Allctr_t *allctr = *((Allctr_t **) ptr);
- erts_mtx_lock(&allctr->mutex);
- do_erts_alcu_free(type, allctr, ptr);
- erts_mtx_unlock(&allctr->mutex);
+ Allctr_t *pref_allctr, *used_allctr;
+ void *ptr;
+
+ get_pref_allctr(extra, &pref_allctr);
+ ptr = get_used_allctr(extra, p, &used_allctr, NULL);
+ if (pref_allctr != used_allctr)
+ enqueue_dealloc_other_instance(type, used_allctr, ptr);
+ else {
+ if (used_allctr->thread_safe)
+ erts_mtx_lock(&used_allctr->mutex);
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
+ do_erts_alcu_free(type, used_allctr, ptr);
+ if (used_allctr->thread_safe)
+ erts_mtx_unlock(&used_allctr->mutex);
+ }
}
}
#endif
+#endif
+
/* ------------------------------------------------------------------------- */
static ERTS_INLINE void *
@@ -2725,7 +3636,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
void *extra,
void *p,
Uint size,
- UWord flgs)
+ Uint32 alcu_flgs)
{
Allctr_t *allctr = (Allctr_t *) extra;
Block_t *blk;
@@ -2735,7 +3646,10 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
ASSERT(allctr);
- ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+ ERTS_SMP_LC_ASSERT(!allctr->thread_safe
+ || erts_lc_mtx_is_locked(&allctr->mutex));
+
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
if (!p) {
res = do_erts_alcu_alloc(type, extra, size);
@@ -2758,9 +3672,32 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
blk = UMEM2BLK(p);
+ if (allctr->sbmbc_threshold > 0) {
+ Uint old_sz, new_sz, lim;
+ lim = allctr->sbmbc_threshold;
+ old_sz = BLK_SZ(blk);
+ new_sz = UMEMSZ2BLKSZ(allctr, size);
+ if ((old_sz < lim && lim <= new_sz)
+ || (new_sz < lim && lim <= old_sz)) {
+ /* *Need* to move it... */
+
+ INC_CC(allctr->calls.this_realloc);
+ res = do_erts_alcu_alloc(type, extra, size);
+ DEC_CC(allctr->calls.this_alloc);
+
+ sys_memcpy(res, p, MIN(size, old_sz - ABLK_HDR_SZ));
+
+ do_erts_alcu_free(type, extra, p);
+ DEC_CC(allctr->calls.this_free);
+ return res;
+ }
+ if (old_sz < lim)
+ alcu_flgs |= ERTS_ALCU_FLG_SBMBC;
+ }
+
if (size < allctr->sbc_threshold) {
if (IS_MBC_BLK(blk))
- res = mbc_realloc(allctr, p, size, flgs);
+ res = mbc_realloc(allctr, p, size, alcu_flgs);
else {
Uint used_sz = allctr->sbc_header_size + ABLK_HDR_SZ + size;
Uint crr_sz;
@@ -2791,7 +3728,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
if (100*diff_sz_val < allctr->sbc_move_threshold*crr_sz_val)
/* Data won't be copied into a new carrier... */
goto do_carrier_resize;
- else if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
res = mbc_alloc(allctr, size);
@@ -2805,6 +3742,10 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
}
else {
Block_t *new_blk;
+#ifdef ERTS_SMP
+ if (allctr->dd.use)
+ ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
+#endif
if(IS_SBC_BLK(blk)) {
do_carrier_resize:
#if HALFWORD_HEAP
@@ -2814,7 +3755,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
#endif
res = new_blk ? BLK2UMEM(new_blk) : NULL;
}
- else if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
else {
#if HALFWORD_HEAP
@@ -2908,30 +3849,29 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
return res;
}
+#ifdef ERTS_SMP
+
void *
erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
void *ptr, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
+ int ix;
Allctr_t *allctr;
- int unlock;
void *res;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
+ ix = ERTS_ALC_GET_THR_IX();
+
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
- }
res = do_erts_alcu_realloc(type, allctr, ptr, size, 0);
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
DEBUG_CHECK_ALIGNMENT(res);
@@ -2944,26 +3884,22 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
void *ptr, Uint size)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix = erts_alc_get_thr_ix();
+ int ix;
Allctr_t *allctr;
- int unlock;
void *res;
- ASSERT(ix > 0);
- if (ix < tspec->size) {
- allctr = tspec->allctr[ix];
- unlock = 0;
- }
- else {
- allctr = tspec->allctr[0];
- unlock = 1;
- erts_mtx_lock(&allctr->mutex);
- }
+ ix = ERTS_ALC_GET_THR_IX();
+ ASSERT(0 <= ix && ix < tspec->size);
+
+ allctr = tspec->allctr[ix];
+
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
res = do_erts_alcu_alloc(type, allctr, size);
if (!res) {
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
res = erts_alcu_realloc_thr_spec(type, allctr, ptr, size);
}
@@ -2977,7 +3913,7 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
cpy_size = size;
sys_memcpy(res, ptr, cpy_size);
do_erts_alcu_free(type, allctr, ptr);
- if (unlock)
+ if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
}
@@ -2986,129 +3922,102 @@ erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
return res;
}
-void *
-erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
+static ERTS_INLINE void *
+realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
+ int force_move)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix;
+ int pref_ix;
void *ptr, *res;
Allctr_t *pref_allctr, *used_allctr;
+ UWord old_user_size;
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
- ptr = (void *) (((char *) p) - sizeof(UWord));
- used_allctr = *((Allctr_t **) ptr);
+ pref_ix = get_pref_allctr(extra, &pref_allctr);
+ ptr = get_used_allctr(extra, p, &used_allctr, &old_user_size);
- ix = erts_alc_get_thr_ix();
- ASSERT(ix > 0);
- if (ix >= tspec->size)
- ix = (ix % (tspec->size - 1)) + 1;
- pref_allctr = tspec->allctr[ix];
ASSERT(used_allctr && pref_allctr);
- erts_mtx_lock(&used_allctr->mutex);
- res = do_erts_alcu_realloc(type,
- used_allctr,
- ptr,
- size + sizeof(UWord),
- (pref_allctr != used_allctr
- ? ERTS_ALCU_FLG_FAIL_REALLOC_MOVE
- : 0));
- erts_mtx_unlock(&used_allctr->mutex);
- if (res) {
- ASSERT(used_allctr == *((Allctr_t **) res));
- res = (void *) (((char *) res) + sizeof(UWord));
- DEBUG_CHECK_ALIGNMENT(res);
+ if (!force_move && used_allctr == pref_allctr) {
+ if (used_allctr->thread_safe)
+ erts_mtx_lock(&used_allctr->mutex);
+ ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
+ res = do_erts_alcu_realloc(type,
+ used_allctr,
+ ptr,
+ size + sizeof(UWord),
+ 0);
+ if (used_allctr->thread_safe)
+ erts_mtx_unlock(&used_allctr->mutex);
+ if (res)
+ res = put_used_allctr(res, pref_ix, size);
}
else {
- erts_mtx_lock(&pref_allctr->mutex);
+ if (pref_allctr->thread_safe)
+ erts_mtx_lock(&pref_allctr->mutex);
res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
- erts_mtx_unlock(&pref_allctr->mutex);
+ if (pref_allctr->thread_safe && (!force_move
+ || used_allctr != pref_allctr))
+ erts_mtx_unlock(&pref_allctr->mutex);
if (res) {
Block_t *blk;
size_t cpy_size;
- *((Allctr_t **) res) = pref_allctr;
- res = (void *) (((char *) res) + sizeof(UWord));
+ res = put_used_allctr(res, pref_ix, size);
DEBUG_CHECK_ALIGNMENT(res);
- erts_mtx_lock(&used_allctr->mutex);
blk = UMEM2BLK(ptr);
- cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(UWord);
+ if (old_user_size != ERTS_AU_PREF_ALLOC_SIZE_MASK)
+ cpy_size = old_user_size;
+ else {
+ if (used_allctr->thread_safe && (!force_move
+ || used_allctr != pref_allctr))
+ erts_mtx_lock(&used_allctr->mutex);
+ ERTS_SMP_LC_ASSERT(!used_allctr->thread_safe ||
+ erts_lc_mtx_is_locked(&used_allctr->mutex));
+ cpy_size = BLK_SZ(blk);
+ if (used_allctr->thread_safe && (!force_move
+ || used_allctr != pref_allctr))
+ erts_mtx_unlock(&used_allctr->mutex);
+ cpy_size -= ABLK_HDR_SZ + sizeof(UWord);
+ }
if (cpy_size > size)
cpy_size = size;
sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, used_allctr, ptr);
- erts_mtx_unlock(&used_allctr->mutex);
+
+ if (!force_move || used_allctr != pref_allctr)
+ enqueue_dealloc_other_instance(type, used_allctr, ptr);
+ else {
+ do_erts_alcu_free(type, used_allctr, ptr);
+ ASSERT(pref_allctr == used_allctr);
+ if (pref_allctr->thread_safe)
+ erts_mtx_unlock(&pref_allctr->mutex);
+ }
}
}
return res;
}
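
realloc_thr_pref() above boils down to a migrate-on-realloc pattern: when the block's owning instance differs from the caller's preferred one (or force_move is set), it allocates from the preferred instance, copies the smaller of the old and new sizes, and hands the old block back to its owner through the delayed-dealloc queue. A rough sketch of that shape, with locking and header bookkeeping omitted and placeholder helper names:

    #include <string.h>

    struct inst;                                       /* one allocator instance (placeholder) */
    void *inst_alloc(struct inst *, size_t);           /* hypothetical helpers */
    void  inst_free(struct inst *, void *);
    void  inst_enqueue_free(struct inst *, void *);    /* deferred free on the owning instance */

    static void *migrate_realloc(struct inst *pref, struct inst *owner,
                                 void *old_p, size_t old_size, size_t new_size)
    {
        void *new_p = inst_alloc(pref, new_size);      /* always allocate "locally" */
        if (!new_p)
            return NULL;
        memcpy(new_p, old_p, old_size < new_size ? old_size : new_size);
        if (owner != pref)
            inst_enqueue_free(owner, old_p);           /* let the owning thread free it later */
        else
            inst_free(owner, old_p);                   /* same instance: free immediately */
        return new_p;
    }
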
+void *
+erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ return realloc_thr_pref(type, extra, p, size, 0);
+}
void *
erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
void *p, Uint size)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- int ix;
- void *ptr, *res;
- Allctr_t *pref_allctr, *used_allctr;
-
- if (!p)
- return erts_alcu_alloc_thr_pref(type, extra, size);
-
- ptr = (void *) (((char *) p) - sizeof(UWord));
- used_allctr = *((Allctr_t **) ptr);
-
- ix = erts_alc_get_thr_ix();
- ASSERT(ix > 0);
- if (ix >= tspec->size)
- ix = (ix % (tspec->size - 1)) + 1;
- pref_allctr = tspec->allctr[ix];
- ASSERT(used_allctr && pref_allctr);
-
- erts_mtx_lock(&pref_allctr->mutex);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
- if (!res) {
- erts_mtx_unlock(&pref_allctr->mutex);
- res = erts_alcu_realloc_thr_pref(type, extra, p, size);
- }
- else {
- Block_t *blk;
- size_t cpy_size;
- Allctr_t *allctr;
-
- *((Allctr_t **) res) = pref_allctr;
- res = (void *) (((char *) res) + sizeof(UWord));
-
- DEBUG_CHECK_ALIGNMENT(res);
-
- if (used_allctr == pref_allctr)
- allctr = pref_allctr;
- else {
- erts_mtx_unlock(&pref_allctr->mutex);
- allctr = used_allctr;
- erts_mtx_lock(&allctr->mutex);
- }
-
- blk = UMEM2BLK(ptr);
- cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(UWord);
- if (cpy_size > size)
- cpy_size = size;
- sys_memcpy(res, p, cpy_size);
- do_erts_alcu_free(type, allctr, ptr);
- erts_mtx_unlock(&allctr->mutex);
- }
-
- return res;
+ return realloc_thr_pref(type, extra, p, size, 1);
}
#endif
+#endif
+
/* ------------------------------------------------------------------------- */
int
@@ -3123,6 +4032,10 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
sys_memcpy((void *) &allctr->mseg_opt,
(void *) &erts_mseg_default_opt,
sizeof(ErtsMsegOpt_t));
+#ifdef ERTS_SMP
+ if (init->tspec || init->tpref)
+ allctr->mseg_opt.sched_spec = 1;
+#endif
# if HALFWORD_HEAP
allctr->mseg_opt.low_mem = init->low_mem;
# endif
@@ -3132,6 +4045,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (!allctr->name_prefix)
goto error;
+ allctr->ix = init->ix;
allctr->alloc_no = init->alloc_no;
if (allctr->alloc_no < ERTS_ALC_A_MIN
|| ERTS_ALC_A_MAX < allctr->alloc_no)
@@ -3173,6 +4087,38 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(UWord));
+#if ERTS_SMP
+ if (init->tpref) {
+ Uint sz = sizeof(Block_t);
+ sz += ERTS_ALCU_DD_FIX_TYPE_OFFS*sizeof(UWord);
+ if (init->fix)
+ sz += sizeof(UWord);
+ sz = UNIT_CEILING(sz);
+ if (sz > allctr->min_block_size)
+ allctr->min_block_size = sz;
+ }
+#endif
+
+
+
+ allctr->sbmbc_threshold = init->sbmbct;
+
+ if (!erts_have_sbmbc_alloc
+ || ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no))
+ allctr->sbmbc_threshold = 0;
+
+ if (!allctr->sbmbc_threshold)
+ allctr->sbmbc_size = 0;
+ else {
+ Uint min_size;
+ allctr->sbmbc_size = init->sbmbcs;
+ min_size = allctr->sbmbc_threshold;
+ min_size += allctr->min_block_size;
+ min_size += allctr->mbc_header_size;
+ if (allctr->sbmbc_size < min_size)
+ allctr->sbmbc_size = min_size;
+ }
+
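
As a concrete example: with the defaults declared later in this patch (sbmbct of 256 bytes and sbmbcs of 8 KiB), the computed minimum is 256 bytes plus the allocator's min_block_size and mbc_header_size, so the 8 KiB default comfortably clears it; a configured sbmbcs below that floor is silently raised to the minimum.
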
#if HAVE_ERTS_MSEG
if (allctr->mseg_opt.abs_shrink_th > ~((UWord) 0) / 100)
@@ -3185,12 +4131,16 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_mtx_init_x_opt(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC);
+ ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no)
+ ? "sbmbc_alloc"
+ : "alcu_allocator",
+ make_small(allctr->alloc_no),
+ ERTS_LCNT_LT_ALLOC);
#else
erts_mtx_init_x(&allctr->mutex,
- "alcu_allocator",
+ ERTS_IS_SBMBC_ALLOCATOR_NO__(allctr->alloc_no)
+ ? "sbmbc_alloc"
+ : "alcu_allocator",
make_small(allctr->alloc_no));
#endif /*ERTS_ENABLE_LOCK_COUNT*/
@@ -3211,7 +4161,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->mbc_header_size < sizeof(Carrier_t))
goto error;
-#ifdef USE_THREADS
+#ifdef ERTS_SMP
+ allctr->dd.use = 0;
if (init->tpref) {
allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ FBLK_FTR_SZ
@@ -3225,6 +4176,9 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
+ sizeof(UWord))
- ABLK_HDR_SZ
- sizeof(UWord));
+
+ allctr->dd.use = 1;
+ init_dd_queue(&allctr->dd.q);
}
else
#endif
@@ -3260,12 +4214,27 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (!blk)
goto error;
- (*allctr->link_free_block)(allctr, blk);
+ (*allctr->link_free_block)(allctr, blk, 0);
HARD_CHECK_BLK_CARRIER(allctr, blk);
}
+ if (init->fix) {
+ int i;
+ allctr->fix = init->fix;
+ allctr->fix_shrink_scheduled = 0;
+ for (i = 0; i < ERTS_ALC_NO_FIXED_SIZES; i++) {
+ allctr->fix[i].max_used = 0;
+ allctr->fix[i].limit = 0;
+ allctr->fix[i].type_size = init->fix_type_size[i];
+ allctr->fix[i].list_size = 0;
+ allctr->fix[i].list = NULL;
+ allctr->fix[i].allocated = 0;
+ allctr->fix[i].used = 0;
+ }
+ }
+
return 1;
error:
@@ -3290,6 +4259,8 @@ erts_alcu_stop(Allctr_t *allctr)
destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first));
while (allctr->mbc_list.first)
destroy_carrier(allctr, MBC2FBLK(allctr, allctr->mbc_list.first));
+ while (allctr->sbmbc_list.first)
+ destroy_sbmbc(allctr, MBC2FBLK(allctr, allctr->sbmbc_list.first));
#ifdef USE_THREADS
if (allctr->thread_safe)
@@ -3387,13 +4358,15 @@ erts_alcu_verify_unused(Allctr_t *allctr)
{
UWord no;
- no = allctr->sbcs.curr_mseg.no;
- no += allctr->sbcs.curr_sys_alloc.no;
+ no = allctr->sbcs.curr.norm.mseg.no;
+ no += allctr->sbcs.curr.norm.sys_alloc.no;
no += allctr->mbcs.blocks.curr.no;
+ no += allctr->sbmbcs.blocks.curr.no;
if (no) {
UWord sz = allctr->sbcs.blocks.curr.size;
sz += allctr->mbcs.blocks.curr.size;
+ sz += allctr->sbmbcs.blocks.curr.size;
erl_exit(ERTS_ABORT_EXIT,
"%salloc() used when expected to be unused!\n"
"Total amount of blocks allocated: %bpu\n"
@@ -3492,7 +4465,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
(*allctr->check_block)(allctr, blk, (int) is_free_blk);
if (IS_LAST_BLK(blk)) {
- carrier_end = ((char *) NXT_BLK(blk)) + sizeof(UWord);
+ carrier_end = ((char *) NXT_BLK(blk));
mbc = *((Carrier_t **) NXT_BLK(blk));
prev_blk = NULL;
blk = MBC2FBLK(allctr, mbc);
@@ -3507,9 +4480,9 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT(IS_MB_CARRIER(mbc));
ASSERT((((char *) mbc)
+ allctr->mbc_header_size
- + tot_blk_sz
- + sizeof(UWord)) == carrier_end);
- ASSERT(((char *) mbc) + CARRIER_SZ(mbc) == carrier_end);
+ + tot_blk_sz) == carrier_end);
+ ASSERT(((char *) mbc) + CARRIER_SZ(mbc) - sizeof(Unit_t) <= carrier_end
+ && carrier_end <= ((char *) mbc) + CARRIER_SZ(mbc));
if (allctr->check_mbc)
(*allctr->check_mbc)(allctr, mbc);
@@ -3523,6 +4496,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
cl = &allctr->mbc_list;
}
+#if 0 /* FIXIT sbmbc */
if (cl->first == crr) {
ASSERT(!crr->prev);
}
@@ -3537,6 +4511,7 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
ASSERT(crr->next);
ASSERT(crr->next->prev == crr);
}
+#endif
}
#endif
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index ddf84c086c..cedf4ccf85 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -20,10 +20,13 @@
#ifndef ERL_ALLOC_UTIL__
#define ERL_ALLOC_UTIL__
-#define ERTS_ALCU_VSN_STR "2.2"
+#define ERTS_ALCU_VSN_STR "3.0"
#include "erl_alloc_types.h"
+#define ERTS_AU_PREF_ALLOC_BITS 11
+#define ERTS_AU_MAX_PREF_ALLOC_INSTANCES (1 << ERTS_AU_PREF_ALLOC_BITS)
+
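
These two constants bound the number of thread-preferred allocator instances to 2^11, which lets put_used_allctr() squeeze the originating instance index into a few bits of the extra UWord stored in front of each user block, alongside a (possibly saturated) user size that get_used_allctr() reads back. A hedged sketch of one such packing, assuming the index sits in the low bits and the size above them; the actual ERTS layout may differ:

    #include <stdint.h>
    #include <stddef.h>

    #define PREF_BITS    11
    #define PREF_IX_MASK ((((uintptr_t) 1) << PREF_BITS) - 1)
    #define SIZE_MASK    (~(uintptr_t) 0 >> PREF_BITS)  /* doubles as "size unknown" marker */

    static uintptr_t pack_hdr(unsigned ix, size_t size)
    {
        uintptr_t sz = size > SIZE_MASK ? SIZE_MASK : (uintptr_t) size;
        return ((uintptr_t) ix & PREF_IX_MASK) | (sz << PREF_BITS);
    }

    static unsigned  unpack_ix(uintptr_t hdr)   { return (unsigned) (hdr & PREF_IX_MASK); }
    static uintptr_t unpack_size(uintptr_t hdr) { return hdr >> PREF_BITS; }

A saturated size (unpack_size() == SIZE_MASK) corresponds to the ERTS_AU_PREF_ALLOC_SIZE_MASK check in realloc_thr_pref(), which then falls back to reading the block header for the true size.
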
typedef struct Allctr_t_ Allctr_t;
typedef struct {
@@ -34,6 +37,8 @@ typedef struct {
typedef struct {
char *name_prefix;
ErtsAlcType_t alloc_no;
+ int force;
+ int ix;
int ts;
int tspec;
int tpref;
@@ -50,6 +55,11 @@ typedef struct {
UWord lmbcs;
UWord smbcs;
UWord mbcgs;
+ UWord sbmbct;
+ UWord sbmbcs;
+
+ void *fix;
+ size_t *fix_type_size;
} AllctrInit_t;
typedef struct {
@@ -57,6 +67,11 @@ typedef struct {
UWord carriers;
} AllctrSize_t;
+typedef struct {
+ UWord allocated;
+ UWord used;
+} ErtsAlcUFixInfo_t;
+
#ifndef SMALL_MEMORY
#define ERTS_DEFAULT_ALCU_INIT { \
@@ -67,6 +82,8 @@ typedef struct {
#define ERTS_DEFAULT_ALLCTR_INIT { \
NULL, \
ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
+ 0, /* (bool) force: force enabled */\
+ 0, /* (number) ix: instance index */\
1, /* (bool) ts: thread safe */\
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
@@ -82,7 +99,12 @@ typedef struct {
10, /* (amount) mmmbc: max mseg mbcs */\
10*1024*1024, /* (bytes) lmbcs: largest mbc size */\
1024*1024, /* (bytes) smbcs: smallest mbc size */\
- 10 /* (amount) mbcgs: mbc growth stages */\
+ 10, /* (amount) mbcgs: mbc growth stages */\
+ 256, /* (bytes) sbmbct: small block mbc threshold */\
+ 8*1024, /* (bytes) sbmbcs: small block mbc size */ \
+ /* --- Data not options -------------------------------------------- */\
+ NULL, /* (ptr) fix */\
+ NULL /* (ptr) fix_type_size */\
}
#else /* if SMALL_MEMORY */
@@ -95,6 +117,8 @@ typedef struct {
#define ERTS_DEFAULT_ALLCTR_INIT { \
NULL, \
ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
+ 0, /* (bool) force: force enabled */\
+ 0, /* (number) ix: instance index */\
1, /* (bool) ts: thread safe */\
0, /* (bool) tspec: thread specific */\
0, /* (bool) tpref: thread preferred */\
@@ -109,7 +133,12 @@ typedef struct {
10, /* (amount) mmmbc: max mseg mbcs */\
1024*1024, /* (bytes) lmbcs: largest mbc size */\
128*1024, /* (bytes) smbcs: smallest mbc size */\
- 10 /* (amount) mbcgs: mbc growth stages */\
+ 10, /* (amount) mbcgs: mbc growth stages */\
+ 256, /* (bytes) sbmbct: small block mbc threshold */\
+ 8*1024, /* (bytes) sbmbcs: small block mbc size */ \
+ /* --- Data not options -------------------------------------------- */\
+ NULL, /* (ptr) fix */\
+ NULL /* (ptr) fix_type_size */\
}
#endif
@@ -123,6 +152,7 @@ void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_ts(ErtsAlcType_t, void *, void *);
+#ifdef ERTS_SMP
void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint);
@@ -132,18 +162,27 @@ void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
#endif
+#endif
Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *);
Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *);
Eterm erts_alcu_sz_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
Eterm erts_alcu_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
-void erts_alcu_current_size(Allctr_t *, AllctrSize_t *);
+void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
+ ErtsAlcUFixInfo_t *, int);
+#ifdef ERTS_SMP
+void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *);
+#endif
+erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#endif
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
#define ERL_ALLOC_UTIL_IMPL__
+#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE (((Uint32) 1) << 0)
+#define ERTS_ALCU_FLG_SBMBC (((Uint32) 1) << 1)
+
#ifdef USE_THREADS
#define ERL_THREADS_EMU_INTERNAL__
#include "erl_threads.h"
@@ -188,7 +227,9 @@ void erts_alcu_current_size(Allctr_t *, AllctrSize_t *);
#define CARRIER_SZ(C) \
((C)->chdr & SZ_MASK)
-typedef union {char c[8]; long l; double d;} Unit_t;
+extern int erts_have_sbmbc_alloc;
+
+typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
typedef struct Carrier_t_ Carrier_t;
struct Carrier_t_ {
@@ -216,8 +257,13 @@ typedef struct {
} StatValues_t;
typedef struct {
- StatValues_t curr_mseg;
- StatValues_t curr_sys_alloc;
+ union {
+ struct {
+ StatValues_t mseg;
+ StatValues_t sys_alloc;
+ } norm;
+ StatValues_t small_block;
+ } curr;
StatValues_t max;
StatValues_t max_ever;
struct {
@@ -227,7 +273,74 @@ typedef struct {
} blocks;
} CarriersStats_t;
+#ifdef ERTS_SMP
+
+typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
+
+union ErtsAllctrDDBlock_t_ {
+ erts_atomic_t atmc_next;
+ ErtsAllctrDDBlock_t *ptr_next;
+};
+
+typedef struct {
+ ErtsAllctrDDBlock_t marker;
+ erts_atomic_t last;
+ erts_atomic_t um_refc[2];
+ erts_atomic32_t um_refc_ix;
+} ErtsDDTail_t;
+
+typedef struct {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /* Modified by threads returning memory to this allocator */
+ ErtsDDTail_t data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsDDTail_t))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread owning the allocator.
+ */
+ struct {
+ ErtsAllctrDDBlock_t *first;
+ ErtsAllctrDDBlock_t *unref_end;
+ struct {
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+ int um_refc_ix;
+ ErtsAllctrDDBlock_t *unref_end;
+ } next;
+ int used_marker;
+ } head;
+} ErtsAllctrDDQueue_t;
+
+#endif
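
The point of the two unions above is false-sharing avoidance: the tail end is written by any thread pushing a deferred deallocation, while everything in head is touched only by the owning scheduler, so each side gets its own cache line. A minimal sketch of the same layout idea in portable C11 (field names illustrative):

    #include <stdalign.h>
    #include <stdatomic.h>

    #define CACHE_LINE 64

    struct dd_queue {
        alignas(CACHE_LINE) struct {       /* written by remote (producing) threads */
            _Atomic(void *) last;
        } tail;
        alignas(CACHE_LINE) struct {       /* read and written only by the owner */
            void *first;
            void *unref_end;
        } head;
    };
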
+
+typedef struct {
+ size_t type_size;
+ SWord list_size;
+ void *list;
+ SWord max_used;
+ SWord limit;
+ SWord allocated;
+ SWord used;
+} ErtsAlcFixList_t;
+
struct Allctr_t_ {
+#ifdef ERTS_SMP
+ struct {
+ /*
+ * We want the queue at the beginning of
+ * the Allctr_t struct, due to cache line
+ * alignment reasons.
+ */
+ ErtsAllctrDDQueue_t q;
+ int use;
+ int ix;
+ } dd;
+#endif
/* Allocator name prefix */
char * name_prefix;
@@ -235,6 +348,9 @@ struct Allctr_t_ {
/* Allocator number */
ErtsAlcType_t alloc_no;
+ /* Instance index */
+ int ix;
+
/* Alloc, realloc and free names as atoms */
struct {
Eterm alloc;
@@ -257,6 +373,9 @@ struct Allctr_t_ {
Uint largest_mbc_size;
Uint smallest_mbc_size;
Uint mbc_growth_stages;
+ Uint sbmbc_threshold;
+ Uint sbmbc_size;
+
#if HAVE_ERTS_MSEG
ErtsMsegOpt_t mseg_opt;
#endif
@@ -269,6 +388,7 @@ struct Allctr_t_ {
Uint min_block_size;
/* Carriers */
+ CarrierList_t sbmbc_list;
CarrierList_t mbc_list;
CarrierList_t sbc_list;
@@ -277,15 +397,15 @@ struct Allctr_t_ {
/* Callback functions (first 4 are mandatory) */
Block_t * (*get_free_block) (Allctr_t *, Uint,
- Block_t *, Uint);
- void (*link_free_block) (Allctr_t *, Block_t *);
- void (*unlink_free_block) (Allctr_t *, Block_t *);
+ Block_t *, Uint, Uint32);
+ void (*link_free_block) (Allctr_t *, Block_t *, Uint32);
+ void (*unlink_free_block) (Allctr_t *, Block_t *, Uint32);
Eterm (*info_options) (Allctr_t *, char *, int *,
void *, Uint **, Uint *);
Uint (*get_next_mbc_size) (Allctr_t *);
- void (*creating_mbc) (Allctr_t *, Carrier_t *);
- void (*destroying_mbc) (Allctr_t *, Carrier_t *);
+ void (*creating_mbc) (Allctr_t *, Carrier_t *, Uint32);
+ void (*destroying_mbc) (Allctr_t *, Carrier_t *, Uint32);
void (*init_atoms) (void);
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
@@ -293,6 +413,10 @@ struct Allctr_t_ {
void (*check_mbc) (Allctr_t *, Carrier_t *);
#endif
+ int fix_n_base;
+ int fix_shrink_scheduled;
+ ErtsAlcFixList_t *fix;
+
#ifdef USE_THREADS
/* Mutex for this allocator */
erts_mtx_t mutex;
@@ -301,6 +425,7 @@ struct Allctr_t_ {
Allctr_t *prev;
Allctr_t *next;
} ts_list;
+
#endif
int atoms_initialized;
@@ -312,6 +437,8 @@ struct Allctr_t_ {
CallCounter_t this_alloc;
CallCounter_t this_free;
CallCounter_t this_realloc;
+ CallCounter_t sbmbc_alloc;
+ CallCounter_t sbmbc_free;
CallCounter_t mseg_alloc;
CallCounter_t mseg_dealloc;
CallCounter_t mseg_realloc;
@@ -322,6 +449,7 @@ struct Allctr_t_ {
CarriersStats_t sbcs;
CarriersStats_t mbcs;
+ CarriersStats_t sbmbcs;
#ifdef DEBUG
#ifdef USE_THREADS
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
new file mode 100644
index 0000000000..5bdb752d3a
--- /dev/null
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -0,0 +1,976 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: An "address order first fit" allocator
+ * based on a Red-Black (binary search) Tree. The search,
+ * insert, and delete operations are all O(log n) operations
+ * on a Red-Black Tree.
+ * Red-Black Trees are described in "Introduction to Algorithms",
+ * by Thomas H. Cormen, Charles E. Leiserson, and Ronald L. Rivest.
+ *
+ * This module is a callback-module for erl_alloc_util.c
+ *
+ * Algorithm: The tree nodes are free-blocks ordered in address order.
+ * Every node also keeps the size of the largest block in its
+ * sub-tree ('max_size'). That way we can start from the root and keep
+ * going left (toward low addresses) while dismissing entire sub-trees
+ * whose blocks are all too small.
+ *
+ * Authors: Rickard Green/Sverker Eriksson
+ */
+
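
Put differently, max_sz lets the search answer "lowest-addressed free block of at least N bytes" while skipping every subtree that cannot contain such a block; aoff_get_free_block() below is the real implementation. A condensed sketch of the descent:

    struct aoff_node {
        struct aoff_node *left, *right;    /* ordered by block address */
        unsigned long     size;            /* size of this free block */
        unsigned long     max_sz;          /* largest block size in this subtree */
    };

    static struct aoff_node *aoff_search(struct aoff_node *x, unsigned long want)
    {
        while (x) {
            if (x->left && x->left->max_sz >= want)
                x = x->left;               /* a fitting block exists at a lower address */
            else if (x->size >= want)
                return x;                  /* lowest-addressed block that fits */
            else
                x = x->right;              /* only higher addresses can still fit */
        }
        return 0;                          /* nothing large enough */
    }
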
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "global.h"
+#define GET_ERL_AOFF_ALLOC_IMPL
+#include "erl_ao_firstfit_alloc.h"
+
+#ifdef DEBUG
+#if 0
+#define HARD_DEBUG
+#endif
+#else
+#undef HARD_DEBUG
+#endif
+
+#define MIN_MBC_SZ (16*1024)
+#define MIN_MBC_FIRST_FREE_SZ (4*1024)
+
+#define TREE_NODE_FLG (((Uint) 1) << 0)
+#define RED_FLG (((Uint) 1) << 1)
+#ifdef HARD_DEBUG
+# define LEFT_VISITED_FLG (((Uint) 1) << 2)
+# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
+#endif
+
+#define IS_RED(N) (((AOFF_RBTree_t *) (N)) \
+ && ((AOFF_RBTree_t *) (N))->flags & RED_FLG)
+#define IS_BLACK(N) (!IS_RED(((AOFF_RBTree_t *) (N))))
+
+#define SET_RED(N) (((AOFF_RBTree_t *) (N))->flags |= RED_FLG)
+#define SET_BLACK(N) (((AOFF_RBTree_t *) (N))->flags &= ~RED_FLG)
+
+#undef ASSERT
+#define ASSERT ASSERT_EXPR
+
+#if 1
+#define RBT_ASSERT ASSERT
+#else
+#define RBT_ASSERT(x)
+#endif
+
+
+/* Types... */
+typedef struct AOFF_RBTree_t_ AOFF_RBTree_t;
+
+struct AOFF_RBTree_t_ {
+ Block_t hdr;
+ Uint flags;
+ AOFF_RBTree_t *parent;
+ AOFF_RBTree_t *left;
+ AOFF_RBTree_t *right;
+ Uint max_sz; /* of all blocks in this sub-tree */
+};
+
+#ifdef HARD_DEBUG
+static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint);
+#endif
+
+
+/* Calculate 'max_size' of tree node x by only looking at the direct children
+ * of x and x itself.
+ */
+static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x)
+{
+ Uint sz = BLK_SZ(x);
+ if (x->left && x->left->max_sz > sz) {
+ sz = x->left->max_sz;
+ }
+ if (x->right && x->right->max_sz > sz) {
+ sz = x->right->max_sz;
+ }
+ return sz;
+}
+
+/* Set a new, possibly lower, 'max_size' of a node and propagate the
+ * change toward the root. */
+static ERTS_INLINE void lower_max_size(AOFF_RBTree_t *node,
+ AOFF_RBTree_t* stop_at)
+{
+ AOFF_RBTree_t* x = node;
+ Uint old_max = x->max_sz;
+ Uint new_max = node_max_size(x);
+
+ if (new_max < old_max) {
+ x->max_sz = new_max;
+ while ((x=x->parent) != stop_at && x->max_sz == old_max) {
+ x->max_sz = node_max_size(x);
+ }
+ ASSERT(x == stop_at || x->max_sz > old_max);
+ }
+ else ASSERT(new_max == old_max);
+}
+
+
+/* Prototypes of callback functions */
+static Block_t* aoff_get_free_block(Allctr_t *, Uint, Block_t *, Uint, Uint32 flags);
+static void aoff_link_free_block(Allctr_t *, Block_t*, Uint32 flags);
+static void aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags);
+
+static Eterm info_options(Allctr_t *, char *, int *, void *, Uint **, Uint *);
+static void init_atoms(void);
+
+
+
+#ifdef DEBUG
+
+/* Destroy all tree fields */
+#define DESTROY_TREE_NODE(N) \
+ sys_memset((void *) (((Block_t *) (N)) + 1), \
+ 0xff, \
+ (sizeof(AOFF_RBTree_t) - sizeof(Block_t)))
+
+#else
+
+#define DESTROY_TREE_NODE(N)
+
+#endif
+
+
+static int atoms_initialized = 0;
+
+void
+erts_aoffalc_init(void)
+{
+ atoms_initialized = 0;
+}
+
+Allctr_t *
+erts_aoffalc_start(AOFFAllctr_t *alc,
+ AOFFAllctrInit_t* aoffinit,
+ AllctrInit_t *init)
+{
+ struct {
+ int dummy;
+ AOFFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
+ Allctr_t *allctr = (Allctr_t *) alc;
+
+ sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
+
+ allctr->mbc_header_size = sizeof(Carrier_t);
+ allctr->min_mbc_size = MIN_MBC_SZ;
+ allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
+ allctr->min_block_size = sizeof(AOFF_RBTree_t);
+
+ allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR;
+
+
+ /* Callback functions */
+
+ allctr->get_free_block = aoff_get_free_block;
+ allctr->link_free_block = aoff_link_free_block;
+ allctr->unlink_free_block = aoff_unlink_free_block;
+ allctr->info_options = info_options;
+
+ allctr->get_next_mbc_size = NULL;
+ allctr->creating_mbc = NULL;
+ allctr->destroying_mbc = NULL;
+ allctr->init_atoms = init_atoms;
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ allctr->check_block = NULL;
+ allctr->check_mbc = NULL;
+#endif
+
+ allctr->atoms_initialized = 0;
+
+ if (!erts_alcu_start(allctr, init))
+ return NULL;
+
+ return allctr;
+}
+
+/*
+ * Red-Black Tree operations needed
+ */
+
+static ERTS_INLINE void
+left_rotate(AOFF_RBTree_t **root, AOFF_RBTree_t *x)
+{
+ AOFF_RBTree_t *y = x->right;
+ x->right = y->left;
+ if (y->left)
+ y->left->parent = x;
+ y->parent = x->parent;
+ if (!y->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->left)
+ x->parent->left = y;
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ x->parent->right = y;
+ }
+ y->left = x;
+ x->parent = y;
+
+ y->max_sz = x->max_sz;
+ x->max_sz = node_max_size(x);
+ ASSERT(y->max_sz >= x->max_sz);
+}
+
+static ERTS_INLINE void
+right_rotate(AOFF_RBTree_t **root, AOFF_RBTree_t *x)
+{
+ AOFF_RBTree_t *y = x->left;
+ x->left = y->right;
+ if (y->right)
+ y->right->parent = x;
+ y->parent = x->parent;
+ if (!y->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->right)
+ x->parent->right = y;
+ else {
+ RBT_ASSERT(x == x->parent->left);
+ x->parent->left = y;
+ }
+ y->right = x;
+ x->parent = y;
+ y->max_sz = x->max_sz;
+ x->max_sz = node_max_size(x);
+ ASSERT(y->max_sz >= x->max_sz);
+}
+
+
+/*
+ * Replace node x with node y
+ * NOTE: block header of y is not changed
+ */
+static ERTS_INLINE void
+replace(AOFF_RBTree_t **root, AOFF_RBTree_t *x, AOFF_RBTree_t *y)
+{
+
+ if (!x->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->left)
+ x->parent->left = y;
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ x->parent->right = y;
+ }
+ if (x->left) {
+ RBT_ASSERT(x->left->parent == x);
+ x->left->parent = y;
+ }
+ if (x->right) {
+ RBT_ASSERT(x->right->parent == x);
+ x->right->parent = y;
+ }
+
+ y->flags = x->flags;
+ y->parent = x->parent;
+ y->right = x->right;
+ y->left = x->left;
+
+ y->max_sz = x->max_sz;
+ lower_max_size(y, NULL);
+ DESTROY_TREE_NODE(x);
+}
+
+static void
+tree_insert_fixup(AOFF_RBTree_t** root, AOFF_RBTree_t *blk)
+{
+ AOFF_RBTree_t *x = blk, *y;
+
+ /*
+ * Rearrange the tree so that it satisfies the Red-Black Tree properties
+ */
+
+ RBT_ASSERT(x != *root && IS_RED(x->parent));
+ do {
+
+ /*
+ * x and its parent are both red. Move the red pair up the tree
+ * until we get to the root or until we can separate them.
+ */
+
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(x->parent->parent);
+
+ if (x->parent == x->parent->parent->left) {
+ y = x->parent->parent->right;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ x = x->parent;
+ SET_BLACK(x);
+ x = x->parent;
+ SET_RED(x);
+ }
+ else {
+
+ if (x == x->parent->right) {
+ x = x->parent;
+ left_rotate(root, x);
+ }
+
+ RBT_ASSERT(x == x->parent->parent->left->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(x->parent);
+ SET_RED(x->parent->parent);
+ right_rotate(root, x->parent->parent);
+
+ RBT_ASSERT(x == x->parent->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent->right));
+ RBT_ASSERT(IS_BLACK(x->parent));
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x->parent == x->parent->parent->right);
+ y = x->parent->parent->left;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ x = x->parent;
+ SET_BLACK(x);
+ x = x->parent;
+ SET_RED(x);
+ }
+ else {
+
+ if (x == x->parent->left) {
+ x = x->parent;
+ right_rotate(root, x);
+ }
+
+ RBT_ASSERT(x == x->parent->parent->right->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(x->parent);
+ SET_RED(x->parent->parent);
+ left_rotate(root, x->parent->parent);
+
+ RBT_ASSERT(x == x->parent->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent->left));
+ RBT_ASSERT(IS_BLACK(x->parent));
+ break;
+ }
+ }
+ } while (x != *root && IS_RED(x->parent));
+
+ SET_BLACK(*root);
+}
+
+static void
+aoff_unlink_free_block(Allctr_t *allctr, Block_t *del, Uint32 flags)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &alc->sbmbc_root : &alc->mbc_root);
+ Uint spliced_is_black;
+ AOFF_RBTree_t *x, *y, *z = (AOFF_RBTree_t *) del;
+ AOFF_RBTree_t null_x; /* null_x is used to get the fixup started when we
+ splice out a node without children. */
+
+ null_x.parent = NULL;
+
+#ifdef HARD_DEBUG
+ check_tree(*root, 0);
+#endif
+
+ /* Remove node from tree... */
+
+ /* Find node to splice out */
+ if (!z->left || !z->right)
+ y = z;
+ else
+ /* Set y to z:s successor */
+ for(y = z->right; y->left; y = y->left);
+ /* splice out y */
+ x = y->left ? y->left : y->right;
+ spliced_is_black = IS_BLACK(y);
+ if (x) {
+ x->parent = y->parent;
+ }
+ else if (spliced_is_black) {
+ x = &null_x;
+ x->flags = 0;
+ SET_BLACK(x);
+ x->right = x->left = NULL;
+ x->max_sz = 0;
+ x->parent = y->parent;
+ y->left = x;
+ }
+
+ if (!y->parent) {
+ RBT_ASSERT(*root == y);
+ *root = x;
+ }
+ else {
+ if (y == y->parent->left) {
+ y->parent->left = x;
+ }
+ else {
+ RBT_ASSERT(y == y->parent->right);
+ y->parent->right = x;
+ }
+ if (y->parent != z) {
+ lower_max_size(y->parent, (y==z ? NULL : z));
+ }
+ }
+ if (y != z) {
+ /* We spliced out the successor of z; replace z by the successor */
+ replace(root, z, y);
+ }
+
+ if (spliced_is_black) {
+ /* We removed a black node which makes the resulting tree
+ violate the Red-Black Tree properties. Fixup tree... */
+
+ while (IS_BLACK(x) && x->parent) {
+
+ /*
+ * x has an "extra black" which we move up the tree
+ * until we reach the root or until we can get rid of it.
+ *
+ * y is the sibling of x
+ */
+
+ if (x == x->parent->left) {
+ y = x->parent->right;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(x->parent));
+ SET_RED(x->parent);
+ left_rotate(root, x->parent);
+ y = x->parent->right;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
+ SET_RED(y);
+ x = x->parent;
+ }
+ else {
+ if (IS_BLACK(y->right)) {
+ SET_BLACK(y->left);
+ SET_RED(y);
+ right_rotate(root, y);
+ y = x->parent->right;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(x->parent)) {
+
+ SET_BLACK(x->parent);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->right);
+ SET_BLACK(y->right);
+ left_rotate(root, x->parent);
+ x = *root;
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ y = x->parent->left;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(x->parent));
+ SET_RED(x->parent);
+ right_rotate(root, x->parent);
+ y = x->parent->left;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
+ SET_RED(y);
+ x = x->parent;
+ }
+ else {
+ if (IS_BLACK(y->left)) {
+ SET_BLACK(y->right);
+ SET_RED(y);
+ left_rotate(root, y);
+ y = x->parent->left;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(x->parent)) {
+ SET_BLACK(x->parent);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->left);
+ SET_BLACK(y->left);
+ right_rotate(root, x->parent);
+ x = *root;
+ break;
+ }
+ }
+ }
+ SET_BLACK(x);
+
+ if (null_x.parent) {
+ if (null_x.parent->left == &null_x)
+ null_x.parent->left = NULL;
+ else {
+ RBT_ASSERT(null_x.parent->right == &null_x);
+ null_x.parent->right = NULL;
+ }
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ else if (*root == &null_x) {
+ *root = NULL;
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ }
+
+ DESTROY_TREE_NODE(del);
+
+#ifdef HARD_DEBUG
+ check_tree(*root, 0);
+#endif
+}
+
+static void
+aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;
+ AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &alc->sbmbc_root : &alc->mbc_root);
+ Uint blk_sz = BLK_SZ(blk);
+
+#ifdef HARD_DEBUG
+ check_tree(*root, 0);
+#endif
+
+ blk->flags = 0;
+ blk->left = NULL;
+ blk->right = NULL;
+ blk->max_sz = blk_sz;
+
+ if (!*root) {
+ blk->parent = NULL;
+ SET_BLACK(blk);
+ *root = blk;
+ }
+ else {
+ AOFF_RBTree_t *x = *root;
+ while (1) {
+ if (x->max_sz < blk_sz) {
+ x->max_sz = blk_sz;
+ }
+ if (blk < x) {
+ if (!x->left) {
+ blk->parent = x;
+ x->left = blk;
+ break;
+ }
+ x = x->left;
+ }
+ else {
+ if (!x->right) {
+ blk->parent = x;
+ x->right = blk;
+ break;
+ }
+ x = x->right;
+ }
+
+ }
+
+ /* Insert block into size tree */
+ RBT_ASSERT(blk->parent);
+
+ SET_RED(blk);
+ if (IS_RED(blk->parent))
+ tree_insert_fixup(root, blk);
+ }
+
+#ifdef HARD_DEBUG
+ check_tree(*root, 0);
+#endif
+}
+
+static Block_t *
+aoff_get_free_block(Allctr_t *allctr, Uint size,
+ Block_t *cand_blk, Uint cand_size, Uint32 flags)
+{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
+ AOFF_RBTree_t *x = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? alc->sbmbc_root : alc->mbc_root);
+ AOFF_RBTree_t *blk = NULL;
+#ifdef HARD_DEBUG
+ AOFF_RBTree_t* dbg_blk = check_tree(x, size);
+#endif
+
+ ASSERT(!cand_blk || cand_size >= size);
+
+ while (x) {
+ if (x->left && x->left->max_sz >= size) {
+ x = x->left;
+ }
+ else if (BLK_SZ(x) >= size) {
+ blk = x;
+ break;
+ }
+ else {
+ x = x->right;
+ }
+ }
+
+#ifdef HARD_DEBUG
+ ASSERT(blk == dbg_blk);
+#endif
+
+ if (!blk)
+ return NULL;
+
+ if (cand_blk && cand_blk < &blk->hdr) {
+ return NULL; /* cand_blk was better */
+ }
+
+ aoff_unlink_free_block(allctr, (Block_t *) blk, flags);
+
+ return (Block_t *) blk;
+}
+
+
+/*
+ * info_options()
+ */
+
+static struct {
+ Eterm as;
+ Eterm aoff;
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void
+init_atoms(void)
+{
+#ifdef DEBUG
+ Eterm *atom;
+#endif
+
+ if (atoms_initialized)
+ return;
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+ AM_INIT(as);
+ AM_INIT(aoff);
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+
+ atoms_initialized = 1;
+}
+
+
+#define bld_uint erts_bld_uint
+#define bld_cons erts_bld_cons
+#define bld_tuple erts_bld_tuple
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+static Eterm
+info_options(Allctr_t *allctr,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ erts_print(*print_to_p,
+ print_to_arg,
+ "%sas: %s\n",
+ prefix,
+ "aoff");
+ }
+
+ if (hpp || szp) {
+
+ if (!atoms_initialized)
+ erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+ __FILE__, __LINE__);
+
+ res = NIL;
+ add_2tup(hpp, szp, &res, am.as, am.aoff);
+ }
+
+ return res;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * NOTE: erts_aoffalc_test() is only supposed to be used for testing. *
+ * *
+ * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
+ * to erts_aoffalc_test() *
+\* */
+
+unsigned long
+erts_aoffalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+{
+ switch (op) {
+ case 0x500: return (unsigned long) 0; /* IS_AOBF */
+ case 0x501: return (unsigned long) ((AOFFAllctr_t *) a1)->mbc_root;
+ case 0x502: return (unsigned long) ((AOFF_RBTree_t *) a1)->parent;
+ case 0x503: return (unsigned long) ((AOFF_RBTree_t *) a1)->left;
+ case 0x504: return (unsigned long) ((AOFF_RBTree_t *) a1)->right;
+ case 0x506: return (unsigned long) IS_BLACK((AOFF_RBTree_t *) a1);
+ case 0x508: return (unsigned long) 1; /* IS_AOFF */
+ case 0x509: return (unsigned long) ((AOFF_RBTree_t *) a1)->max_sz;
+ default: ASSERT(0); return ~((unsigned long) 0);
+ }
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Debug functions *
+\* */
+
+
+#ifdef HARD_DEBUG
+
+#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
+#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
+
+#define SET_LEFT_VISITED(FB) ((FB)->flags |= LEFT_VISITED_FLG)
+#define SET_RIGHT_VISITED(FB) ((FB)->flags |= RIGHT_VISITED_FLG)
+
+#define UNSET_LEFT_VISITED(FB) ((FB)->flags &= ~LEFT_VISITED_FLG)
+#define UNSET_RIGHT_VISITED(FB) ((FB)->flags &= ~RIGHT_VISITED_FLG)
+
+
+#if 0
+# define PRINT_TREE
+#else
+# undef PRINT_TREE
+#endif
+
+#ifdef PRINT_TREE
+static void print_tree(AOFF_RBTree_t*);
+#endif
+
+/*
+ * Checks that the order between parent and children is correct,
+ * and that the Red-Black Tree properties are satisfied. If size > 0,
+ * check_tree() returns the node that satisfies "address order first fit".
+ *
+ * The Red-Black Tree properties are:
+ * 1. Every node is either red or black.
+ * 2. Every leaf (NIL) is black.
+ * 3. If a node is red, then both its children are black.
+ * 4. Every simple path from a node to a descendant leaf
+ * contains the same number of black nodes.
+ *
+ * + own.max_size == MAX(own.size, left.max_size, right.max_size)
+ */
+
+static AOFF_RBTree_t *
+check_tree(AOFF_RBTree_t* root, Uint size)
+{
+ AOFF_RBTree_t *res = NULL;
+ Sint blacks;
+ Sint curr_blacks;
+ AOFF_RBTree_t *x;
+
+#ifdef PRINT_TREE
+ print_tree(root);
+#endif
+
+ if (!root)
+ return res;
+
+ x = root;
+ ASSERT(IS_BLACK(x));
+ ASSERT(!x->parent);
+ curr_blacks = 1;
+ blacks = -1;
+
+ while (x) {
+ if (!IS_LEFT_VISITED(x)) {
+ SET_LEFT_VISITED(x);
+ if (x->left) {
+ x = x->left;
+ if (IS_BLACK(x))
+ curr_blacks++;
+ continue;
+ }
+ else {
+ if (blacks < 0)
+ blacks = curr_blacks;
+ ASSERT(blacks == curr_blacks);
+ }
+ }
+
+ if (!IS_RIGHT_VISITED(x)) {
+ SET_RIGHT_VISITED(x);
+ if (x->right) {
+ x = x->right;
+ if (IS_BLACK(x))
+ curr_blacks++;
+ continue;
+ }
+ else {
+ if (blacks < 0)
+ blacks = curr_blacks;
+ ASSERT(blacks == curr_blacks);
+ }
+ }
+
+
+ if (IS_RED(x)) {
+ ASSERT(IS_BLACK(x->right));
+ ASSERT(IS_BLACK(x->left));
+ }
+
+ ASSERT(x->parent || x == root);
+
+ if (x->left) {
+ ASSERT(x->left->parent == x);
+ ASSERT(x->left < x);
+ ASSERT(x->left->max_sz <= x->max_sz);
+ }
+
+ if (x->right) {
+ ASSERT(x->right->parent == x);
+ ASSERT(x->right > x);
+ ASSERT(x->right->max_sz <= x->max_sz);
+ }
+ ASSERT(x->max_sz >= BLK_SZ(x));
+ ASSERT(x->max_sz == BLK_SZ(x)
+ || x->max_sz == (x->left ? x->left->max_sz : 0)
+ || x->max_sz == (x->right ? x->right->max_sz : 0));
+
+ if (size && BLK_SZ(x) >= size) {
+ if (!res || x < res) {
+ res = x;
+ }
+ }
+
+ UNSET_LEFT_VISITED(x);
+ UNSET_RIGHT_VISITED(x);
+ if (IS_BLACK(x))
+ curr_blacks--;
+ x = x->parent;
+
+ }
+
+ ASSERT(curr_blacks == 0);
+
+ UNSET_LEFT_VISITED(root);
+ UNSET_RIGHT_VISITED(root);
+
+ return res;
+
+}
+
+
+#ifdef PRINT_TREE
+#define INDENT_STEP 2
+
+#include <stdio.h>
+
+static void
+print_tree_aux(AOFF_RBTree_t *x, int indent)
+{
+ int i;
+
+ if (x) {
+ print_tree_aux(x->right, indent + INDENT_STEP);
+ for (i = 0; i < indent; i++) {
+ putc(' ', stderr);
+ }
+ fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%lu\r\n",
+ IS_BLACK(x) ? "BLACK" : "RED",
+ BLK_SZ(x), (Uint)x, x->max_sz);
+ print_tree_aux(x->left, indent + INDENT_STEP);
+ }
+}
+
+
+static void
+print_tree(AOFF_RBTree_t* root)
+{
+ fprintf(stderr, " --- AOFF tree begin ---\r\n");
+ print_tree_aux(root, 0);
+ fprintf(stderr, " --- AOFF tree end ---\r\n");
+}
+
+#endif /* PRINT_TREE */
+
+#endif /* HARD_DEBUG */
+
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
new file mode 100644
index 0000000000..6fa626f723
--- /dev/null
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -0,0 +1,60 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_AO_FIRSTFIT_ALLOC__
+#define ERL_AO_FIRSTFIT_ALLOC__
+
+#include "erl_alloc_util.h"
+
+#define ERTS_ALC_AOFF_ALLOC_VSN_STR "0.9"
+
+typedef struct AOFFAllctr_t_ AOFFAllctr_t;
+
+typedef struct {
+ int dummy;
+} AOFFAllctrInit_t;
+
+#define ERTS_DEFAULT_AOFF_ALLCTR_INIT {0/*dummy*/}
+
+void erts_aoffalc_init(void);
+Allctr_t *erts_aoffalc_start(AOFFAllctr_t *, AOFFAllctrInit_t*, AllctrInit_t *);
+
+#endif /* #ifndef ERL_AO_FIRSTFIT_ALLOC__ */
+
+
+
+#if defined(GET_ERL_AOFF_ALLOC_IMPL) && !defined(ERL_AOFF_ALLOC_IMPL__)
+#define ERL_AOFF_ALLOC_IMPL__
+
+#define GET_ERL_ALLOC_UTIL_IMPL
+#include "erl_alloc_util.h"
+
+
+struct AOFFAllctr_t_ {
+ Allctr_t allctr; /* Has to be first! */
+
+ struct AOFF_RBTree_t_* mbc_root;
+ struct AOFF_RBTree_t_* sbmbc_root;
+};
+
+unsigned long erts_aoffalc_test(unsigned long, unsigned long, unsigned long);
+
+#endif /* #if defined(GET_ERL_AOFF_ALLOC_IMPL)
+ && !defined(ERL_AOFF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
index 64fad9fe0e..5150a8a507 100644
--- a/erts/emulator/beam/erl_arith.c
+++ b/erts/emulator/beam/erl_arith.c
@@ -164,14 +164,14 @@ BIF_RETTYPE bxor_2(BIF_ALIST_2)
BIF_RET(erts_bxor(BIF_P, BIF_ARG_1, BIF_ARG_2));
}
-BIF_RETTYPE bsl_2(Process* p, Eterm arg1, Eterm arg2)
+BIF_RETTYPE bsl_2(BIF_ALIST_2)
{
- BIF_RET(shift(p, arg1, arg2, 0));
+ BIF_RET(shift(BIF_P, BIF_ARG_1, BIF_ARG_2, 0));
}
-BIF_RETTYPE bsr_2(Process* p, Eterm arg1, Eterm arg2)
+BIF_RETTYPE bsr_2(BIF_ALIST_2)
{
- BIF_RET(shift(p, arg1, arg2, 1));
+ BIF_RET(shift(BIF_P, BIF_ARG_1, BIF_ARG_2, 1));
}
static Eterm
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 91b64411d4..f321ed21aa 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2000-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2000-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,10 +24,19 @@
#include "erl_sys_driver.h"
#include "global.h"
#include "erl_threads.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
+#include "dtrace-wrapper.h"
+
+#define ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ 20
+
+#define ERTS_ASYNC_PRINT_JOB 0
+
+#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q
+# error "Need async ready queue in non-smp case"
+#endif
typedef struct _erl_async {
- struct _erl_async* next;
- struct _erl_async* prev;
DE_Handle* hndl; /* The DE_Handle is needed when port is gone */
Eterm port;
long async_id;
@@ -35,345 +44,530 @@ typedef struct _erl_async {
ErlDrvPDL pdl;
void (*async_invoke)(void*);
void (*async_free)(void*);
-} ErlAsync;
+#if ERTS_USE_ASYNC_READY_Q
+ Uint sched_id;
+ union {
+ ErtsThrQPrepEnQ_t *prep_enq;
+ ErtsThrQFinDeQ_t fin_deq;
+ } q;
+#endif
+} ErtsAsync;
+
+#if ERTS_USE_ASYNC_READY_Q
+
+/*
+ * We can do without the enqueue mutex since it isn't needed for
+ * thread safety. Its only purpose is to put async threads to sleep
+ * during a blast of ready async jobs, in order to reduce
+ * contention on the enqueue end of the async ready queues. During
+ * such a blast without the enqueue mutex, much CPU time is consumed
+ * by the async threads without making much progress, which in turn
+ * slows down the progress of the scheduler threads.
+ */
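
So the mutex is contention shaping rather than a correctness requirement: during a burst, surplus async threads block on the enqueue mutex instead of spinning against the lock-free queue and stealing CPU from the schedulers. A toy sketch of the pattern (the real queue is an ErtsThrQ_t; names here are illustrative):

    #include <pthread.h>

    struct ready_q {
        pthread_mutex_t enq_mtx;               /* serializes producers, nothing more */
        /* ... lock-free queue state lives here ... */
    };

    static void ready_enqueue(struct ready_q *q, void *job,
                              void (*lockfree_enq)(struct ready_q *, void *))
    {
        pthread_mutex_lock(&q->enq_mtx);       /* excess producers sleep here during a burst */
        lockfree_enq(q, job);                  /* the queue itself needs no lock */
        pthread_mutex_unlock(&q->enq_mtx);
    }
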
+#define ERTS_USE_ASYNC_READY_ENQ_MTX 1
+
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
typedef struct {
- erts_mtx_t mtx;
- erts_cnd_t cv;
- erts_tid_t thr;
- int len;
-#ifndef ERTS_SMP
- int hndl;
+ erts_mtx_t enq_mtx;
+} ErtsAsyncReadyQXData;
+
#endif
- ErlAsync* head;
- ErlAsync* tail;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- int no;
+
+typedef struct {
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ union {
+ ErtsAsyncReadyQXData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(ErtsAsyncReadyQXData))];
+ } x;
#endif
-} AsyncQueue;
+ ErtsThrQ_t thr_q;
+ ErtsThrQFinDeQ_t fin_deq;
+} ErtsAsyncReadyQ;
-static erts_smp_spinlock_t async_id_lock;
-static long async_id = 0;
+typedef union {
+ ErtsAsyncReadyQ arq;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))];
+} ErtsAlgndAsyncReadyQ;
-#ifndef ERTS_SMP
+#endif /* ERTS_USE_ASYNC_READY_Q */
-erts_mtx_t async_ready_mtx;
-static ErlAsync* async_ready_list = NULL;
+typedef struct {
+ ErtsThrQ_t thr_q;
+ erts_tid_t thr_id;
+} ErtsAsyncQ;
+
+typedef union {
+ ErtsAsyncQ aq;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncQ))];
+} ErtsAlgndAsyncQ;
+typedef struct {
+ int no_initialized;
+ erts_mtx_t mtx;
+ erts_cnd_t cnd;
+ erts_atomic_t id;
+} ErtsAsyncInit;
+
+typedef struct {
+ union {
+ ErtsAsyncInit data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))];
+ } init;
+ ErtsAlgndAsyncQ *queue;
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsAlgndAsyncReadyQ *ready_queue;
#endif
+} ErtsAsyncData;
/*
-** Initialize worker threads (if supported)
-*/
+ * Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace
+ * calls if they're the last thing in the function. :-(
+ * Many thanks to Trond Norbye, via:
+ * https://github.com/memcached/memcached/commit/6298b3978687530bc9d219b6ac707a1b681b2a46
+ */
+static unsigned gcc_optimizer_hack = 0;
-/* Detach from driver */
-static void async_detach(DE_Handle* dh)
-{
- return;
-}
+int erts_async_max_threads; /* Initialized by erl_init.c */
+int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
+static ErtsAsyncData *async;
-#ifdef USE_THREADS
+#ifndef USE_THREADS
-static AsyncQueue* async_q;
+void
+erts_init_async(void)
+{
-static void* async_main(void*);
-static void async_add(ErlAsync*, AsyncQueue*);
+}
-#ifndef ERTS_SMP
-typedef struct ErtsAsyncReadyCallback_ ErtsAsyncReadyCallback;
-struct ErtsAsyncReadyCallback_ {
- struct ErtsAsyncReadyCallback_ *next;
- void (*callback)(void);
-};
+#else
-static ErtsAsyncReadyCallback *callbacks;
-static int async_handle;
+static void *async_main(void *);
-int erts_register_async_ready_callback(void (*funcp)(void))
+static ERTS_INLINE ErtsAsyncQ *
+async_q(int i)
{
- ErtsAsyncReadyCallback *cb = erts_alloc(ERTS_ALC_T_ARCALLBACK,
- sizeof(ErtsAsyncReadyCallback));
- cb->next = callbacks;
- cb->callback = funcp;
- erts_mtx_lock(&async_ready_mtx);
- callbacks = cb;
- erts_mtx_unlock(&async_ready_mtx);
- return async_handle;
+ return &async->queue[i].aq;
}
-#endif
-int init_async(int hndl)
-{
- erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
- AsyncQueue* q;
- int i;
+#if ERTS_USE_ASYNC_READY_Q
- thr_opts.detached = 0;
- thr_opts.suggested_stack_size = erts_async_thread_suggested_stack_size;
-
-#ifndef ERTS_SMP
- callbacks = NULL;
- async_handle = hndl;
- erts_mtx_init(&async_ready_mtx, "async_ready");
- async_ready_list = NULL;
-#endif
-
- async_id = 0;
- erts_smp_spinlock_init(&async_id_lock, "async_id");
-
- async_q = q = (AsyncQueue*)
- (erts_async_max_threads
- ? erts_alloc(ERTS_ALC_T_ASYNC_Q,
- erts_async_max_threads * sizeof(AsyncQueue))
- : NULL);
- for (i = 0; i < erts_async_max_threads; i++) {
- q->head = NULL;
- q->tail = NULL;
- q->len = 0;
-#ifndef ERTS_SMP
- q->hndl = hndl;
-#endif
-#ifdef ERTS_ENABLE_LOCK_CHECK
- q->no = i;
-#endif
- erts_mtx_init(&q->mtx, "asyncq");
- erts_cnd_init(&q->cv);
- erts_thr_create(&q->thr, async_main, (void*)q, &thr_opts);
- q++;
- }
- return 0;
+static ERTS_INLINE ErtsAsyncReadyQ *
+async_ready_q(Uint sched_id)
+{
+ return &async->ready_queue[((int)sched_id)-1].arq;
}
+#endif
-int exit_async()
+void
+erts_init_async(void)
{
- int i;
+ async = NULL;
+ if (erts_async_max_threads > 0) {
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
+#endif
+ erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
+ char *ptr;
+ size_t tot_size = 0;
+ int i;
+
+ tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
+ tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
+#if ERTS_USE_ASYNC_READY_Q
+ tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
+#endif
- /* terminate threads */
- for (i = 0; i < erts_async_max_threads; i++) {
- ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC,
- sizeof(ErlAsync));
- a->port = NIL;
- async_add(a, &async_q[i]);
- }
+ ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
+ tot_size);
- for (i = 0; i < erts_async_max_threads; i++) {
- erts_thr_join(async_q[i].thr, NULL);
- erts_mtx_destroy(&async_q[i].mtx);
- erts_cnd_destroy(&async_q[i].cv);
- }
-#ifndef ERTS_SMP
- erts_mtx_destroy(&async_ready_mtx);
+ async = (ErtsAsyncData *) ptr;
+ ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
+
+ async->init.data.no_initialized = 0;
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_cnd_init(&async->init.data.cnd);
+ erts_atomic_init_nob(&async->init.data.id, 0);
+
+ async->queue = (ErtsAlgndAsyncQ *) ptr;
+ ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
+
+#if ERTS_USE_ASYNC_READY_Q
+
+ qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
+ qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
+ qinit.notify = erts_notify_check_async_ready_queue;
+
+ async->ready_queue = (ErtsAlgndAsyncReadyQ *) ptr;
+ ptr += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
+
+ for (i = 1; i <= erts_no_schedulers; i++) {
+ ErtsAsyncReadyQ *arq = async_ready_q(i);
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
#endif
- if (async_q)
- erts_free(ERTS_ALC_T_ASYNC_Q, (void *) async_q);
- return 0;
+ erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
+ qinit.arg = (void *) (SWord) i;
+ erts_thr_q_initialize(&arq->thr_q, &qinit);
+ }
+
+#endif
+
+ /* Create async threads... */
+
+ thr_opts.detached = 0;
+ thr_opts.suggested_stack_size
+ = erts_async_thread_suggested_stack_size;
+
+ for (i = 0; i < erts_async_max_threads; i++) {
+ ErtsAsyncQ *aq = async_q(i);
+ erts_thr_create(&aq->thr_id, async_main, (void*) aq, &thr_opts);
+ }
+
+ /* Wait for async threads to initialize... */
+
+ erts_mtx_lock(&async->init.data.mtx);
+ while (async->init.data.no_initialized != erts_async_max_threads)
+ erts_cnd_wait(&async->init.data.cnd, &async->init.data.mtx);
+ erts_mtx_unlock(&async->init.data.mtx);
+
+ erts_mtx_destroy(&async->init.data.mtx);
+ erts_cnd_destroy(&async->init.data.cnd);
+
+ }
}
+#if ERTS_USE_ASYNC_READY_Q
-static void async_add(ErlAsync* a, AsyncQueue* q)
+void *
+erts_get_async_ready_queue(Uint sched_id)
{
+ return async ? (void *) async_ready_q(sched_id) : NULL;
+}
+
+#endif
+
+static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
+{
+ int len;
+
if (is_internal_port(a->port)) {
- ERTS_LC_ASSERT(erts_drvportid2port(a->port));
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
+ a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
+#endif
/* make sure the driver will stay around */
- driver_lock_driver(internal_port_index(a->port));
+ if (a->hndl)
+ erts_ddll_reference_referenced_driver(a->hndl);
}
- erts_mtx_lock(&q->mtx);
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "-> %ld\n", a->async_id);
+#endif
- if (q->len == 0) {
- q->head = a;
- q->tail = a;
- q->len = 1;
- erts_cnd_signal(&q->cv);
- }
- else { /* no need to signal (since the worker is working) */
- a->next = q->head;
- q->head->prev = a;
- q->head = a;
- q->len++;
+ erts_thr_q_enqueue(&q->thr_q, a);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(aio_pool_add)) {
+ DTRACE_CHARBUF(port_str, 16);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
+ /* DTRACE TODO: Get the queue length from erts_thr_q_enqueue() ? */
+ len = -1;
+ DTRACE2(aio_pool_add, port_str, len);
}
- erts_mtx_unlock(&q->mtx);
+#endif
+ gcc_optimizer_hack++;
}
-static ErlAsync* async_get(AsyncQueue* q)
+static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
+ erts_tse_t *tse,
+ ErtsThrQPrepEnQ_t **prep_enq)
{
- ErlAsync* a;
+#if ERTS_USE_ASYNC_READY_Q
+ int saved_fin_deq = 0;
+ ErtsThrQFinDeQ_t fin_deq;
+#endif
+ int len;
- erts_mtx_lock(&q->mtx);
- while((a = q->tail) == NULL) {
- erts_cnd_wait(&q->cv, &q->mtx);
- }
+ while (1) {
+ ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
+ if (a) {
+
+#if ERTS_USE_ASYNC_READY_Q
+ *prep_enq = a->q.prep_enq;
+ erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
+ if (saved_fin_deq)
+ erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq);
+#endif
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(aio_pool_get)) {
+ DTRACE_CHARBUF(port_str, 16);
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", a->port);
+ /* DTRACE TODO: Get the length from erts_thr_q_dequeue() ? */
+ len = -1;
+ DTRACE2(aio_pool_get, port_str, len);
+ }
+#endif
+ return a;
+ }
+
+ if (ERTS_THR_Q_DIRTY != erts_thr_q_clean(q)) {
+ ErtsThrQFinDeQ_t tmp_fin_deq;
+
+ erts_tse_reset(tse);
+
+#if ERTS_USE_ASYNC_READY_Q
+ chk_fin_deq:
+ if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
+ if (!saved_fin_deq) {
+ erts_thr_q_finalize_dequeue_state_init(&fin_deq);
+ saved_fin_deq = 1;
+ }
+ erts_thr_q_append_finalize_dequeue_data(&fin_deq,
+ &tmp_fin_deq);
+ }
+#endif
+
+ switch (erts_thr_q_inspect(q, 1)) {
+ case ERTS_THR_Q_DIRTY:
+ break;
+ case ERTS_THR_Q_NEED_THR_PRGR:
#ifdef ERTS_SMP
- ASSERT(a && q->tail == a);
+ {
+ ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
+ erts_thr_progress_wakeup(NULL, prgr);
+ /*
+ * We do no dequeue finalizing, in the hope that a new async
+ * job will arrive before we are woken due to thread
+ * progress...
+ */
+ erts_tse_wait(tse);
+ break;
+ }
#endif
- if (q->head == q->tail) {
- q->head = q->tail = NULL;
- q->len = 0;
- }
- else {
- q->tail->prev->next = NULL;
- q->tail = q->tail->prev;
- q->len--;
+ case ERTS_THR_Q_CLEAN:
+
+#if ERTS_USE_ASYNC_READY_Q
+ if (saved_fin_deq) {
+ if (erts_thr_q_finalize_dequeue(&fin_deq))
+ goto chk_fin_deq;
+ else
+ saved_fin_deq = 0;
+ }
+#endif
+
+ erts_tse_wait(tse);
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ }
}
- erts_mtx_unlock(&q->mtx);
- return a;
}
-
-static int async_del(long id)
+static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
- int i;
- /* scan all queue for an entry with async_id == 'id' */
-
- for (i = 0; i < erts_async_max_threads; i++) {
- ErlAsync* a;
- erts_mtx_lock(&async_q[i].mtx);
-
- a = async_q[i].head;
- while(a != NULL) {
- if (a->async_id == id) {
- if (a->prev != NULL)
- a->prev->next = a->next;
- else
- async_q[i].head = a->next;
- if (a->next != NULL)
- a->next->prev = a->prev;
- else
- async_q[i].tail = a->prev;
- async_q[i].len--;
- erts_mtx_unlock(&async_q[i].mtx);
- if (a->async_free != NULL)
- a->async_free(a->async_data);
- async_detach(a->hndl);
- erts_free(ERTS_ALC_T_ASYNC, a);
- return 1;
- }
- a = a->next;
+ Port *p = erts_id2port_sflgs(a->port,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+ if (!p) {
+ if (a->async_free)
+ a->async_free(a->async_data);
+ }
+ else {
+ if (async_ready(p, a->async_data)) {
+ if (a->async_free)
+ a->async_free(a->async_data);
}
- erts_mtx_unlock(&async_q[i].mtx);
+ erts_port_release(p);
}
- return 0;
+ if (a->hndl)
+ erts_ddll_dereference_driver(a->hndl);
}
-static void* async_main(void* arg)
+static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
- AsyncQueue* q = (AsyncQueue*) arg;
+#if ERTS_USE_ASYNC_READY_Q
+ ErtsAsyncReadyQ *arq;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- {
- char buf[27];
- erts_snprintf(&buf[0], 27, "async %d", q->no);
- erts_lc_set_thread_name(&buf[0]);
- }
+ if (a->pdl)
+ driver_pdl_dec_refc(a->pdl);
+
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "=>> %ld\n", a->async_id);
#endif
- while(1) {
- ErlAsync* a = async_get(q);
+ arq = async_ready_q(a->sched_id);
- if (a->port == NIL) { /* TIME TO DIE SIGNAL */
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
- break;
- }
- else {
- (*a->async_invoke)(a->async_data);
- /* Major problem if the code for async_invoke
- or async_free is removed during a blocking operation */
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ erts_mtx_lock(&arq->x.data.enq_mtx);
+#endif
+
+ erts_thr_q_enqueue_prepared(&arq->thr_q, (void *) a, prep_enq);
+
+#if ERTS_USE_ASYNC_READY_ENQ_MTX
+ erts_mtx_unlock(&arq->x.data.enq_mtx);
+#endif
+
+#else /* ERTS_USE_ASYNC_READY_Q */
+
+ call_async_ready(a);
+ if (a->pdl)
+ driver_pdl_dec_refc(a->pdl);
+ erts_free(ERTS_ALC_T_ASYNC, (void *) a);
+
+#endif /* ERTS_USE_ASYNC_READY_Q */
+}
+
+
+static void
+async_wakeup(void *vtse)
+{
+ erts_tse_set((erts_tse_t *) vtse);
+}
+
+static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
+{
+ ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
+ erts_tse_t *tse = erts_tse_fetch();
#ifdef ERTS_SMP
- {
- Port *p;
- p = erts_id2port_sflgs(a->port,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
- if (!p) {
- if (a->async_free)
- (*a->async_free)(a->async_data);
- }
- else {
- if (async_ready(p, a->async_data)) {
- if (a->async_free)
- (*a->async_free)(a->async_data);
- }
- async_detach(a->hndl);
- erts_port_release(p);
- }
- if (a->pdl) {
- driver_pdl_dec_refc(a->pdl);
- }
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
- }
-#else
- if (a->pdl) {
- driver_pdl_dec_refc(a->pdl);
- }
- erts_mtx_lock(&async_ready_mtx);
- a->next = async_ready_list;
- async_ready_list = a;
- erts_mtx_unlock(&async_ready_mtx);
- sys_async_ready(q->hndl);
+ ErtsThrPrgrCallbacks callbacks;
+
+ callbacks.arg = (void *) tse;
+ callbacks.wakeup = async_wakeup;
+ callbacks.prepare_wait = NULL;
+ callbacks.wait = NULL;
+
+ erts_thr_progress_register_unmanaged_thread(&callbacks);
#endif
- }
- }
- return NULL;
+ qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
+ qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
+ qinit.arg = (void *) tse;
+ qinit.notify = async_wakeup;
+#if ERTS_USE_ASYNC_READY_Q
+ qinit.auto_finalize_dequeue = 0;
+#endif
+
+ erts_thr_q_initialize(&aq->thr_q, &qinit);
+
+ /* Inform main thread that we are done initializing... */
+ erts_mtx_lock(&async->init.data.mtx);
+ async->init.data.no_initialized++;
+ erts_cnd_signal(&async->init.data.cnd);
+ erts_mtx_unlock(&async->init.data.mtx);
+
+ return tse;
}
+static void *async_main(void* arg)
+{
+ ErtsAsyncQ *aq = (ErtsAsyncQ *) arg;
+ erts_tse_t *tse = async_thread_init(aq);
+ while (1) {
+ ErtsThrQPrepEnQ_t *prep_enq;
+ ErtsAsync *a = async_get(&aq->thr_q, tse, &prep_enq);
+ if (is_nil(a->port))
+ break; /* Time to die */
+
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "<- %ld\n", a->async_id);
#endif
-#ifndef ERTS_SMP
+ a->async_invoke(a->async_data);
+
+ async_reply(a, prep_enq);
+ }
+
+ return NULL;
+}
+
+#endif /* USE_THREADS */
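async_main() above terminates when the dequeued job's port is NIL; erts_exit_flush_async() further down uses exactly that as a per-thread poison pill before joining the workers. A sketch of the generic pattern, where queue_enqueue()/queue_dequeue()/job_reply() are hypothetical stand-ins for the ErtsThrQ operations and a NULL job plays the role of port == NIL:

#include <pthread.h>

struct job { void (*invoke)(void *); void *data; };
struct queue;                                   /* hypothetical queue type */
struct job *queue_dequeue(struct queue *q);     /* blocks until a job arrives */
void queue_enqueue(struct queue *q, struct job *j);
void job_reply(struct job *j);                  /* hypothetical reply step */

static void *worker_main(void *arg)
{
    struct queue *q = (struct queue *) arg;
    for (;;) {
        struct job *j = queue_dequeue(q);
        if (!j)
            break;                              /* poison pill: time to die */
        j->invoke(j->data);
        job_reply(j);
    }
    return NULL;
}

static void flush_and_join(struct queue **queues, pthread_t *threads, int n)
{
    int i;
    for (i = 0; i < n; i++)
        queue_enqueue(queues[i], NULL);         /* one sentinel per worker */
    for (i = 0; i < n; i++)
        pthread_join(threads[i], NULL);         /* queues drain before the join */
}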
-int check_async_ready(void)
+void
+erts_exit_flush_async(void)
{
#ifdef USE_THREADS
- ErtsAsyncReadyCallback *cbs;
+ int i;
+ ErtsAsync a;
+ a.port = NIL;
+ /*
+ * Terminate threads in order to flush queues. We do not
+ * bother to clean everything up since we are about to
+ * terminate the runtime system and a cleanup would only
+ * delay the termination.
+ */
+ for (i = 0; i < erts_async_max_threads; i++)
+ async_add(&a, async_q(i));
+ for (i = 0; i < erts_async_max_threads; i++)
+ erts_thr_join(async->queue[i].aq.thr_id, NULL);
#endif
- ErlAsync* a;
- int count = 0;
+}
- erts_mtx_lock(&async_ready_mtx);
- a = async_ready_list;
- async_ready_list = NULL;
-#ifdef USE_THREADS
- cbs = callbacks;
-#endif
- erts_mtx_unlock(&async_ready_mtx);
-
- while(a != NULL) {
- ErlAsync* a_next = a->next;
- /* Every port not dead */
- Port *p = erts_id2port_sflgs(a->port,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
- if (!p) {
- if (a->async_free)
- (*a->async_free)(a->async_data);
- }
- else {
- count++;
- if (async_ready(p, a->async_data)) {
- if (a->async_free != NULL)
- (*a->async_free)(a->async_data);
- }
- async_detach(a->hndl);
- erts_port_release(p);
+#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q
+
+int erts_check_async_ready(void *varq)
+{
+ ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq;
+ int res = 1;
+ int i;
+
+ for (i = 0; i < ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ; i++) {
+ ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(&arq->thr_q);
+ if (!a) {
+ res = 0;
+ break;
}
+
+#if ERTS_ASYNC_PRINT_JOB
+ erts_fprintf(stderr, "<<= %ld\n", a->async_id);
+#endif
+ erts_thr_q_append_finalize_dequeue_data(&arq->fin_deq, &a->q.fin_deq);
+ call_async_ready(a);
erts_free(ERTS_ALC_T_ASYNC, (void *) a);
- a = a_next;
}
-#ifdef USE_THREADS
- for (; cbs; cbs = cbs->next)
- (*cbs->callback)();
-#endif
- return count;
+
+ erts_thr_q_finalize_dequeue(&arq->fin_deq);
+
+ return res;
}
+int erts_async_ready_clean(void *varq, void *val)
+{
+ ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq;
+ ErtsThrQCleanState_t cstate;
+
+ cstate = erts_thr_q_clean(&arq->thr_q);
+
+ if (erts_thr_q_finalize_dequeue(&arq->fin_deq))
+ return ERTS_ASYNC_READY_DIRTY;
+
+ switch (cstate) {
+ case ERTS_THR_Q_DIRTY:
+ return ERTS_ASYNC_READY_DIRTY;
+ case ERTS_THR_Q_NEED_THR_PRGR:
+#ifdef ERTS_SMP
+ *((ErtsThrPrgrVal *) val)
+ = erts_thr_q_need_thr_progress(&arq->thr_q);
+ return ERTS_ASYNC_READY_NEED_THR_PRGR;
#endif
+ case ERTS_THR_Q_CLEAN:
+ break;
+ }
+ return ERTS_ASYNC_READY_CLEAN;
+}
+#endif
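erts_check_async_ready() and erts_async_ready_clean() above are meant to be driven from the scheduler side of the per-scheduler ready queue. The sketch below is inferred from the return codes only and is not how the scheduler loop is actually wired up in this patch; poll_async_ready() and sched_id are assumptions:

static void poll_async_ready(Uint sched_id)
{
#if ERTS_USE_ASYNC_READY_Q
    void *arq = erts_get_async_ready_queue(sched_id);
    if (!arq)
        return;                                /* no async thread pool started */

    /* Each call handles at most ERTS_MAX_ASYNC_READY_CALLS_IN_SEQ jobs;
     * a non-zero result means more jobs may still be queued. */
    while (erts_check_async_ready(arq))
        ;

#ifdef ERTS_SMP
    {
        ErtsThrPrgrVal prgr;
        switch (erts_async_ready_clean(arq, (void *) &prgr)) {
        case ERTS_ASYNC_READY_CLEAN:           /* nothing left to clean     */
        case ERTS_ASYNC_READY_DIRTY:           /* retry cleaning later      */
            break;
        case ERTS_ASYNC_READY_NEED_THR_PRGR:   /* wait for progress 'prgr'  */
            break;
        }
    }
#else
    (void) erts_async_ready_clean(arq, NULL);
#endif
#endif
}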
/*
** Schedule async_invoke on a worker thread
@@ -393,19 +587,29 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
void (*async_invoke)(void*), void* async_data,
void (*async_free)(void*))
{
- ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErlAsync));
- Port* prt = erts_drvport2port(ix);
+ ErtsAsync* a;
+ Port* prt;
long id;
unsigned int qix;
+#if ERTS_USE_ASYNC_READY_Q
+ Uint sched_id;
+ sched_id = erts_get_scheduler_id();
+ if (!sched_id)
+ sched_id = 1;
+#endif
+ prt = erts_drvport2port(ix);
if (!prt)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- a->next = NULL;
- a->prev = NULL;
+ a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync));
+
+#if ERTS_USE_ASYNC_READY_Q
+ a->sched_id = sched_id;
+#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
a->port = prt->id;
a->pdl = NULL;
@@ -413,12 +617,16 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
a->async_invoke = async_invoke;
a->async_free = async_free;
- erts_smp_spin_lock(&async_id_lock);
- async_id = (async_id + 1) & 0x7fffffff;
- if (async_id == 0)
- async_id++;
- id = async_id;
- erts_smp_spin_unlock(&async_id_lock);
+ if (!async)
+ id = 0;
+ else {
+ do {
+ id = erts_atomic_inc_read_nob(&async->init.data.id);
+ } while (id == 0);
+ if (id < 0)
+ id *= -1;
+ ASSERT(id > 0);
+ }
a->async_id = id;
@@ -437,7 +645,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
driver_pdl_inc_refc(prt->port_data_lock);
a->pdl = prt->port_data_lock;
}
- async_add(a, &async_q[qix]);
+ async_add(a, async_q(qix));
return id;
}
#endif
@@ -455,10 +663,16 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
int driver_async_cancel(unsigned int id)
{
-#ifdef USE_THREADS
- if (erts_async_max_threads > 0)
- return async_del(id);
-#endif
+ /*
+ * Not supported anymore. Always fail (which is backward
+ * compatible).
+ *
+ * This functionality could be implemented again. However,
+ * it is (and always has been) completely useless since
+ * it doesn't give you any guarantees whatsoever. The user
+ * needs to (and always has had to) synchronize in his/her
+ * own code in order to get any guarantees.
+ */
return 0;
}
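With cancellation reduced to a no-op, a driver must track completion itself: async_invoke runs on an async pool thread, and the driver's ready_async callback (if it has one) is later called on a scheduler thread with the same async_data. A minimal sketch against the public erl_driver API; everything except driver_async() and the ready_async callback slot is a made-up name:

#include "erl_driver.h"

/* Runs on an async pool thread; do the blocking work here. */
static void my_invoke(void *async_data)
{
    /* ... perform the blocking operation on async_data ... */
}

/* Hypothetical helper called from a driver callback that holds the port
 * lock (e.g. the driver's output callback). A NULL key lets the VM pick
 * the queue, and a NULL async_free is allowed. */
static void submit_job(ErlDrvPort port, void *job_state)
{
    (void) driver_async(port, NULL, my_invoke, job_state, NULL);
}

/* Wired into ErlDrvEntry.ready_async; invoked once the job has run.
 * Record completion in the driver's own state here rather than relying
 * on driver_async_cancel(). */
static void my_ready_async(ErlDrvData drv_data, ErlDrvThreadData async_data)
{
    (void) drv_data;
    (void) async_data;    /* update the driver's bookkeeping for this job */
}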
diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h
new file mode 100644
index 0000000000..95374a8fc9
--- /dev/null
+++ b/erts/emulator/beam/erl_async.h
@@ -0,0 +1,66 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_ASYNC_H__
+#define ERL_ASYNC_H__
+
+#define ERTS_MAX_NO_OF_ASYNC_THREADS 1024
+extern int erts_async_max_threads;
+#define ERTS_ASYNC_THREAD_MIN_STACK_SIZE 16 /* Kilo words */
+#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
+extern int erts_async_thread_suggested_stack_size;
+
+#ifdef USE_THREADS
+
+#ifdef ERTS_SMP
+/*
+ * With smp support we can choose to have, or not to
+ * have an async ready queue.
+ */
+#define ERTS_USE_ASYNC_READY_Q 1
+#endif
+
+#ifndef ERTS_SMP
+/* In non-smp case we *need* the async ready queue */
+# undef ERTS_USE_ASYNC_READY_Q
+# define ERTS_USE_ASYNC_READY_Q 1
+#endif
+
+#ifndef ERTS_USE_ASYNC_READY_Q
+# define ERTS_USE_ASYNC_READY_Q 0
+#endif
+
+#if ERTS_USE_ASYNC_READY_Q
+int erts_check_async_ready(void *);
+int erts_async_ready_clean(void *, void *);
+void *erts_get_async_ready_queue(Uint sched_id);
+#define ERTS_ASYNC_READY_CLEAN 0
+#define ERTS_ASYNC_READY_DIRTY 1
+#ifdef ERTS_SMP
+#define ERTS_ASYNC_READY_NEED_THR_PRGR 2
+#endif
+#endif /* ERTS_USE_ASYNC_READY_Q */
+
+#endif /* USE_THREADS */
+
+void erts_init_async(void);
+void erts_exit_flush_async(void);
+
+
+#endif /* ERL_ASYNC_H__ */
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index 3035e5df16..c50fdeb4e8 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -84,24 +84,24 @@
#ifdef HARD_DEBUG
-static RBTree_t * check_tree(BFAllctr_t *, Uint);
+static RBTree_t * check_tree(RBTree_t *, int, Uint);
#endif
-static void tree_delete(Allctr_t *allctr, Block_t *del);
+static void tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags);
/* Prototypes of callback functions */
/* "address order best fit" specific callback functions */
static Block_t * aobf_get_free_block (Allctr_t *, Uint,
- Block_t *, Uint);
-static void aobf_link_free_block (Allctr_t *, Block_t *);
+ Block_t *, Uint, Uint32);
+static void aobf_link_free_block (Allctr_t *, Block_t *, Uint32);
#define aobf_unlink_free_block tree_delete
/* "best fit" specific callback functions */
static Block_t * bf_get_free_block (Allctr_t *, Uint,
- Block_t *, Uint);
-static void bf_link_free_block (Allctr_t *, Block_t *);
-static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *);
+ Block_t *, Uint, Uint32);
+static void bf_link_free_block (Allctr_t *, Block_t *, Uint32);
+static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *, Uint32);
static Eterm info_options (Allctr_t *, char *, int *,
@@ -161,14 +161,18 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
BFAllctrInit_t *bfinit,
AllctrInit_t *init)
{
- BFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ BFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) bfallctr;
- sys_memcpy((void *) bfallctr, (void *) &nulled_state, sizeof(BFAllctr_t));
+ sys_memcpy((void *) bfallctr, (void *) &zero.allctr, sizeof(BFAllctr_t));
bfallctr->address_order = bfinit->ao;
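The dummy-first-member trick above sidesteps a spurious gcc "missing braces around initializer" warning that fires when {0} initializes a struct whose first member is itself a struct. A self-contained illustration with made-up type names:

struct inner { int a; int b; };
struct outer { struct inner in; int c; };

/* gcc (with the brace warning enabled) complains about this form ... */
static struct outer warns  = {0};

/* ... but not about this one; 'o' and everything inside it is still
 * zero-initialized, just like the BFAllctr_t copied in above. */
struct wrap { int dummy; struct outer o; };
static struct wrap  silent = {0};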
@@ -303,7 +307,7 @@ replace(RBTree_t **root, RBTree_t *x, RBTree_t *y)
}
static void
-tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
+tree_insert_fixup(RBTree_t **root, RBTree_t *blk)
{
RBTree_t *x = blk, *y;
@@ -311,7 +315,7 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
* Rearrange the tree so that it satisfies the Red-Black Tree properties
*/
- RBT_ASSERT(x != bfallctr->root && IS_RED(x->parent));
+ RBT_ASSERT(x != *root && IS_RED(x->parent));
do {
/*
@@ -336,7 +340,7 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
if (x == x->parent->right) {
x = x->parent;
- left_rotate(&bfallctr->root, x);
+ left_rotate(root, x);
}
RBT_ASSERT(x == x->parent->parent->left->left);
@@ -347,7 +351,7 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
SET_BLACK(x->parent);
SET_RED(x->parent->parent);
- right_rotate(&bfallctr->root, x->parent->parent);
+ right_rotate(root, x->parent->parent);
RBT_ASSERT(x == x->parent->left);
RBT_ASSERT(IS_RED(x));
@@ -370,7 +374,7 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
if (x == x->parent->left) {
x = x->parent;
- right_rotate(&bfallctr->root, x);
+ right_rotate(root, x);
}
RBT_ASSERT(x == x->parent->parent->right->right);
@@ -381,7 +385,7 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
SET_BLACK(x->parent);
SET_RED(x->parent->parent);
- left_rotate(&bfallctr->root, x->parent->parent);
+ left_rotate(root, x->parent->parent);
RBT_ASSERT(x == x->parent->right);
RBT_ASSERT(IS_RED(x));
@@ -390,9 +394,9 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
break;
}
}
- } while (x != bfallctr->root && IS_RED(x->parent));
+ } while (x != *root && IS_RED(x->parent));
- SET_BLACK(bfallctr->root);
+ SET_BLACK(*root);
}
@@ -402,18 +406,22 @@ tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
* callback function in the address order case.
*/
static void
-tree_delete(Allctr_t *allctr, Block_t *del)
+tree_delete(Allctr_t *allctr, Block_t *del, Uint32 flags)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
Uint spliced_is_black;
+ RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &bfallctr->sbmbc_root
+ : &bfallctr->mbc_root);
RBTree_t *x, *y, *z = (RBTree_t *) del;
RBTree_t null_x; /* null_x is used to get the fixup started when we
splice out a node without children. */
null_x.parent = NULL;
+
#ifdef HARD_DEBUG
- check_tree(bfallctr, 0);
+ check_tree(*root, bfallctr->address_order, 0);
#endif
/* Remove node from tree... */
@@ -440,8 +448,8 @@ tree_delete(Allctr_t *allctr, Block_t *del)
}
if (!y->parent) {
- RBT_ASSERT(bfallctr->root == y);
- bfallctr->root = x;
+ RBT_ASSERT(*root == y);
+ *root = x;
}
else if (y == y->parent->left)
y->parent->left = x;
@@ -451,7 +459,7 @@ tree_delete(Allctr_t *allctr, Block_t *del)
}
if (y != z) {
/* We spliced out the successor of z; replace z by the successor */
- replace(&bfallctr->root, z, y);
+ replace(root, z, y);
}
if (spliced_is_black) {
@@ -476,7 +484,7 @@ tree_delete(Allctr_t *allctr, Block_t *del)
SET_BLACK(y);
RBT_ASSERT(IS_BLACK(x->parent));
SET_RED(x->parent);
- left_rotate(&bfallctr->root, x->parent);
+ left_rotate(root, x->parent);
y = x->parent->right;
}
RBT_ASSERT(y);
@@ -489,7 +497,7 @@ tree_delete(Allctr_t *allctr, Block_t *del)
if (IS_BLACK(y->right)) {
SET_BLACK(y->left);
SET_RED(y);
- right_rotate(&bfallctr->root, y);
+ right_rotate(root, y);
y = x->parent->right;
}
RBT_ASSERT(y);
@@ -500,8 +508,8 @@ tree_delete(Allctr_t *allctr, Block_t *del)
}
RBT_ASSERT(y->right);
SET_BLACK(y->right);
- left_rotate(&bfallctr->root, x->parent);
- x = bfallctr->root;
+ left_rotate(root, x->parent);
+ x = *root;
break;
}
}
@@ -515,7 +523,7 @@ tree_delete(Allctr_t *allctr, Block_t *del)
SET_BLACK(y);
RBT_ASSERT(IS_BLACK(x->parent));
SET_RED(x->parent);
- right_rotate(&bfallctr->root, x->parent);
+ right_rotate(root, x->parent);
y = x->parent->left;
}
RBT_ASSERT(y);
@@ -528,7 +536,7 @@ tree_delete(Allctr_t *allctr, Block_t *del)
if (IS_BLACK(y->left)) {
SET_BLACK(y->right);
SET_RED(y);
- left_rotate(&bfallctr->root, y);
+ left_rotate(root, y);
y = x->parent->left;
}
RBT_ASSERT(y);
@@ -538,8 +546,8 @@ tree_delete(Allctr_t *allctr, Block_t *del)
}
RBT_ASSERT(y->left);
SET_BLACK(y->left);
- right_rotate(&bfallctr->root, x->parent);
- x = bfallctr->root;
+ right_rotate(root, x->parent);
+ x = *root;
break;
}
}
@@ -556,8 +564,8 @@ tree_delete(Allctr_t *allctr, Block_t *del)
RBT_ASSERT(!null_x.left);
RBT_ASSERT(!null_x.right);
}
- else if (bfallctr->root == &null_x) {
- bfallctr->root = NULL;
+ else if (*root == &null_x) {
+ *root = NULL;
RBT_ASSERT(!null_x.left);
RBT_ASSERT(!null_x.right);
}
@@ -567,7 +575,7 @@ tree_delete(Allctr_t *allctr, Block_t *del)
DESTROY_TREE_NODE(del);
#ifdef HARD_DEBUG
- check_tree(bfallctr, 0);
+ check_tree(root, bfallctr->address_order, 0);
#endif
}
@@ -577,23 +585,28 @@ tree_delete(Allctr_t *allctr, Block_t *del)
\* */
static void
-aobf_link_free_block(Allctr_t *allctr, Block_t *block)
+aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &bfallctr->sbmbc_root
+ : &bfallctr->mbc_root);
RBTree_t *blk = (RBTree_t *) block;
Uint blk_sz = BLK_SZ(blk);
+
+
blk->flags = 0;
blk->left = NULL;
blk->right = NULL;
- if (!bfallctr->root) {
+ if (!*root) {
blk->parent = NULL;
SET_BLACK(blk);
- bfallctr->root = blk;
+ *root = blk;
}
else {
- RBTree_t *x = bfallctr->root;
+ RBTree_t *x = *root;
while (1) {
Uint size;
@@ -623,28 +636,32 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block)
SET_RED(blk);
if (IS_RED(blk->parent))
- tree_insert_fixup(bfallctr, blk);
+ tree_insert_fixup(root, blk);
}
#ifdef HARD_DEBUG
- check_tree(bfallctr, 0);
+ check_tree(root, 1, 0);
#endif
}
#if 0 /* tree_delete() is directly used instead */
static void
-aobf_unlink_free_block(Allctr_t *allctr, Block_t *block)
+aobf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
- tree_delete(allctr, block);
+ tree_delete(allctr, block, flags);
}
#endif
static Block_t *
aobf_get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size)
+ Block_t *cand_blk, Uint cand_size,
+ Uint32 flags)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t *x = bfallctr->root;
+ RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &bfallctr->sbmbc_root
+ : &bfallctr->mbc_root);
+ RBTree_t *x = *root;
RBTree_t *blk = NULL;
Uint blk_sz;
@@ -665,7 +682,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
return NULL;
#ifdef HARD_DEBUG
- ASSERT(blk == check_tree(bfallctr, size));
+ ASSERT(blk == check_tree(root, 1, size));
#endif
if (cand_blk) {
@@ -676,7 +693,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
return NULL; /* cand_blk was better */
}
- aobf_unlink_free_block(allctr, (Block_t *) blk);
+ aobf_unlink_free_block(allctr, (Block_t *) blk, flags);
return (Block_t *) blk;
}
@@ -687,9 +704,12 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
\* */
static void
-bf_link_free_block(Allctr_t *allctr, Block_t *block)
+bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &bfallctr->sbmbc_root
+ : &bfallctr->mbc_root);
RBTree_t *blk = (RBTree_t *) block;
Uint blk_sz = BLK_SZ(blk);
@@ -700,13 +720,13 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block)
blk->left = NULL;
blk->right = NULL;
- if (!bfallctr->root) {
+ if (!*root) {
blk->parent = NULL;
SET_BLACK(blk);
- bfallctr->root = blk;
+ *root = blk;
}
else {
- RBTree_t *x = bfallctr->root;
+ RBTree_t *x = *root;
while (1) {
Uint size;
@@ -745,7 +765,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block)
SET_RED(blk);
if (IS_RED(blk->parent))
- tree_insert_fixup(bfallctr, blk);
+ tree_insert_fixup(root, blk);
}
@@ -753,14 +773,17 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block)
LIST_NEXT(blk) = NULL;
#ifdef HARD_DEBUG
- check_tree(bfallctr, 0);
+ check_tree(root, 0, 0);
#endif
}
static ERTS_INLINE void
-bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
+bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &bfallctr->sbmbc_root
+ : &bfallctr->mbc_root);
RBTree_t *x = (RBTree_t *) block;
if (IS_LIST_ELEM(x)) {
@@ -778,9 +801,9 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
ASSERT(IS_LIST_ELEM(LIST_NEXT(x)));
#ifdef HARD_DEBUG
- check_tree(bfallctr, 0);
+ check_tree(root, 0, 0);
#endif
- replace(&bfallctr->root, x, LIST_NEXT(x));
+ replace(root, x, LIST_NEXT(x));
#ifdef HARD_DEBUG
check_tree(bfallctr, 0);
@@ -788,7 +811,7 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
}
else {
/* Remove from tree */
- tree_delete(allctr, block);
+ tree_delete(allctr, block, flags);
}
DESTROY_LIST_ELEM(x);
@@ -797,10 +820,14 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
static Block_t *
bf_get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size)
+ Block_t *cand_blk, Uint cand_size,
+ Uint32 flags)
{
BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
- RBTree_t *x = bfallctr->root;
+ RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
+ ? &bfallctr->sbmbc_root
+ : &bfallctr->mbc_root);
+ RBTree_t *x = *root;
RBTree_t *blk = NULL;
Uint blk_sz;
@@ -827,7 +854,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
#ifdef HARD_DEBUG
{
- RBTree_t *ct_blk = check_tree(bfallctr, size);
+ RBTree_t *ct_blk = check_tree(root, 0, size);
ASSERT(BLK_SZ(ct_blk) == BLK_SZ(blk));
}
#endif
@@ -839,7 +866,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
the tree node */
blk = LIST_NEXT(blk) ? LIST_NEXT(blk) : blk;
- bf_unlink_free_block(allctr, (Block_t *) blk);
+ bf_unlink_free_block(allctr, (Block_t *) blk, flags);
return (Block_t *) blk;
}
@@ -949,13 +976,14 @@ erts_bfalc_test(unsigned long op, unsigned long a1, unsigned long a2)
{
switch (op) {
case 0x200: return (unsigned long) ((BFAllctr_t *) a1)->address_order;
- case 0x201: return (unsigned long) ((BFAllctr_t *) a1)->root;
+ case 0x201: return (unsigned long) ((BFAllctr_t *) a1)->mbc_root;
case 0x202: return (unsigned long) ((RBTree_t *) a1)->parent;
case 0x203: return (unsigned long) ((RBTree_t *) a1)->left;
case 0x204: return (unsigned long) ((RBTree_t *) a1)->right;
case 0x205: return (unsigned long) ((RBTreeList_t *) a1)->next;
case 0x206: return (unsigned long) IS_BLACK((RBTree_t *) a1);
case 0x207: return (unsigned long) IS_TREE_NODE((RBTree_t *) a1);
+ case 0x208: return (unsigned long) 0; /* IS_AOFF */
default: ASSERT(0); return ~((unsigned long) 0);
}
}
@@ -985,7 +1013,7 @@ erts_bfalc_test(unsigned long op, unsigned long a1, unsigned long a2)
#endif
#ifdef PRINT_TREE
-static void print_tree(BFAllctr_t *);
+static void print_tree(RBTree_t *, int);
#endif
/*
@@ -1003,7 +1031,7 @@ static void print_tree(BFAllctr_t *);
*/
static RBTree_t *
-check_tree(BFAllctr_t *bfallctr, Uint size)
+check_tree(RBTree_t *root, int ao, Uint size)
{
RBTree_t *res = NULL;
Sint blacks;
@@ -1011,13 +1039,13 @@ check_tree(BFAllctr_t *bfallctr, Uint size)
RBTree_t *x;
#ifdef PRINT_TREE
- print_tree(bfallctr);
+ print_tree(root, ao);
#endif
- if (!bfallctr->root)
+ if (!root)
return res;
- x = bfallctr->root;
+ x = root;
ASSERT(IS_BLACK(x));
ASSERT(!x->parent);
curr_blacks = 1;
@@ -1060,11 +1088,11 @@ check_tree(BFAllctr_t *bfallctr, Uint size)
ASSERT(IS_BLACK(x->left));
}
- ASSERT(x->parent || x == bfallctr->root);
+ ASSERT(x->parent || x == root);
if (x->left) {
ASSERT(x->left->parent == x);
- if (bfallctr->address_order) {
+ if (ao) {
ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
|| (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
}
@@ -1076,7 +1104,7 @@ check_tree(BFAllctr_t *bfallctr, Uint size)
if (x->right) {
ASSERT(x->right->parent == x);
- if (bfallctr->address_order) {
+ if (ao) {
ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
|| (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
}
@@ -1087,7 +1115,7 @@ check_tree(BFAllctr_t *bfallctr, Uint size)
}
if (size && BLK_SZ(x) >= size) {
- if (bfallctr->address_order) {
+ if (ao) {
if (!res
|| BLK_SZ(x) < BLK_SZ(res)
|| (BLK_SZ(x) == BLK_SZ(res) && x < res))
@@ -1109,8 +1137,8 @@ check_tree(BFAllctr_t *bfallctr, Uint size)
ASSERT(curr_blacks == 0);
- UNSET_LEFT_VISITED(bfallctr->root);
- UNSET_RIGHT_VISITED(bfallctr->root);
+ UNSET_LEFT_VISITED(root);
+ UNSET_RIGHT_VISITED(root);
return res;
@@ -1148,11 +1176,11 @@ print_tree_aux(RBTree_t *x, int indent)
static void
-print_tree(BFAllctr_t *bfallctr)
+print_tree(RBTree_t *root, int ao)
{
- char *type = bfallctr->address_order ? "Size-Adress" : "Size";
+ char *type = ao ? "Size-Address" : "Size";
fprintf(stderr, " --- %s tree begin ---\r\n", type);
- print_tree_aux(bfallctr->root, 0);
+ print_tree_aux(root, 0);
fprintf(stderr, " --- %s tree end ---\r\n", type);
}
diff --git a/erts/emulator/beam/erl_bestfit_alloc.h b/erts/emulator/beam/erl_bestfit_alloc.h
index cb35e21e57..0c29662852 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.h
+++ b/erts/emulator/beam/erl_bestfit_alloc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -54,7 +54,8 @@ typedef struct RBTree_t_ RBTree_t;
struct BFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
- RBTree_t * root;
+ RBTree_t * mbc_root;
+ RBTree_t * sbmbc_root;
int address_order;
};
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 684fa5d12f..cc4f2be8eb 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -65,6 +65,10 @@ static Export binary_copy_trap_export;
static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2);
static Uint max_loop_limit;
+static BIF_RETTYPE
+binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
+static BIF_RETTYPE
+binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
void erts_init_bif_binary(void)
{
@@ -1148,7 +1152,7 @@ static int do_binary_match(Process *p, Eterm subject, Uint hsstart, Uint hsend,
erts_free_aligned_binary_bytes(temp_alloc);
return DO_BIN_MATCH_RESTART;
} else {
- Eterm epos = erts_make_integer(pos+hsstart,p);
+ Eterm epos = erts_make_integer(pos,p);
Eterm erlen = erts_make_integer(rlen,p);
hp = HAlloc(p,3);
ret = TUPLE2(hp, epos, erlen);
@@ -1399,6 +1403,12 @@ static BIF_RETTYPE binary_matches_trap(BIF_ALIST_3)
BIF_RETTYPE binary_match_3(BIF_ALIST_3)
{
+ return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
Uint hsstart;
Uint hsend;
Eterm *tp;
@@ -1408,17 +1418,17 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
int runres;
Eterm result;
- if (is_not_binary(BIF_ARG_1)) {
+ if (is_not_binary(arg1)) {
goto badarg;
}
- if (parse_match_opts_list(BIF_ARG_3,BIF_ARG_1,&hsstart,&hsend)) {
+ if (parse_match_opts_list(arg3,arg1,&hsstart,&hsend)) {
goto badarg;
}
if (hsend == 0) {
BIF_RET(am_nomatch);
}
- if (is_tuple(BIF_ARG_2)) {
- tp = tuple_val(BIF_ARG_2);
+ if (is_tuple(arg2)) {
+ tp = tuple_val(arg2);
if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
goto badarg;
}
@@ -1437,13 +1447,13 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
goto badarg;
}
bin_term = tp[2];
- } else if (do_binary_match_compile(BIF_ARG_2,&type,&bin)) {
+ } else if (do_binary_match_compile(arg2,&type,&bin)) {
goto badarg;
}
- runres = do_binary_match(BIF_P,BIF_ARG_1,hsstart,hsend,type,bin,NIL,&result);
+ runres = do_binary_match(p,arg1,hsstart,hsend,type,bin,NIL,&result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1451,17 +1461,23 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3)
case DO_BIN_MATCH_OK:
BIF_RET(result);
case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_match_trap_export, BIF_P, BIF_ARG_1, result, bin_term);
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&binary_match_trap_export, p, arg1, result, bin_term);
default:
goto badarg;
}
badarg:
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
{
+ return binary_matches(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
Uint hsstart, hsend;
Eterm *tp;
Eterm type;
@@ -1470,17 +1486,17 @@ BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
int runres;
Eterm result;
- if (is_not_binary(BIF_ARG_1)) {
+ if (is_not_binary(arg1)) {
goto badarg;
}
- if (parse_match_opts_list(BIF_ARG_3,BIF_ARG_1,&hsstart,&hsend)) {
+ if (parse_match_opts_list(arg3,arg1,&hsstart,&hsend)) {
goto badarg;
}
if (hsend == 0) {
BIF_RET(NIL);
}
- if (is_tuple(BIF_ARG_2)) {
- tp = tuple_val(BIF_ARG_2);
+ if (is_tuple(arg2)) {
+ tp = tuple_val(arg2);
if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
goto badarg;
}
@@ -1499,14 +1515,14 @@ BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
goto badarg;
}
bin_term = tp[2];
- } else if (do_binary_match_compile(BIF_ARG_2,&type,&bin)) {
+ } else if (do_binary_match_compile(arg2,&type,&bin)) {
goto badarg;
}
- runres = do_binary_matches(BIF_P,BIF_ARG_1,hsstart,hsend,type,bin,
+ runres = do_binary_matches(p,arg1,hsstart,hsend,type,bin,
NIL,&result);
if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) {
- Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- bin_term = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), bin);
+ Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
+ bin_term = erts_mk_magic_binary_term(&hp, &MSO(p), bin);
} else if (bin_term == NIL) {
erts_bin_free(bin);
}
@@ -1514,26 +1530,26 @@ BIF_RETTYPE binary_matches_3(BIF_ALIST_3)
case DO_BIN_MATCH_OK:
BIF_RET(result);
case DO_BIN_MATCH_RESTART:
- BUMP_ALL_REDS(BIF_P);
- BIF_TRAP3(&binary_matches_trap_export, BIF_P, BIF_ARG_1, result,
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&binary_matches_trap_export, p, arg1, result,
bin_term);
default:
goto badarg;
}
badarg:
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
BIF_RETTYPE binary_match_2(BIF_ALIST_2)
{
- return binary_match_3(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+ return binary_match(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
}
BIF_RETTYPE binary_matches_2(BIF_ALIST_2)
{
- return binary_matches_3(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
+ return binary_matches(BIF_P,BIF_ARG_1,BIF_ARG_2,((Eterm) 0));
}
@@ -1882,9 +1898,9 @@ static BIF_RETTYPE do_longest_common(Process *p, Eterm list, int direction)
cd = (CommonData *) ERTS_MAGIC_BIN_DATA(mb);
l = list;
while (is_list(l)) {
- Uint bitoffs;
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
Uint bitsize;
- Uint offset;
+ ERTS_DECLARE_DUMMY(Uint offset);
Eterm real_bin;
ProcBin* pb;
@@ -2361,7 +2377,7 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
{
Uint n;
byte *bytes;
- Uint bit_offs;
+ ERTS_DECLARE_DUMMY(Uint bit_offs);
Uint bit_size;
size_t size;
Uint reds = get_reds(p, BINARY_COPY_LOOP_FACTOR);
@@ -2390,9 +2406,9 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
if ((target_size - size) >= reds) {
Eterm orig;
- Uint offset;
- Uint bit_offset;
- Uint bit_size;
+ ERTS_DECLARE_DUMMY(Uint offset);
+ ERTS_DECLARE_DUMMY(Uint bit_offset);
+ ERTS_DECLARE_DUMMY(Uint bit_size);
CopyBinState *cbs;
Eterm *hp;
Eterm trap_term;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 9631fb50db..c338ee1c4b 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -45,6 +45,7 @@
#include "big.h"
#include "dist.h"
#include "erl_version.h"
+#include "dtrace-wrapper.h"
#ifdef ERTS_SMP
#define DDLL_SMP 1
@@ -142,9 +143,11 @@ static void ddll_no_more_references(void *vdh);
* really load and add as LOADED {ok,loaded} {ok,pending_driver}
* {error, permanent} {error,load_error()}
*/
-BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
- Eterm name_term, Eterm options)
+BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
{
+ Eterm path_term = BIF_ARG_1;
+ Eterm name_term = BIF_ARG_2;
+ Eterm options = BIF_ARG_3;
char *path = NULL;
Uint path_len;
char *name = NULL;
@@ -236,7 +239,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
sys_strcpy(path+path_len,name);
#if DDLL_SMP
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
#endif
if ((drv = lookup_driver(name)) != NULL) {
@@ -247,7 +250,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
} else {
dh = drv->handle;
if (dh->status == ERL_DE_OK) {
- int is_last = is_last_user(dh,p);
+ int is_last = is_last_user(dh, BIF_P);
if (reload == 1 && !is_last) {
/*Want reload if no other users,
but there are others...*/
@@ -261,7 +264,8 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
soft_error_term = am_inconsistent;
goto soft_error;
}
- if ((old = find_proc_entry(dh, p, ERL_DE_PROC_LOADED)) ==
+ if ((old = find_proc_entry(dh, BIF_P,
+ ERL_DE_PROC_LOADED)) ==
NULL) {
soft_error_term = am_not_loaded_by_this_process;
goto soft_error;
@@ -272,7 +276,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
}
/* Reload requested and granted */
dereference_all_processes(dh);
- set_driver_reloading(dh, p, path, name, flags);
+ set_driver_reloading(dh, BIF_P, path, name, flags);
if (dh->flags & ERL_DE_FL_KILL_PORTS) {
kill_ports = 1;
}
@@ -286,7 +290,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
soft_error_term = am_inconsistent;
goto soft_error;
}
- add_proc_loaded(dh,p);
+ add_proc_loaded(dh, BIF_P);
erts_ddll_reference_driver(dh);
monitor = 0;
ok_term = mkatom("already_loaded");
@@ -308,7 +312,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
notify_all(dh, drv->name,
ERL_DE_PROC_AWAIT_UNLOAD, am_UP,
am_unload_cancelled);
- add_proc_loaded(dh,p);
+ add_proc_loaded(dh, BIF_P);
erts_ddll_reference_driver(dh);
monitor = 0;
ok_term = mkatom("already_loaded");
@@ -325,7 +329,8 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
goto soft_error;
}
/* Load of granted unload... */
- add_proc_loaded_deref(dh,p); /* Dont reference, will happen after reload */
+ /* Don't reference, will happen after reload */
+ add_proc_loaded_deref(dh, BIF_P);
++monitor;
ok_term = am_pending_driver;
} else { /* ERL_DE_PERMANENT */
@@ -345,7 +350,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
goto soft_error;
} else {
dh->flags = flags;
- add_proc_loaded(dh,p);
+ add_proc_loaded(dh, BIF_P);
first_ddll_reference(dh);
monitor = 0;
ok_term = mkatom("loaded");
@@ -369,7 +374,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
if (!(prt->status & FREE_PORT_FLAGS) &&
prt->drv_ptr->handle == dh) {
#if DDLL_SMP
- erts_smp_atomic_inc(&prt->refc);
+ erts_smp_atomic_inc_nob(&prt->refc);
/* Extremely rare spinlock */
while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
erts_smp_port_state_unlock(prt);
@@ -397,18 +402,18 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
#endif
- p->flags |= F_USING_DDLL;
+ BIF_P->flags |= F_USING_DDLL;
if (monitor) {
- Eterm mref = add_monitor(p, dh, ERL_DE_PROC_AWAIT_LOAD);
- hp = HAlloc(p,4);
+ Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_LOAD);
+ hp = HAlloc(BIF_P, 4);
t = TUPLE3(hp, am_ok, ok_term, mref);
} else {
- hp = HAlloc(p,3);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
#if DDLL_SMP
@@ -416,33 +421,33 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
soft_error:
#if DDLL_SMP
unlock_drv_list();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif
if (do_build_load_error) {
- soft_error_term = build_load_error(p, build_this_load_error);
+ soft_error_term = build_load_error(BIF_P, build_this_load_error);
}
- hp = HAlloc(p,3);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_error, soft_error_term);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
error:
assert_drv_list_not_locked();
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
if (path != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
}
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
- BIF_ERROR(p,BADARG);
+ BIF_ERROR(BIF_P, BADARG);
}
/*
@@ -481,8 +486,10 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
any AWAIT_LOAD-waiters with {'DOWN', ref(), driver, name(), load_cancelled}
If the driver made itself permanent, {'UP', ref(), driver, name(), permanent}
*/
-Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
+Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
{
+ Eterm name_term = BIF_ARG_1;
+ Eterm options = BIF_ARG_2;
char *name = NULL;
Eterm ok_term = NIL;
Eterm soft_error_term = NIL;
@@ -495,7 +502,7 @@ Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
Eterm l;
int kill_ports = 0;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
for(l = options; is_list(l); l = CDR(list_val(l))) {
Eterm opt = CAR(list_val(l));
@@ -548,7 +555,7 @@ Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
if (dh->flags & ERL_DE_FL_KILL_PORTS) {
kill_ports = 1;
}
- if ((pe = find_proc_entry(dh, p, ERL_DE_PROC_LOADED)) == NULL) {
+ if ((pe = find_proc_entry(dh, BIF_P, ERL_DE_PROC_LOADED)) == NULL) {
if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
soft_error_term = am_not_loaded_by_this_process;
goto soft_error;
@@ -597,7 +604,7 @@ done:
if (!(prt->status & FREE_PORT_FLAGS)
&& prt->drv_ptr->handle == dh) {
#if DDLL_SMP
- erts_smp_atomic_inc(&prt->refc);
+ erts_smp_atomic_inc_nob(&prt->refc);
/* Extremely rare spinlock */
while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
erts_smp_port_state_unlock(prt);
@@ -624,22 +631,22 @@ done:
#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- p->flags |= F_USING_DDLL;
+ BIF_P->flags |= F_USING_DDLL;
if (monitor > 0) {
- Eterm mref = add_monitor(p, dh, ERL_DE_PROC_AWAIT_UNLOAD);
- hp = HAlloc(p,4);
+ Eterm mref = add_monitor(BIF_P, dh, ERL_DE_PROC_AWAIT_UNLOAD);
+ hp = HAlloc(BIF_P, 4);
t = TUPLE3(hp, am_ok, ok_term, mref);
} else {
- hp = HAlloc(p,3);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
if (kill_ports > 1) {
- ERTS_BIF_CHK_EXITED(p); /* May be exited by port killing */
+ ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
}
#if DDLL_SMP
unlock_drv_list();
@@ -651,8 +658,8 @@ soft_error:
unlock_drv_list();
#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- hp = HAlloc(p,3);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_error, soft_error_term);
BIF_RET(t);
@@ -661,21 +668,21 @@ soft_error:
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(p,BADARG);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(BIF_P, BADARG);
}
/*
* A shadow of the "real" demonitor BIF
*/
-BIF_RETTYPE erl_ddll_demonitor_1(Process *p, Eterm ref)
+BIF_RETTYPE erl_ddll_demonitor_1(BIF_ALIST_1)
{
- if (is_not_internal_ref(ref)) {
- BIF_ERROR(p, BADARG);
+ if (is_not_internal_ref(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
}
- if (p->flags & F_USING_DDLL) {
- erts_ddll_remove_monitor(p, ref, ERTS_PROC_LOCK_MAIN);
+ if (BIF_P->flags & F_USING_DDLL) {
+ erts_ddll_remove_monitor(BIF_P, BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -683,18 +690,18 @@ BIF_RETTYPE erl_ddll_demonitor_1(Process *p, Eterm ref)
/*
* A shadow of the "real" monitor BIF
*/
-BIF_RETTYPE erl_ddll_monitor_2(Process *p, Eterm dr, Eterm what)
+BIF_RETTYPE erl_ddll_monitor_2(BIF_ALIST_2)
{
- if (dr != am_driver) {
- BIF_ERROR(p,BADARG);
+ if (BIF_ARG_1 != am_driver) {
+ BIF_ERROR(BIF_P, BADARG);
}
- return erts_ddll_monitor_driver(p, what, ERTS_PROC_LOCK_MAIN);
+ return erts_ddll_monitor_driver(BIF_P, BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
}
/*
* Return list of loaded drivers {ok,[string()]}
*/
-Eterm erl_ddll_loaded_drivers_0(Process *p)
+BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
{
Eterm *hp;
int need = 3;
@@ -706,7 +713,7 @@ Eterm erl_ddll_loaded_drivers_0(Process *p)
for (drv = driver_list; drv; drv = drv->next) {
need += sys_strlen(drv->name)*2+2;
}
- hp = HAlloc(p,need);
+ hp = HAlloc(BIF_P, need);
for (drv = driver_list; drv; drv = drv->next) {
Eterm l;
l = buf_to_intlist(&hp, drv->name, sys_strlen(drv->name), NIL);
@@ -726,8 +733,11 @@ Eterm erl_ddll_loaded_drivers_0(Process *p)
* item is processes, driver_options, port_count, linked_in_driver,
* permanent, awaiting_load, awaiting_unload
*/
-Eterm erl_ddll_info_2(Process *p, Eterm name_term, Eterm item)
+BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
{
+ Process *p = BIF_P;
+ Eterm name_term = BIF_ARG_1;
+ Eterm item = BIF_ARG_2;
char *name = NULL;
Eterm res = NIL;
erts_driver_t *drv;
@@ -850,8 +860,10 @@ Eterm erl_ddll_info_2(Process *p, Eterm name_term, Eterm item)
* Backend for erl_ddll:format_error, handles all "soft" errors returned by builtins,
* possibly by calling the system specific error handler
*/
-Eterm erl_ddll_format_error_int_1(Process *p, Eterm code_term)
+BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm code_term = BIF_ARG_1;
char *errstring = NULL;
int errint;
int len;
@@ -1054,7 +1066,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
if (!(prt->status & FREE_PORT_FLAGS) &&
prt->drv_ptr->handle == dh) {
#if DDLL_SMP
- erts_smp_atomic_inc(&prt->refc);
+ erts_smp_atomic_inc_nob(&prt->refc);
while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
erts_smp_port_state_unlock(prt);
erts_smp_port_state_lock(prt);
@@ -1558,51 +1570,36 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
if ((res = erts_sys_ddll_load_driver_init(dh->handle,
&init_handle)) != ERL_DE_NO_ERROR) {
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_NO_INIT;
+ res = ERL_DE_LOAD_ERROR_NO_INIT;
+ goto error;
}
dp = erts_sys_ddll_call_init(init_handle);
if (dp == NULL) {
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_FAILED_INIT;
+ res = ERL_DE_LOAD_ERROR_FAILED_INIT;
+ goto error;
}
switch (dp->extended_marker) {
- case 0:
- /*
- * This may be an old driver that has been recompiled. If so,
- * at least the fields that existed in extended driver version
- * 1.0 should be zero. If not, a it is a bad driver. We cannot
- * be completely certain that this is a valid driver but this is
- * the best we can do with old drivers...
- */
- if (dp->major_version != 0
- || dp->minor_version != 0
- || dp->driver_flags != 0
- || dp->handle2 != NULL
- || dp->process_exit != NULL) {
- /* Old driver; needs to be recompiled... */
- return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
- }
- break;
case ERL_DRV_EXTENDED_MARKER:
if (ERL_DRV_EXTENDED_MAJOR_VERSION != dp->major_version
|| ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version) {
/* Incompatible driver version */
- return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ goto error;
}
break;
default:
/* Old driver; needs to be recompiled... */
- return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ goto error;
}
if (strcmp(name, dp->driver_name) != 0) {
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_BAD_NAME;
+ res = ERL_DE_LOAD_ERROR_BAD_NAME;
+ goto error;
}
- erts_smp_atomic_init(&(dh->refc), (erts_aint_t) 0);
+ erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
dh->port_count = 0;
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
@@ -1615,11 +1612,14 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
*/
erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->full_path);
dh->full_path = NULL;
- erts_sys_ddll_close(dh->handle);
- return ERL_DE_LOAD_ERROR_FAILED_INIT;
+ res = ERL_DE_LOAD_ERROR_FAILED_INIT;
+ goto error;
}
-
return ERL_DE_NO_ERROR;
+
+error:
+ erts_sys_ddll_close(dh->handle);
+ return res;
}
static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
@@ -1648,6 +1648,7 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
diver_list lock here!*/
if (q->finish) {
int fpe_was_unmasked = erts_block_fpe();
+ DTRACE1(driver_finish, q->name);
(*(q->finish))();
erts_unblock_fpe(fpe_was_unmasked);
}
@@ -1761,7 +1762,11 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
hp += REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
- erts_queue_message(proc, &rp_locks, bp, mess, am_undefined);
+ erts_queue_message(proc, &rp_locks, bp, mess, am_undefined
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
erts_smp_proc_unlock(proc, rp_locks);
ERTS_SMP_CHK_NO_PROC_LOCKS;
}
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index 01e6977a2c..a715756c15 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -52,7 +52,7 @@ BIF_RETTYPE abs_1(BIF_ALIST_1)
/* integer arguments */
if (is_small(BIF_ARG_1)) {
i0 = signed_val(BIF_ARG_1);
- i = labs(i0);
+ i = ERTS_SMALL_ABS(i0);
if (i0 == MIN_SMALL) {
hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
BIF_RET(uint_to_big(i, hp));
@@ -467,7 +467,7 @@ Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live)
/* integer arguments */
if (is_small(arg)) {
i0 = signed_val(arg);
- i = labs(i0);
+ i = ERTS_SMALL_ABS(i0);
if (i0 == MIN_SMALL) {
if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live+1);
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index f264bf44df..f889ccdb93 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -39,6 +39,8 @@
#include "dist.h"
#include "erl_gc.h"
#include "erl_cpu_topology.h"
+#include "erl_async.h"
+#include "erl_thr_progress.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -52,6 +54,11 @@
#include <valgrind/memcheck.h>
#endif
+static Export* alloc_info_trap = NULL;
+static Export* alloc_sizes_trap = NULL;
+
+static Export *gather_sched_wall_time_res_trap;
+
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/* Keep erts_system_version as a global variable for easy access from a core */
@@ -73,7 +80,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#ifdef ERTS_SMP
" [smp:%beu:%beu]"
#endif
- " [rq:%beu]"
#ifdef USE_THREADS
" [async-threads:%d]"
#endif
@@ -109,6 +115,12 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#ifdef VALGRIND
" [valgrind-compiled]"
#endif
+#ifdef USE_DTRACE
+ " [dtrace]"
+#endif
+#ifdef USE_SYSTEMTAP
+ " [systemtap]"
+#endif
"\n");
#define ASIZE(a) (sizeof(a)/sizeof(a[0]))
@@ -119,6 +131,16 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
# define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
#endif
+/* Cached, pre-built {OsType,OsFlavor} and {Major,Minor,Build} tuples */
+static Eterm os_type_tuple;
+static Eterm os_version_tuple;
+
+static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item);
+
+static Eterm
+current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
+static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
+
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
{
@@ -135,7 +157,7 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
+ Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -286,9 +308,7 @@ erts_print_system_version(int to, void *arg, Process *c_p)
#endif
return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
- , total, online, erts_no_run_queues
-#else
- , 1
+ , total, online
#endif
#ifdef USE_THREADS
, erts_async_max_threads
@@ -554,6 +574,8 @@ static Eterm pi_args[] = {
am_suspending,
am_min_heap_size,
am_min_bin_vheap_size,
+ am_current_location,
+ am_current_stacktrace,
#ifdef HYBRID
am_message_binary
#endif
@@ -602,8 +624,10 @@ pi_arg2ix(Eterm arg)
case am_suspending: return 26;
case am_min_heap_size: return 27;
case am_min_bin_vheap_size: return 28;
+ case am_current_location: return 29;
+ case am_current_stacktrace: return 30;
#ifdef HYBRID
- case am_message_binary: return 29;
+ case am_message_binary: return 31;
#endif
default: return -1;
}
@@ -1006,35 +1030,15 @@ process_info_aux(Process *BIF_P,
break;
case am_current_function:
- if (rp->current == NULL) {
- rp->current = find_function_from_pc(rp->i);
- }
- if (rp->current == NULL) {
- hp = HAlloc(BIF_P, 3);
- res = am_undefined;
- } else {
- BeamInstr* current;
-
- if (rp->current[0] == am_erlang &&
- rp->current[1] == am_process_info &&
- (rp->current[2] == 1 || rp->current[2] == 2) &&
- (current = find_function_from_pc(rp->cp)) != NULL) {
-
- /*
- * The current function is erlang:process_info/2,
- * which is not the answer that the application want.
- * We will use the function pointed into by rp->cp
- * instead.
- */
+ res = current_function(BIF_P, rp, &hp, 0);
+ break;
- rp->current = current;
- }
+ case am_current_location:
+ res = current_function(BIF_P, rp, &hp, 1);
+ break;
- hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
- hp += 4;
- }
+ case am_current_stacktrace:
+ res = current_stacktrace(BIF_P, rp, &hp);
break;
case am_initial_call:
@@ -1608,6 +1612,113 @@ process_info_aux(Process *BIF_P,
}
#undef MI_INC
+static Eterm
+current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
+{
+ Eterm* hp;
+ Eterm res;
+ FunctionInfo fi;
+
+ if (rp->current == NULL) {
+ erts_lookup_function_info(&fi, rp->i, full_info);
+ rp->current = fi.current;
+ } else if (full_info) {
+ erts_lookup_function_info(&fi, rp->i, full_info);
+ if (fi.current == NULL) {
+ /* Use the current function without location info */
+ erts_set_current_function(&fi, rp->current);
+ }
+ }
+
+ if (BIF_P->id == rp->id) {
+ FunctionInfo fi2;
+
+ /*
+ * The current function is erlang:process_info/{1,2},
+     * which is not the answer that the application wants.
+ * We will use the function pointed into by rp->cp
+ * instead if it can be looked up.
+ */
+ erts_lookup_function_info(&fi2, rp->cp, full_info);
+ if (fi2.current) {
+ fi = fi2;
+ rp->current = fi2.current;
+ }
+ }
+
+ /*
+ * Return the result.
+ */
+ if (rp->current == NULL) {
+ hp = HAlloc(BIF_P, 3);
+ res = am_undefined;
+ } else if (full_info) {
+ hp = HAlloc(BIF_P, 3+fi.needed);
+ hp = erts_build_mfa_item(&fi, hp, am_true, &res);
+ } else {
+ hp = HAlloc(BIF_P, 3+4);
+ res = TUPLE3(hp, rp->current[0],
+ rp->current[1], make_small(rp->current[2]));
+ hp += 4;
+ }
+ *hpp = hp;
+ return res;
+}
+
+static Eterm
+current_stacktrace(Process* p, Process* rp, Eterm** hpp)
+{
+ Uint sz;
+ struct StackTrace* s;
+ int depth;
+ FunctionInfo* stk;
+ FunctionInfo* stkp;
+ Uint heap_size;
+ int i;
+ Eterm* hp = *hpp;
+ Eterm mfa;
+ Eterm res = NIL;
+
+ depth = 8;
+ sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
+ s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
+ s->depth = 0;
+ if (rp->i) {
+ s->trace[s->depth++] = rp->i;
+ depth--;
+ }
+ if (depth > 0 && rp->cp != 0) {
+ s->trace[s->depth++] = rp->cp - 1;
+ depth--;
+ }
+ erts_save_stacktrace(rp, s, depth);
+
+ depth = s->depth;
+ stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
+ depth*sizeof(FunctionInfo));
+ heap_size = 3;
+ for (i = 0; i < depth; i++) {
+ erts_lookup_function_info(stkp, s->trace[i], 1);
+ if (stkp->current) {
+ heap_size += stkp->needed + 2;
+ stkp++;
+ }
+ }
+
+ hp = HAlloc(p, heap_size);
+ while (stkp > stk) {
+ stkp--;
+ hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
+ res = CONS(hp, mfa, res);
+ hp += 2;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, stk);
+ erts_free(ERTS_ALC_T_TMP, s);
+ *hpp = hp;
+ return res;
+}
+
#if defined(VALGRIND)
static int check_if_xml(void)
{
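Note: current_stacktrace() above sizes its temporary StackTrace buffer with offsetof plus a trailing pointer array, caps the depth, resolves each saved address to an MFA, and then walks the collected frames backwards while prepending, so the head of the returned list is the innermost frame. Below is a minimal standalone sketch of that allocate, fill, walk-backwards shape, with hypothetical names and plain malloc instead of erts_alloc.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>
    #include <stdint.h>

    struct trace {
        int depth;
        void *frames[1];   /* over-allocated below to hold max_depth entries */
    };

    int main(void)
    {
        int max_depth = 8;
        size_t sz = offsetof(struct trace, frames) + sizeof(void *) * max_depth;
        struct trace *t = malloc(sz);
        int i;

        if (t == NULL)
            return 1;
        t->depth = 0;

        /* Pretend these are code addresses collected from a stack scan. */
        for (i = 0; i < 5 && t->depth < max_depth; i++)
            t->frames[t->depth++] = (void *) (uintptr_t) (0x1000 + i * 0x10);

        /* Walk the collected frames backwards, as the CONS loop in the patch
         * does when prepending each MFA tuple onto the result list. */
        for (i = t->depth - 1; i >= 0; i--)
            printf("frame %d: %p\n", i, t->frames[i]);

        free(t);
        return 0;
    }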
@@ -1633,9 +1744,19 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
sel = *tp++;
- if (sel == am_allocator_sizes && arity == 2) {
- return erts_allocator_info_term(BIF_P, *tp, 1);
- } else if (sel == am_wordsize && arity == 2) {
+ if (sel == am_allocator_sizes) {
+ switch (arity) {
+ case 2:
+ ERTS_BIF_PREP_TRAP1(ret, alloc_sizes_trap, BIF_P, *tp);
+ return ret;
+ case 3:
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 1))
+ return am_true;
+ default:
+ goto badarg;
+ }
+ }
+ else if (sel == am_wordsize && arity == 2) {
if (tp[0] == am_internal) {
return make_small(sizeof(Eterm));
}
@@ -1682,8 +1803,17 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
}
else
goto badarg;
- } else if (sel == am_allocator && arity == 2) {
- return erts_allocator_info_term(BIF_P, *tp, 0);
+ } else if (sel == am_allocator) {
+ switch (arity) {
+ case 2:
+ ERTS_BIF_PREP_TRAP1(ret, alloc_info_trap, BIF_P, *tp);
+ return ret;
+ case 3:
+ if (erts_request_alloc_info(BIF_P, tp[0], tp[1], 0))
+ return am_true;
+ default:
+ goto badarg;
+ }
} else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
return erts_get_cpu_topology_term(BIF_P, *tp);
} else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
@@ -2005,7 +2135,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_undefined);
#endif
} else if (BIF_ARG_1 == am_trace_control_word) {
- BIF_RET(db_get_trace_control_word_0(BIF_P));
+ BIF_RET(db_get_trace_control_word(BIF_P));
} else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
} else if (ERTS_IS_ATOM_STR("ets_always_compress", BIF_ARG_1)) {
@@ -2026,7 +2156,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection){
- Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
Eterm tup;
hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
@@ -2041,7 +2171,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
- Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_fullsweep_after, make_small(val));
BIF_RET(res);
@@ -2065,7 +2195,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
/* Need to be the only thread running... */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (BIF_ARG_1 == am_info)
info(ERTS_PRINT_DSBUF, (void *) dsbufp);
@@ -2076,7 +2206,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else
distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
ASSERT(dsbufp && dsbufp->str);
@@ -2088,7 +2218,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
i = 0;
/* Need to be the only thread running... */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
for (dep = erts_visible_dist_entries; dep; dep = dep->next)
++i;
for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
@@ -2111,7 +2241,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = CONS(hp, tpl, res);
hp += 2;
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if (BIF_ARG_1 == am_system_version) {
@@ -2132,16 +2262,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
return erts_instr_get_type_info(BIF_P);
}
else if (BIF_ARG_1 == am_os_type) {
- Eterm type = am_atom_put(os_type, strlen(os_type));
- Eterm flav, tup;
- char *buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
-
- os_flavor(buf, 1024);
- flav = am_atom_put(buf, strlen(buf));
- hp = HAlloc(BIF_P, 3);
- tup = TUPLE2(hp, type, flav);
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- BIF_RET(tup);
+ BIF_RET(os_type_tuple);
}
else if (BIF_ARG_1 == am_allocator) {
BIF_RET(erts_allocator_options((void *) BIF_P));
@@ -2167,16 +2288,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_false);
}
else if (BIF_ARG_1 == am_os_version) {
- int major, minor, build;
- Eterm tup;
-
- os_version(&major, &minor, &build);
- hp = HAlloc(BIF_P, 4);
- tup = TUPLE3(hp,
- make_small(major),
- make_small(minor),
- make_small(build));
- BIF_RET(tup);
+ BIF_RET(os_version_tuple);
}
else if (BIF_ARG_1 == am_version) {
int n = strlen(ERLANG_VERSION);
@@ -2545,14 +2657,108 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("print_ethread_info", BIF_ARG_1)) {
+#if defined(ETHR_NATIVE_ATOMIC32_IMPL) \
+ || defined(ETHR_NATIVE_ATOMIC64_IMPL) \
+ || defined(ETHR_NATIVE_DW_ATOMIC_IMPL)
+ int i;
+ char **str;
+#endif
+#ifdef ETHR_NATIVE_ATOMIC32_IMPL
+ erts_printf("32-bit native atomics: %s\n",
+ ETHR_NATIVE_ATOMIC32_IMPL);
+ str = ethr_native_atomic32_ops();
+ for (i = 0; str[i]; i++)
+ erts_printf("ethr_native_atomic32_%s()\n", str[i]);
+#endif
+#ifdef ETHR_NATIVE_ATOMIC64_IMPL
+ erts_printf("64-bit native atomics: %s\n",
+ ETHR_NATIVE_ATOMIC64_IMPL);
+ str = ethr_native_atomic64_ops();
+ for (i = 0; str[i]; i++)
+ erts_printf("ethr_native_atomic64_%s()\n", str[i]);
+#endif
+#ifdef ETHR_NATIVE_DW_ATOMIC_IMPL
+ if (ethr_have_native_dw_atomic()) {
+ erts_printf("Double word native atomics: %s\n",
+ ETHR_NATIVE_DW_ATOMIC_IMPL);
+ str = ethr_native_dw_atomic_ops();
+ for (i = 0; str[i]; i++)
+ erts_printf("ethr_native_dw_atomic_%s()\n", str[i]);
+ str = ethr_native_su_dw_atomic_ops();
+ for (i = 0; str[i]; i++)
+ erts_printf("ethr_native_su_dw_atomic_%s()\n", str[i]);
+ }
+#endif
+#ifdef ETHR_NATIVE_SPINLOCK_IMPL
+ erts_printf("Native spin-locks: %s\n", ETHR_NATIVE_SPINLOCK_IMPL);
+#endif
+#ifdef ETHR_NATIVE_RWSPINLOCK_IMPL
+ erts_printf("Native rwspin-locks: %s\n", ETHR_NATIVE_RWSPINLOCK_IMPL);
+#endif
+#ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
+ erts_printf("SSE2 support: %s\n", (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__
+ ? "yes" : "no"));
+#endif
+#ifdef ETHR_X86_OUT_OF_ORDER
+ erts_printf("x86"
+#ifdef ARCH_64
+ "_64"
+#endif
+ " out of order\n");
+#endif
+#ifdef ETHR_SPARC_TSO
+ erts_printf("Sparc TSO\n");
+#endif
+#ifdef ETHR_SPARC_PSO
+ erts_printf("Sparc PSO\n");
+#endif
+#ifdef ETHR_SPARC_RMO
+ erts_printf("Sparc RMO\n");
+#endif
+#if defined(ETHR_PPC_HAVE_LWSYNC)
+ erts_printf("Have lwsync instruction: yes\n");
+#elif defined(ETHR_PPC_HAVE_NO_LWSYNC)
+ erts_printf("Have lwsync instruction: no\n");
+#elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__)
+ erts_printf("Have lwsync instruction: %s (runtime test)\n",
+ ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no");
+#endif
+ BIF_RET(am_true);
+ }
+ else if (ERTS_IS_ATOM_STR("dynamic_trace", BIF_ARG_1)) {
+#if defined(USE_DTRACE)
+ DECL_AM(dtrace);
+ BIF_RET(AM_dtrace);
+#elif defined(USE_SYSTEMTAP)
+ DECL_AM(systemtap);
+ BIF_RET(AM_systemtap);
+#else
+ BIF_RET(am_none);
+#endif
+ }
+ else if (ERTS_IS_ATOM_STR("dynamic_trace_probes", BIF_ARG_1)) {
+#if defined(USE_VM_PROBES)
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
}
+#ifdef ERTS_SMP
+ else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
+ erts_thr_progress_dbg_print_state();
+ BIF_RET(am_true);
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
-Eterm
-port_info_1(Process* p, Eterm pid)
+BIF_RETTYPE
+port_info_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm pid = BIF_ARG_1;
static Eterm keys[] = {
am_name,
am_links,
@@ -2575,7 +2781,7 @@ port_info_1(Process* p, Eterm pid)
for (i = 0; i < ASIZE(keys); i++) {
Eterm item;
- item = port_info_2(p, pid, keys[i]);
+ item = port_info(p, pid, keys[i]);
if (is_non_value(item)) {
return THE_NON_VALUE;
}
@@ -2584,7 +2790,7 @@ port_info_1(Process* p, Eterm pid)
}
items[i] = item;
}
- reg_name = port_info_2(p, pid, am_registered_name);
+ reg_name = port_info(p, pid, am_registered_name);
/*
* Build the resulting list.
@@ -2620,24 +2826,27 @@ port_info_1(Process* p, Eterm pid)
BIF_RETTYPE port_info_2(BIF_ALIST_2)
{
+ return port_info(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
+{
BIF_RETTYPE ret;
- Eterm portid = BIF_ARG_1;
Port *prt;
- Eterm item = BIF_ARG_2;
Eterm res;
Eterm* hp;
int count;
if (is_internal_port(portid))
- prt = erts_id2port(portid, BIF_P, ERTS_PROC_LOCK_MAIN);
+ prt = erts_id2port(portid, p, ERTS_PROC_LOCK_MAIN);
else if (is_atom(portid))
- erts_whereis_name(BIF_P, ERTS_PROC_LOCK_MAIN,
+ erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
portid, NULL, 0, 0, &prt);
else if (is_external_port(portid)
&& external_port_dist_entry(portid) == erts_this_dist_entry)
BIF_RET(am_undefined);
else {
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
if (!prt) {
@@ -2645,7 +2854,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
}
if (item == am_id) {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = make_small(internal_port_number(portid));
}
else if (item == am_links) {
@@ -2657,10 +2866,10 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ hp = HAlloc(p, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
res = CONS(hp, item, res);
hp += 2;
}
@@ -2676,11 +2885,11 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ hp = HAlloc(p, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
t = TUPLE2(hp, am_process, item);
hp += 3;
res = CONS(hp, t, res);
@@ -2692,25 +2901,25 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
else if (item == am_name) {
count = sys_strlen(prt->name);
- hp = HAlloc(BIF_P, 3 + 2*count);
+ hp = HAlloc(p, 3 + 2*count);
res = buf_to_intlist(&hp, prt->name, count, NIL);
}
else if (item == am_connected) {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = prt->connected; /* internal pid */
}
else if (item == am_input) {
Uint hsz = 3;
Uint n = prt->bytes_in;
(void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, n);
}
else if (item == am_output) {
Uint hsz = 3;
Uint n = prt->bytes_out;
(void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, n);
}
else if (item == am_registered_name) {
@@ -2720,7 +2929,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
ERTS_BIF_PREP_RET(ret, NIL);
goto done;
} else {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = reg->name;
}
}
@@ -2732,7 +2941,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
Uint size = 0;
ErlHeapFragment* bp;
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
erts_doforall_links(prt->nlinks, &one_link_size, &size);
@@ -2749,18 +2958,18 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
hard to retrieve... */
(void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, size);
}
else if (item == am_queue_size) {
Uint ioq_size = erts_port_ioq_size(prt);
Uint hsz = 3;
(void) erts_bld_uint(NULL, &hsz, ioq_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = HAlloc(p, hsz);
res = erts_bld_uint(&hp, NULL, ioq_size);
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
#ifndef ERTS_SMP
res = am_false;
#else
@@ -2779,7 +2988,7 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
#endif
}
else {
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(ret, p, BADARG);
goto done;
}
@@ -2793,9 +3002,12 @@ BIF_RETTYPE port_info_2(BIF_ALIST_2)
}
-Eterm
-fun_info_2(Process* p, Eterm fun, Eterm what)
+BIF_RETTYPE
+fun_info_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm what = BIF_ARG_2;
Eterm* hp;
Eterm val;
@@ -2845,7 +3057,7 @@ fun_info_2(Process* p, Eterm fun, Eterm what)
}
break;
case am_refc:
- val = erts_make_integer(erts_smp_atomic_read(&funp->fe->refc), p);
+ val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p);
hp = HAlloc(p, 3);
break;
case am_arity:
@@ -2994,7 +3206,12 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm res;
Eterm* hp;
- if (BIF_ARG_1 == am_context_switches) {
+ if (BIF_ARG_1 == am_scheduler_wall_time) {
+ res = erts_sched_wall_time_request(BIF_P, 0, 0);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if (BIF_ARG_1 == am_context_switches) {
Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, cs, SMALL_ZERO);
@@ -3041,7 +3258,7 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- unsigned long u1, u2, dummy;
+ UWord u1, u2, dummy;
Eterm b1, b2;
elapsed_time_both(&u1,&dummy,&u2,&dummy);
b1 = erts_make_integer(u1,BIF_P);
@@ -3065,8 +3282,8 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm r1, r2;
Eterm in, out;
Uint hsz = 9;
- Uint bytes_in = (Uint) erts_smp_atomic_read(&erts_bytes_in);
- Uint bytes_out = (Uint) erts_smp_atomic_read(&erts_bytes_out);
+ Uint bytes_in = (Uint) erts_smp_atomic_read_nob(&erts_bytes_in);
+ Uint bytes_out = (Uint) erts_smp_atomic_read_nob(&erts_bytes_out);
(void) erts_bld_uint(NULL, &hsz, bytes_in);
(void) erts_bld_uint(NULL, &hsz, bytes_out);
@@ -3106,26 +3323,6 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE memory_0(BIF_ALIST_0)
-{
- BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
- switch (res) {
- case am_badarg: BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); /* never... */
- case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
- default: BIF_RET(res);
- }
-}
-
-BIF_RETTYPE memory_1(BIF_ALIST_1)
-{
- BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, BIF_ARG_1);
- switch (res) {
- case am_badarg: BIF_ERROR(BIF_P, BADARG);
- case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
- default: BIF_RET(res);
- }
-}
-
BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
{
BIF_RET(erts_error_logger_warnings);
@@ -3139,7 +3336,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
* NOTE: Only supposed to be used for testing, and debugging.
*/
- if (!erts_smp_atomic_read(&available_internal_state)) {
+ if (!erts_smp_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -3227,6 +3424,15 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
+ else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
+ Eterm res;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
+ erts_smp_thr_progress_unblock();
+ BIF_RET(res);
+ }
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3429,6 +3635,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
static erts_smp_atomic_t hipe_test_reschedule_flag;
+
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
/*
@@ -3437,7 +3644,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
&& (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
- erts_aint_t prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
+ erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
@@ -3453,7 +3660,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(prev_on ? am_true : am_false);
}
- if (!erts_smp_atomic_read(&available_internal_state)) {
+ if (!erts_smp_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -3479,10 +3686,10 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (ms > 0) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (block)
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
while (erts_milli_sleep((long) ms) != 0);
if (block)
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
@@ -3634,14 +3841,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
/* Used by hipe test suites */
- erts_aint_t flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
+ erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag);
if (!flag && BIF_ARG_2 != am_false) {
- erts_smp_atomic_set(&hipe_test_reschedule_flag, 1);
+ erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- erts_smp_atomic_set(&hipe_test_reschedule_flag, !flag);
+ erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
BIF_RET(NIL);
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
@@ -3692,16 +3899,23 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
old_use_opt = !erts_disable_proc_not_running_opt;
erts_disable_proc_not_running_opt = !use_opt;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_use_opt ? am_true : am_false);
#else
BIF_ERROR(BIF_P, EXC_NOTSUP);
#endif
}
+ else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
+ if (erts_debug_wait_deallocations(BIF_P)) {
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+ }
+ }
+ }
}
BIF_ERROR(BIF_P, BADARG);
@@ -3860,7 +4074,7 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
Eterm* hp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
data = erts_lcnt_get_data();
@@ -3878,17 +4092,17 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if (BIF_ARG_1 == am_clear) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_lcnt_clear_counters();
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_ok);
@@ -3899,7 +4113,7 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
case 2:
if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (tp[2] == am_true) {
res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
@@ -3909,17 +4123,17 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
} else {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(BIF_P, BADARG);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (tp[2] == am_true) {
res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
@@ -3929,11 +4143,11 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
} else {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(BIF_P, BADARG);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
}
@@ -3948,11 +4162,37 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+static void os_info_init(void)
+{
+ Eterm type = am_atom_put(os_type, strlen(os_type));
+ Eterm flav;
+ int major, minor, build;
+ char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
+ Eterm* hp;
+
+ os_flavor(buf, 1024);
+ flav = am_atom_put(buf, strlen(buf));
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ hp = erts_alloc(ERTS_ALC_T_LL_TEMP_TERM, (3+4)*sizeof(Eterm));
+ os_type_tuple = TUPLE2(hp, type, flav);
+ hp += 3;
+ os_version(&major, &minor, &build);
+ os_version_tuple = TUPLE3(hp,
+ make_small(major),
+ make_small(minor),
+ make_small(build));
+}
+
void
erts_bif_info_init(void)
{
- erts_smp_atomic_init(&available_internal_state, 0);
- erts_smp_atomic_init(&hipe_test_reschedule_flag, 0);
+ erts_smp_atomic_init_nob(&available_internal_state, 0);
+ erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0);
+ alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
+ alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
+ gather_sched_wall_time_res_trap
+ = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1);
process_info_init();
+ os_info_init();
}
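Note: os_info_init() trades a per-call HAlloc plus os_flavor()/os_version() lookup for two tuples built once on a long-lived allocation, so erlang:system_info(os_type) and system_info(os_version) simply return the cached terms. Below is a standalone sketch of the same compute-once-at-init idea, with hypothetical names and a static buffer standing in for the ERTS_ALC_T_LL_TEMP_TERM allocation.

    #include <stdio.h>

    static char cached_os_string[128];   /* built once at init, reused forever */

    static void init_os_string(void)
    {
        /* "unix"/"linux" stand in for os_type and the os_flavor() lookup. */
        snprintf(cached_os_string, sizeof(cached_os_string), "%s/%s",
                 "unix", "linux");
    }

    static const char *get_os_string(void)
    {
        return cached_os_string;         /* no per-call allocation or formatting */
    }

    int main(void)
    {
        init_os_string();
        printf("%s\n", get_os_string());
        printf("%s\n", get_os_string()); /* same cached contents both times */
        return 0;
    }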
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
index 47c48e74d6..1805366cfe 100644
--- a/erts/emulator/beam/erl_bif_lists.c
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -34,27 +34,7 @@
static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
-/*
- * erlang:'++'/2
- */
-
-Eterm
-ebif_plusplus_2(Process* p, Eterm A, Eterm B)
-{
- return append_2(p, A, B);
-}
-
-/*
- * erlang:'--'/2
- */
-
-Eterm
-ebif_minusminus_2(Process* p, Eterm A, Eterm B)
-{
- return subtract_2(p, A, B);
-}
-
-BIF_RETTYPE append_2(BIF_ALIST_2)
+static BIF_RETTYPE append(Process* p, Eterm A, Eterm B)
{
Eterm list;
Eterm copy;
@@ -63,18 +43,18 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
Eterm* hp;
int i;
- if ((i = list_length(BIF_ARG_1)) < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((i = list_length(A)) < 0) {
+ BIF_ERROR(p, BADARG);
}
if (i == 0) {
- BIF_RET(BIF_ARG_2);
- } else if (is_nil(BIF_ARG_2)) {
- BIF_RET(BIF_ARG_1);
+ BIF_RET(B);
+ } else if (is_nil(B)) {
+ BIF_RET(A);
}
need = 2*i;
- hp = HAlloc(BIF_P, need);
- list = BIF_ARG_1;
+ hp = HAlloc(p, need);
+ list = A;
copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
list = CDR(list_val(list));
hp += 2;
@@ -85,12 +65,31 @@ BIF_RETTYPE append_2(BIF_ALIST_2)
list = CDR(listp);
hp += 2;
}
- CDR(list_val(last)) = BIF_ARG_2;
+ CDR(list_val(last)) = B;
BIF_RET(copy);
}
+/*
+ * erlang:'++'/2
+ */
+
+Eterm
+ebif_plusplus_2(BIF_ALIST_2)
+{
+ return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE append_2(BIF_ALIST_2)
+{
+ return append(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+/*
+ * erlang:'--'/2
+ */
+
#define SMALL_VEC_SIZE 10
-BIF_RETTYPE subtract_2(BIF_ALIST_2)
+static Eterm subtract(Process* p, Eterm A, Eterm B)
{
Eterm list;
Eterm* hp;
@@ -103,17 +102,17 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
int n;
int m;
- if ((n = list_length(BIF_ARG_1)) < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((n = list_length(A)) < 0) {
+ BIF_ERROR(p, BADARG);
}
- if ((m = list_length(BIF_ARG_2)) < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((m = list_length(B)) < 0) {
+ BIF_ERROR(p, BADARG);
}
if (n == 0)
BIF_RET(NIL);
if (m == 0)
- BIF_RET(BIF_ARG_1);
+ BIF_RET(A);
/* allocate element vector */
if (n <= SMALL_VEC_SIZE)
@@ -123,7 +122,7 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
/* PUT ALL ELEMENTS IN VP */
vp = vec_p;
- list = BIF_ARG_1;
+ list = A;
i = n;
while(i--) {
Eterm* listp = list_val(list);
@@ -132,7 +131,7 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
}
/* UNMARK ALL DELETED CELLS */
- list = BIF_ARG_2;
+ list = B;
m = 0; /* number of deleted elements */
while(is_list(list)) {
Eterm* listp = list_val(list);
@@ -153,11 +152,11 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
if (m == n) /* All deleted ? */
res = NIL;
else if (m == 0) /* None deleted ? */
- res = BIF_ARG_1;
+ res = A;
else { /* REBUILD LIST */
res = NIL;
need = 2*(n - m);
- hp = HAlloc(BIF_P, need);
+ hp = HAlloc(p, need);
vp = vec_p + n - 1;
while(vp >= vec_p) {
if (is_value(*vp)) {
@@ -172,6 +171,16 @@ BIF_RETTYPE subtract_2(BIF_ALIST_2)
BIF_RET(res);
}
+BIF_RETTYPE ebif_minusminus_2(BIF_ALIST_2)
+{
+ return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2)
+{
+ return subtract(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
BIF_RETTYPE lists_member_2(BIF_ALIST_2)
{
Eterm term;
@@ -278,11 +287,12 @@ BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
}
BIF_RETTYPE
-lists_keymember_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+lists_keymember_3(BIF_ALIST_3)
{
Eterm res;
- res = keyfind(BIF_lists_keymember_3, p, Key, Pos, List);
+ res = keyfind(BIF_lists_keymember_3, BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
if (is_value(res) && is_tuple(res)) {
return am_true;
} else {
@@ -291,23 +301,25 @@ lists_keymember_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
}
BIF_RETTYPE
-lists_keysearch_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+lists_keysearch_3(BIF_ALIST_3)
{
Eterm res;
- res = keyfind(BIF_lists_keysearch_3, p, Key, Pos, List);
+ res = keyfind(BIF_lists_keysearch_3, BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
if (is_non_value(res) || is_not_tuple(res)) {
return res;
} else { /* Tuple */
- Eterm* hp = HAlloc(p, 3);
+ Eterm* hp = HAlloc(BIF_P, 3);
return TUPLE2(hp, am_value, res);
}
}
BIF_RETTYPE
-lists_keyfind_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+lists_keyfind_3(BIF_ALIST_3)
{
- return keyfind(BIF_lists_keyfind_3, p, Key, Pos, List);
+ return keyfind(BIF_lists_keyfind_3, BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
static Eterm
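Note: the pattern repeated throughout this diff is clearest here: the exported entry points (ebif_plusplus_2/append_2 and ebif_minusminus_2/subtract_2) become thin wrappers over one static helper taking explicit arguments, so two Erlang-visible names share a single implementation under the BIF_ALIST calling convention. Below is a toy standalone sketch of that wrapper arrangement; the MY_* macros are stand-ins, not the real BIF_ALIST definitions, and the helper body is a placeholder.

    #include <stdio.h>

    typedef long Term;
    #define MY_ALIST_2  Term *args     /* toy stand-in for BIF_ALIST_2 */
    #define MY_ARG_1    args[0]
    #define MY_ARG_2    args[1]

    /* One static helper shared by both exported entry points. */
    static Term do_append(Term a, Term b)
    {
        return a + b;                  /* placeholder for the real list walk */
    }

    Term plusplus_2(MY_ALIST_2)        /* plays the role of erlang:'++'/2 */
    {
        return do_append(MY_ARG_1, MY_ARG_2);
    }

    Term append_2(MY_ALIST_2)          /* plays the role of lists:append/2 */
    {
        return do_append(MY_ARG_1, MY_ARG_2);
    }

    int main(void)
    {
        Term args[2] = { 40, 2 };
        printf("%ld %ld\n", plusplus_2(args), append_2(args));
        return 0;
    }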
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index deda7adc1f..13f8b1f63c 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -225,18 +225,23 @@ BIF_RETTYPE is_function_1(BIF_ALIST_1)
BIF_RETTYPE is_function_2(BIF_ALIST_2)
{
+ BIF_RET(erl_is_function(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
+{
Sint arity;
/*
* Verify argument 2 (arity); arity must be >= 0.
*/
- if (is_small(BIF_ARG_2)) {
- arity = signed_val(BIF_ARG_2);
+ if (is_small(arg2)) {
+ arity = signed_val(arg2);
if (arity < 0) {
error:
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- } else if (is_big(BIF_ARG_2) && !bignum_header_is_neg(*big_val(BIF_ARG_2))) {
+ } else if (is_big(arg2) && !bignum_header_is_neg(*big_val(arg2))) {
/* A positive bignum is OK, but can't possibly match. */
arity = -1;
} else {
@@ -244,20 +249,20 @@ BIF_RETTYPE is_function_2(BIF_ALIST_2)
goto error;
}
- if (is_fun(BIF_ARG_1)) {
- ErlFunThing* funp = (ErlFunThing *) fun_val(BIF_ARG_1);
+ if (is_fun(arg1)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(arg1);
if (funp->arity == (Uint) arity) {
BIF_RET(am_true);
}
- } else if (is_export(BIF_ARG_1)) {
- Export* exp = (Export *) EXPAND_POINTER((export_val(BIF_ARG_1))[1]);
+ } else if (is_export(arg1)) {
+ Export* exp = (Export *) EXPAND_POINTER((export_val(arg1))[1]);
if (exp->code[2] == (Uint) arity) {
BIF_RET(am_true);
}
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* tp = tuple_val(BIF_ARG_1);
+ } else if (is_tuple(arg1)) {
+ Eterm* tp = tuple_val(arg1);
if (tp[0] == make_arityval(2) && is_atom(tp[1]) && is_atom(tp[2])) {
BIF_RET(am_true);
}
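Note: erl_is_function() accepts any non-negative arity, treats a positive bignum as a legal value that simply cannot match any fun, and reserves badarg for negative or non-integer input. Below is a standalone sketch of that three-way arity check, using a hypothetical MAX_ARITY cutoff in place of the small/bignum distinction.

    #include <stdio.h>

    #define MAX_ARITY 255   /* larger values behave like a positive bignum */

    enum result { IS_FUN_TRUE, IS_FUN_FALSE, IS_FUN_BADARG };

    static enum result check_fun_arity(long requested, unsigned fun_arity)
    {
        if (requested < 0)
            return IS_FUN_BADARG;              /* negative arity is an error */
        if (requested > MAX_ARITY)
            return IS_FUN_FALSE;               /* legal, but can never match */
        return ((unsigned) requested == fun_arity) ? IS_FUN_TRUE : IS_FUN_FALSE;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               check_fun_arity(2, 2),          /* 0: true   */
               check_fun_arity(300, 2),        /* 1: false  */
               check_fun_arity(-1, 2));        /* 2: badarg */
        return 0;
    }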
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 954b1f9729..58d48199fa 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -53,20 +53,18 @@ BIF_RETTYPE os_timestamp_0(BIF_ALIST_0)
}
-Eterm
-os_getpid_0(Process* p)
+BIF_RETTYPE os_getpid_0(BIF_ALIST_0)
{
char pid_string[21]; /* enough for a 64 bit number */
int n;
Eterm* hp;
sys_get_pid(pid_string); /* In sys.c */
n = sys_strlen(pid_string);
- hp = HAlloc(p, n*2);
+ hp = HAlloc(BIF_P, n*2);
BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL));
}
-Eterm
-os_getenv_0(Process* p)
+BIF_RETTYPE os_getenv_0(BIF_ALIST_0)
{
GETENV_STATE state;
char *cp;
@@ -80,7 +78,7 @@ os_getenv_0(Process* p)
ret = NIL;
while ((cp = getenv_string(&state)) != NULL) {
len = strlen(cp);
- hp = HAlloc(p, len*2+2);
+ hp = HAlloc(BIF_P, len*2+2);
str = buf_to_intlist(&hp, cp, len, NIL);
ret = CONS(hp, str, ret);
}
@@ -90,9 +88,11 @@ os_getenv_0(Process* p)
return ret;
}
-Eterm
-os_getenv_1(Process* p, Eterm key)
+
+BIF_RETTYPE os_getenv_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm key = BIF_ARG_1;
Eterm str;
int len, res;
char *key_str, *val;
@@ -145,9 +145,11 @@ os_getenv_1(Process* p, Eterm key)
BIF_RET(str);
}
-Eterm
-os_putenv_2(Process* p, Eterm key, Eterm value)
+BIF_RETTYPE os_putenv_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm key = BIF_ARG_1;
+ Eterm value = BIF_ARG_2;
char def_buf[1024];
char *buf = NULL;
int sep_ix, i, key_len, value_len, tot_len;
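Note: os_getenv_0() walks the environment one "KEY=VALUE" string at a time through GETENV_STATE and conses each entry onto the result list. Below is a standalone sketch of the same walk over the POSIX environ array rather than the erts iterator.

    #include <stdio.h>

    extern char **environ;   /* POSIX environment array */

    int main(void)
    {
        char **ep;
        int n = 0;

        for (ep = environ; *ep != NULL; ep++)
            n++;                              /* one "KEY=VALUE" string each */
        printf("%d environment entries\n", n);
        if (n > 0)
            printf("first entry: %s\n", environ[0]);
        return 0;
    }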
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 3fd35dd963..5525426824 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,6 +40,7 @@
#include "external.h"
#include "packet_parser.h"
#include "erl_bits.h"
+#include "dtrace-wrapper.h"
static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump);
static byte* convert_environment(Process* p, Eterm env);
@@ -48,6 +49,9 @@ static void free_args(char **);
char *erts_default_arg0 = "default";
+static BIF_RETTYPE
+port_call(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+
BIF_RETTYPE open_port_2(BIF_ALIST_2)
{
int port_num;
@@ -117,11 +121,9 @@ id_or_name2port(Process *c_p, Eterm id)
#define ERTS_PORT_COMMAND_FLAG_FORCE (((Uint32) 1) << 0)
#define ERTS_PORT_COMMAND_FLAG_NOSUSPEND (((Uint32) 1) << 1)
-static BIF_RETTYPE do_port_command(Process *BIF_P,
- Eterm BIF_ARG_1,
- Eterm BIF_ARG_2,
- Eterm BIF_ARG_3,
- Uint32 flags)
+static BIF_RETTYPE
+do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2, Eterm arg3,
+ Uint32 flags)
{
BIF_RETTYPE res;
Port *p;
@@ -135,7 +137,7 @@ static BIF_RETTYPE do_port_command(Process *BIF_P,
profile_runnable_proc(BIF_P, am_inactive);
}
- p = id_or_name2port(BIF_P, BIF_ARG_1);
+ p = id_or_name2port(BIF_P, arg1);
if (!p) {
if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
trace_virtual_sched(BIF_P, am_in);
@@ -172,13 +174,13 @@ static BIF_RETTYPE do_port_command(Process *BIF_P,
monitor_generic(BIF_P, am_busy_port, p->id);
}
ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ arg1, arg2, arg3);
}
} else {
int wres;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_NO_PROC_LOCKS;
- wres = erts_write_to_port(BIF_P->id, p, BIF_ARG_2);
+ wres = erts_write_to_port(BIF_P->id, p, arg2);
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (wres != 0) {
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -237,23 +239,29 @@ BIF_RETTYPE port_command_3(BIF_ALIST_3)
BIF_RETTYPE port_call_2(BIF_ALIST_2)
{
- return port_call_3(BIF_P,BIF_ARG_1,make_small(0),BIF_ARG_2);
+ return port_call(BIF_P,BIF_ARG_1, make_small(0), BIF_ARG_2);
}
BIF_RETTYPE port_call_3(BIF_ALIST_3)
{
+ return port_call(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
Uint op;
Port *p;
Uint size;
byte *bytes;
byte *endp;
- size_t real_size;
+ ErlDrvSizeT real_size;
erts_driver_t *drv;
byte port_input[256]; /* Default input buffer to encode in */
byte port_result[256]; /* Buffer for result from port. */
byte* port_resp; /* Pointer to result buffer. */
char *prc;
- int ret;
+ ErlDrvSSizeT ret;
Eterm res;
Sint result_size;
Eterm *hp;
@@ -266,15 +274,15 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
/* trace of port scheduling with virtual process descheduling
* lock wait
*/
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_out);
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(c_p, am_out);
}
if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_inactive);
+ profile_runnable_proc(c_p, am_inactive);
}
- p = id_or_name2port(BIF_P, BIF_ARG_1);
+ p = id_or_name2port(c_p, arg1);
if (!p) {
error:
if (port_resp != port_result &&
@@ -286,22 +294,22 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
/* Need to virtual schedule in the process if there
* was an error.
*/
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(c_p, am_in);
}
if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
+ profile_runnable_proc(c_p, am_active);
}
if (p)
erts_port_release(p);
#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
- ERTS_BIF_CHK_EXITED(BIF_P);
+ ERTS_BIF_CHK_EXITED(c_p);
#endif
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(c_p, BADARG);
}
if ((drv = p->drv_ptr) == NULL) {
@@ -310,10 +318,10 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
if (drv->call == NULL) {
goto error;
}
- if (!term_to_Uint(BIF_ARG_2, &op)) {
+ if (!term_to_Uint(arg2, &op)) {
goto error;
}
- p->caller = BIF_P->id;
+ p->caller = c_p->id;
/* Lock taken, virtual schedule of port */
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
@@ -323,19 +331,29 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
profile_runnable_port(p, am_active);
}
- size = erts_encode_ext_size(BIF_ARG_3);
+ size = erts_encode_ext_size(arg3);
if (size > sizeof(port_input))
bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);
endp = bytes;
- erts_encode_ext(BIF_ARG_3, &endp);
+ erts_encode_ext(arg3, &endp);
real_size = endp - bytes;
if (real_size > size) {
erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
__FILE__, __LINE__, endp - (bytes + size));
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_call)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(p->connected, process_str);
+ dtrace_port_str(p, port_str);
+ DTRACE5(driver_call, process_str, port_str, p->name, op, real_size);
+ }
+#endif
prc = (char *) port_resp;
fpe_was_unmasked = erts_block_fpe();
ret = drv->call((ErlDrvData)p->drv_data,
@@ -356,12 +374,12 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
port_resp = (byte *) prc;
p->caller = NIL;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef HARDDEBUG
{
- int z;
- printf("real_size = %ld,%d, ret = %d\r\n",real_size,
- (int) real_size, ret);
+ ErlDrvSizeT z;
+ printf("real_size = %ld,%d, ret = %ld,%d\r\n", (unsigned long) real_size,
+ (int) real_size, (unsigned long)ret, (int) ret);
printf("[");
for(z = 0; z < real_size; ++z) {
printf("%d, ",(int) bytes[z]);
@@ -378,18 +396,18 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
/* Error or a binary without magic/ with wrong magic */
goto error;
}
- result_size = erts_decode_ext_size(port_resp, ret, 0);
+ result_size = erts_decode_ext_size(port_resp, ret);
if (result_size < 0) {
goto error;
}
- hp = HAlloc(BIF_P, result_size);
+ hp = HAlloc(c_p, result_size);
hp_end = hp + result_size;
endp = port_resp;
- res = erts_decode_ext(&hp, &MSO(BIF_P), &endp);
+ res = erts_decode_ext(&hp, &MSO(c_p), &endp);
if (res == THE_NON_VALUE) {
goto error;
}
- HRelease(BIF_P, hp_end, hp);
+ HRelease(c_p, hp_end, hp);
if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
driver_free(port_resp);
}
@@ -398,16 +416,16 @@ BIF_RETTYPE port_call_3(BIF_ALIST_3)
if (p)
erts_port_release(p);
#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
- ERTS_BIF_CHK_EXITED(BIF_P);
+ ERTS_BIF_CHK_EXITED(c_p);
#endif
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(c_p, am_in);
}
if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
+ profile_runnable_proc(c_p, am_active);
}
return res;
@@ -532,6 +550,18 @@ BIF_RETTYPE port_connect_2(BIF_ALIST_2)
prt->connected = pid; /* internal pid */
erts_smp_port_unlock(prt);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_connect)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(prt->connected, process_str);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ dtrace_proc_str(rp, newprocess_str);
+ DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
+ }
+#endif
BIF_RET(am_true);
}
@@ -897,7 +927,16 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump);
+#ifdef USE_VM_PROBES
+ if (port_num >= 0 && DTRACE_ENABLED(port_open)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+ dtrace_proc_str(p, process_str);
+ erts_snprintf(port_str, sizeof(port_str), "%T", erts_port[port_num].id);
+ DTRACE3(port_open, process_str, name_buf, port_str);
+ }
+#endif
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
if (port_num < 0) {
@@ -1070,7 +1109,7 @@ struct packet_callback_args
};
#define in_area(ptr,start,nbytes) \
- ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
static Eterm
http_bld_string(struct packet_callback_args* pca, Uint **hpp, Uint *szp,
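Note: the probe sites added to port_call(), port_connect_2() and open_port() follow one rule: test DTRACE_ENABLED() first and only then format the probe arguments, so builds without USE_VM_PROBES, and builds where the probe is simply not enabled, pay essentially nothing. Below is a standalone sketch of that guard pattern; the PROBE_* macros mimic, but are not, the dtrace-wrapper.h definitions, and the enabled branch assumes generated probe functions exist.

    #include <stdio.h>
    #include <string.h>

    #ifdef USE_VM_PROBES
    #define PROBE_ENABLED(name)   probe_##name##_enabled()  /* generated elsewhere */
    #define PROBE2(name, a, b)    probe_##name(a, b)
    #else
    #define PROBE_ENABLED(name)   0
    #define PROBE2(name, a, b)    do { } while (0)
    #endif

    static void do_port_call(const char *port_name, unsigned op)
    {
        if (PROBE_ENABLED(driver_call)) {
            /* Probe arguments are only formatted when a tracer is attached. */
            char buf[64];
            strncpy(buf, port_name, sizeof(buf) - 1);
            buf[sizeof(buf) - 1] = '\0';
            PROBE2(driver_call, buf, op);
        }
        printf("calling %s with op %u\n", port_name, op);
    }

    int main(void)
    {
        do_port_call("#Port<0.42>", 1);
        return 0;
    }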
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index 26891c4348..6b843d2e08 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -45,6 +45,7 @@ static Export *urun_trap_exportp = NULL;
static Export *ucompile_trap_exportp = NULL;
static BIF_RETTYPE re_exec_trap(BIF_ALIST_3);
+static BIF_RETTYPE re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
static void *erts_erts_pcre_malloc(size_t size) {
return erts_alloc(ERTS_ALC_T_RE_HEAP,size);
@@ -414,8 +415,8 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
* Compile BIFs
*/
-BIF_RETTYPE
-re_compile_2(BIF_ALIST_2)
+static BIF_RETTYPE
+re_compile(Process* p, Eterm arg1, Eterm arg2)
{
Uint slen;
char *expr;
@@ -429,43 +430,49 @@ re_compile_2(BIF_ALIST_2)
int unicode = 0;
- if (parse_options(BIF_ARG_2,&options,NULL,&pflags,NULL,NULL)
+ if (parse_options(arg2,&options,NULL,&pflags,NULL,NULL)
< 0) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
if (pflags & PARSE_FLAG_UNIQUE_EXEC_OPT) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;
- if (pflags & PARSE_FLAG_UNICODE && !is_binary(BIF_ARG_1)) {
- BIF_TRAP2(ucompile_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (pflags & PARSE_FLAG_UNICODE && !is_binary(arg1)) {
+ BIF_TRAP2(ucompile_trap_exportp, p, arg1, arg2);
}
- if (erts_iolist_size(BIF_ARG_1, &slen)) {
- BIF_ERROR(BIF_P,BADARG);
+ if (erts_iolist_size(arg1, &slen)) {
+ BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(BIF_ARG_1, expr, slen) != 0) {
+ if (io_list_to_buf(arg1, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
expr[slen]='\0';
result = erts_pcre_compile2(expr, options, &errcode,
&errstr, &errofset, default_table);
- ret = build_compile_result(BIF_P, am_error, result, errcode,
+ ret = build_compile_result(p, am_error, result, errcode,
errstr, errofset, unicode, 1);
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_RET(ret);
}
BIF_RETTYPE
+re_compile_2(BIF_ALIST_2)
+{
+ return re_compile(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE
re_compile_1(BIF_ALIST_1)
{
- return re_compile_2(BIF_P,BIF_ARG_1,NIL);
+ return re_compile(BIF_P, BIF_ARG_1, NIL);
}
/*
@@ -845,8 +852,8 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
/*
* The actual re:run/2,3 BIFs
*/
-BIF_RETTYPE
-re_run_3(BIF_ALIST_3)
+static BIF_RETTYPE
+re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
{
const pcre *code_tmp;
RestartContext restart;
@@ -865,15 +872,15 @@ re_run_3(BIF_ALIST_3)
Eterm capture[CAPSPEC_SIZE];
int is_list_cap;
- if (parse_options(BIF_ARG_3,&comp_options,&options,&pflags,&startoffset,capture)
+ if (parse_options(arg3,&comp_options,&options,&pflags,&startoffset,capture)
< 0) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
is_list_cap = ((pflags & PARSE_FLAG_CAPTURE_OPT) &&
(capture[CAPSPEC_TYPE] == am_list));
- if (is_not_tuple(BIF_ARG_2) || (arityval(*tuple_val(BIF_ARG_2)) != 4)) {
- if (is_binary(BIF_ARG_2) || is_list(BIF_ARG_2) || is_nil(BIF_ARG_2)) {
+ if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) {
+ if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) {
/* Compile from textual RE */
Uint slen;
char *expr;
@@ -884,19 +891,19 @@ re_run_3(BIF_ALIST_3)
int capture_count;
if (pflags & PARSE_FLAG_UNICODE &&
- (!is_binary(BIF_ARG_2) || !is_binary(BIF_ARG_1) ||
+ (!is_binary(arg2) || !is_binary(arg1) ||
(is_list_cap && !(pflags & PARSE_FLAG_GLOBAL)))) {
- BIF_TRAP3(urun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ BIF_TRAP3(urun_trap_exportp, p, arg1, arg2, arg3);
}
- if (erts_iolist_size(BIF_ARG_2, &slen)) {
- BIF_ERROR(BIF_P,BADARG);
+ if (erts_iolist_size(arg2, &slen)) {
+ BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(BIF_ARG_2, expr, slen) != 0) {
+ if (io_list_to_buf(arg2, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
expr[slen]='\0';
result = erts_pcre_compile2(expr, comp_options, &errcode,
@@ -905,11 +912,11 @@ re_run_3(BIF_ALIST_3)
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
/* Compilation error gives badarg except in the compile
function */
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
if (pflags & PARSE_FLAG_GLOBAL) {
Eterm precompiled =
- build_compile_result(BIF_P, am_error,
+ build_compile_result(p, am_error,
result, errcode,
errstr, errofset,
(pflags &
@@ -917,13 +924,13 @@ re_run_3(BIF_ALIST_3)
0);
Eterm *hp,r;
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
- hp = HAlloc(BIF_P,4);
- /* BIF_ARG_2 is in the tuple just to make exceptions right */
- r = TUPLE3(hp,BIF_ARG_3,
+ hp = HAlloc(p,4);
+ /* arg2 is in the tuple just to make exceptions right */
+ r = TUPLE3(hp,arg3,
((pflags & PARSE_FLAG_UNIQUE_COMPILE_OPT) ?
am_true :
- am_false), BIF_ARG_2);
- BIF_TRAP3(grun_trap_exportp, BIF_P, BIF_ARG_1, precompiled, r);
+ am_false), arg2);
+ BIF_TRAP3(grun_trap_exportp, p, arg1, precompiled, r);
}
erts_pcre_fullinfo(result, NULL, PCRE_INFO_SIZE, &code_size);
@@ -935,31 +942,31 @@ re_run_3(BIF_ALIST_3)
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
/*unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;*/
} else {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
} else {
if (pflags & PARSE_FLAG_UNIQUE_COMPILE_OPT) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
- tp = tuple_val(BIF_ARG_2);
+ tp = tuple_val(arg2);
if (tp[1] != am_re_pattern || is_not_small(tp[2]) ||
is_not_small(tp[3]) || is_not_binary(tp[4])) {
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
if (unsigned_val(tp[3]) &&
- (!is_binary(BIF_ARG_1) ||
+ (!is_binary(arg1) ||
(is_list_cap && !(pflags & PARSE_FLAG_GLOBAL)))) { /* unicode */
- BIF_TRAP3(urun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2,
- BIF_ARG_3);
+ BIF_TRAP3(urun_trap_exportp, p, arg1, arg2,
+ arg3);
}
if (pflags & PARSE_FLAG_GLOBAL) {
Eterm *hp,r;
- hp = HAlloc(BIF_P,3);
- r = TUPLE2(hp,BIF_ARG_3,am_false);
- BIF_TRAP3(grun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2,
+ hp = HAlloc(p,3);
+ r = TUPLE2(hp,arg3,am_false);
+ BIF_TRAP3(grun_trap_exportp, p, arg1, arg2,
r);
}
@@ -968,7 +975,7 @@ re_run_3(BIF_ALIST_3)
if ((code_tmp = (const pcre *)
erts_get_aligned_binary_bytes(tp[4], &temp_alloc)) == NULL) {
erts_free_aligned_binary_bytes(temp_alloc);
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size);
memcpy(restart.code, code_tmp, code_size);
@@ -980,7 +987,7 @@ re_run_3(BIF_ALIST_3)
restart.ovector = erts_alloc(ERTS_ALC_T_RE_SUBJECT, ovsize * sizeof(int));
restart.extra.flags = PCRE_EXTRA_TABLES | PCRE_EXTRA_LOOP_LIMIT;
restart.extra.tables = default_table;
- restart.extra.loop_limit = ERTS_BIF_REDS_LEFT(BIF_P) * LOOP_FACTOR;
+ restart.extra.loop_limit = ERTS_BIF_REDS_LEFT(p) * LOOP_FACTOR;
loop_limit_tmp = max_loop_limit; /* To lesser probability of race in debug
situation (erts_debug) */
if (restart.extra.loop_limit > loop_limit_tmp) {
@@ -996,7 +1003,7 @@ re_run_3(BIF_ALIST_3)
if ((restart.ret_info = build_capture(capture,restart.code)) == NULL) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
}
@@ -1004,7 +1011,7 @@ re_run_3(BIF_ALIST_3)
copying, also binary returns can be sub binaries in that case */
restart.flags = 0;
- if (is_binary(BIF_ARG_1)) {
+ if (is_binary(arg1)) {
Eterm real_bin;
Uint offset;
Eterm* bptr;
@@ -1012,9 +1019,9 @@ re_run_3(BIF_ALIST_3)
int bitsize;
ProcBin* pb;
- ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
+ ERTS_GET_REAL_BIN(arg1, real_bin, offset, bitoffs, bitsize);
- slength = binary_size(BIF_ARG_1);
+ slength = binary_size(arg1);
bptr = binary_val(real_bin);
if (bitsize != 0 || bitoffs != 0 || (*bptr != HEADER_PROC_BIN)) {
goto handle_iolist;
@@ -1027,24 +1034,24 @@ re_run_3(BIF_ALIST_3)
restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY;
} else {
handle_iolist:
- if (erts_iolist_size(BIF_ARG_1, &slength)) {
+ if (erts_iolist_size(arg1, &slength)) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
if (restart.ret_info != NULL) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
}
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
- if (io_list_to_buf(BIF_ARG_1, restart.subject, slength) != 0) {
+ if (io_list_to_buf(arg1, restart.subject, slength) != 0) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject);
if (restart.ret_info != NULL) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
}
- BIF_ERROR(BIF_P,BADARG);
+ BIF_ERROR(p,BADARG);
}
}
@@ -1056,7 +1063,7 @@ handle_iolist:
rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject, slength, startoffset,
options, restart.ovector, ovsize);
ASSERT(loop_count != 0xFFFFFFFF);
- BUMP_REDS(BIF_P, loop_count / LOOP_FACTOR);
+ BUMP_REDS(p, loop_count / LOOP_FACTOR);
if (rc == PCRE_ERROR_LOOP_LIMIT) {
/* Trap */
Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
@@ -1065,17 +1072,17 @@ handle_iolist:
Eterm magic_bin;
Eterm *hp;
memcpy(restartp,&restart,sizeof(RestartContext));
- BUMP_ALL_REDS(BIF_P);
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
+ BUMP_ALL_REDS(p);
+ hp = HAlloc(p, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
BIF_TRAP3(&re_exec_trap_export,
- BIF_P,
- BIF_ARG_1,
- BIF_ARG_2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
+ p,
+ arg1,
+ arg2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
magic_bin);
}
- res = build_exec_return(BIF_P, rc, &restart, BIF_ARG_1);
+ res = build_exec_return(p, rc, &restart, arg1);
cleanup_restart_context(&restart);
@@ -1083,9 +1090,15 @@ handle_iolist:
}
BIF_RETTYPE
+re_run_3(BIF_ALIST_3)
+{
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE
re_run_2(BIF_ALIST_2)
{
- return re_run_3(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL);
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL);
}
/*
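Note: re_run() charges PCRE loop iterations against the caller's remaining reductions; when erts_pcre_exec() returns PCRE_ERROR_LOOP_LIMIT the restart state is copied into a magic binary and the BIF traps back into itself, so a long match cannot monopolize a scheduler. Below is a standalone sketch of that budget-and-resume control flow, with illustrative names and no PCRE or magic binaries.

    #include <stdio.h>

    struct job { int done; int total; };

    /* Returns 1 when finished, 0 when the budget ran out and it must resume. */
    static int run_some(struct job *j, int budget)
    {
        while (j->done < j->total && budget-- > 0)
            j->done++;                /* one "loop iteration" of real work */
        return j->done >= j->total;
    }

    int main(void)
    {
        struct job j = { 0, 2500 };
        int yields = 0;

        while (!run_some(&j, 1000))   /* each round ~ one scheduled slice */
            yields++;
        printf("finished after %d yields\n", yields);
        return 0;
    }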
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index db771bd216..d806be0704 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -26,6 +26,7 @@
#include "bif.h"
#include "error.h"
#include "big.h"
+#include "erl_thr_progress.h"
/****************************************************************************
** BIF Timer support
@@ -372,7 +373,11 @@ bif_timer_timeout(ErtsBifTimer* btm)
message = TUPLE3(hp, am_timeout, ref, message);
}
- erts_queue_message(rp, &rp_locks, bp, message, NIL);
+ erts_queue_message(rp, &rp_locks, bp, message, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
erts_smp_proc_unlock(rp, rp_locks);
erts_smp_proc_dec_refc(rp);
}
@@ -686,7 +691,7 @@ erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
{
int i;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
ErtsBifTimer *btm;
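Note: erts_queue_message() now takes an extra token argument only in USE_VM_PROBES builds, so each caller, like bif_timer_timeout() above, supplies the additional NIL inside the same #ifdef. Below is a standalone sketch of that conditional-argument idiom with hypothetical names.

    #include <stdio.h>

    static void queue_message(const char *msg
    #ifdef USE_VM_PROBES
                              , const char *dt_utag
    #endif
                              )
    {
    #ifdef USE_VM_PROBES
        printf("queue %s (tag %s)\n", msg, dt_utag);
    #else
        printf("queue %s\n", msg);
    #endif
    }

    int main(void)
    {
        queue_message("timeout"
    #ifdef USE_VM_PROBES
                      , "none"
    #endif
                      );
        return 0;
    }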
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 0509e51a6f..80f774523c 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -37,6 +37,7 @@
#include "erl_version.h"
#include "beam_bp.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -47,6 +48,11 @@ static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static Eterm
+trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
+static BIF_RETTYPE
+system_monitor(Process *p, Eterm monitor_pid, Eterm list);
+
static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
static int already_traced(Process *p, Process *tracee_p, Eterm tracer);
static int port_already_traced(Process *p, Port *tracee_port, Eterm tracer);
@@ -76,13 +82,19 @@ erts_bif_trace_init(void)
*/
Eterm
-trace_pattern_2(Process* p, Eterm MFA, Eterm Pattern)
+trace_pattern_2(BIF_ALIST_2)
{
- return trace_pattern_3(p,MFA,Pattern,NIL);
+ return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL);
}
Eterm
-trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
+trace_pattern_3(BIF_ALIST_3)
+{
+ return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static Eterm
+trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
@@ -97,7 +109,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
Eterm meta_tracer_pid = p->id;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
UseTmpHeap(3,p);
/*
@@ -326,7 +338,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
done:
UnUseTmpHeap(3,p);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
return make_small(matches);
@@ -336,7 +348,7 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_ERROR(p, BADARG);
}
@@ -435,9 +447,12 @@ erts_trace_flags(Eterm List,
return 0;
}
-Eterm
-trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
+Eterm trace_3(BIF_ALIST_3)
{
+ Process* p = BIF_P;
+ Eterm pid_spec = BIF_ARG_1;
+ Eterm how = BIF_ARG_2;
+ Eterm list = BIF_ARG_3;
int on;
Eterm tracer = NIL;
int matches = 0;
@@ -630,7 +645,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
#ifdef ERTS_SMP
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
system_blocked = 1;
#endif
@@ -679,7 +694,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
} else if (tracer != NIL) {
tracee_port->tracer_proc = tracer;
}
- /* matches are not counted for ports since it would violate compability */
+ /* matches are not counted for ports since it would violate compatibility */
/* This could be a reason to modify this function or make a new one. */
}
}
@@ -711,7 +726,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -726,7 +741,7 @@ trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
#ifdef ERTS_SMP
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -820,9 +835,11 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
* Return information about a process or an external function being traced.
*/
-Eterm
-trace_info_2(Process* p, Eterm What, Eterm Key)
+Eterm trace_info_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm What = BIF_ARG_1;
+ Eterm Key = BIF_ARG_2;
Eterm res;
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
@@ -1060,7 +1077,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
}
#endif
@@ -1068,7 +1085,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -1727,9 +1744,17 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
return THE_NON_VALUE;
}
if (build_result) {
+#ifdef USE_VM_PROBES
+ old_value = (SEQ_TRACE_TOKEN(p) == am_have_dt_utag) ? NIL : SEQ_TRACE_TOKEN(p);
+#else
old_value = SEQ_TRACE_TOKEN(p);
+#endif
}
+#ifdef USE_VM_PROBES
+ SEQ_TRACE_TOKEN(p) = (DT_UTAG(p) != NIL) ? am_have_dt_utag : NIL;
+#else
SEQ_TRACE_TOKEN(p) = NIL;
+#endif
return old_value;
}
else {
@@ -1742,7 +1767,11 @@ new_seq_trace_token(Process* p)
{
Eterm* hp;
- if (SEQ_TRACE_TOKEN(p) == NIL) {
+ if (SEQ_TRACE_TOKEN(p) == NIL
+#ifdef USE_VM_PROBES
+ || SEQ_TRACE_TOKEN(p) == am_have_dt_utag
+#endif
+ ) {
hp = HAlloc(p, 6);
SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0), /* Flags */
make_small(0), /* Label */
@@ -1752,23 +1781,24 @@ new_seq_trace_token(Process* p)
}
}
-BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
+BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
{
- Eterm item;
Eterm res;
Eterm* hp;
Uint current_flag;
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_atom(item)) {
+ BIF_ERROR(p, BADARG);
}
- item = BIF_ARG_1;
-
- if (SEQ_TRACE_TOKEN(BIF_P) == NIL) {
+ if (SEQ_TRACE_TOKEN(p) == NIL
+#ifdef USE_VM_PROBES
+ || SEQ_TRACE_TOKEN(p) == am_have_dt_utag
+#endif
+ ) {
if ((item == am_send) || (item == am_receive) ||
(item == am_print) || (item == am_timestamp)) {
- hp = HAlloc(BIF_P,3);
+ hp = HAlloc(p,3);
res = TUPLE2(hp, item, am_false);
BIF_RET(res);
} else if ((item == am_label) || (item == am_serial)) {
@@ -1778,35 +1808,40 @@ BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
}
}
- if (BIF_ARG_1 == am_send) {
+ if (item == am_send) {
current_flag = SEQ_TRACE_SEND;
- } else if (BIF_ARG_1 == am_receive) {
+ } else if (item == am_receive) {
current_flag = SEQ_TRACE_RECEIVE;
- } else if (BIF_ARG_1 == am_print) {
+ } else if (item == am_print) {
current_flag = SEQ_TRACE_PRINT;
- } else if (BIF_ARG_1 == am_timestamp) {
+ } else if (item == am_timestamp) {
current_flag = SEQ_TRACE_TIMESTAMP;
} else {
current_flag = 0;
}
if (current_flag) {
- res = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(BIF_P)) & current_flag ?
+ res = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(p)) & current_flag ?
am_true : am_false;
} else if (item == am_label) {
- res = SEQ_TRACE_TOKEN_LABEL(BIF_P);
+ res = SEQ_TRACE_TOKEN_LABEL(p);
} else if (item == am_serial) {
- hp = HAlloc(BIF_P, 3);
- res = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(BIF_P), SEQ_TRACE_TOKEN_SERIAL(BIF_P));
+ hp = HAlloc(p, 3);
+ res = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(p), SEQ_TRACE_TOKEN_SERIAL(p));
} else {
error:
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- hp = HAlloc(BIF_P, 3);
+ hp = HAlloc(p, 3);
res = TUPLE2(hp, item, res);
BIF_RET(res);
}
+BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
+{
+ BIF_RET(erl_seq_trace_info(BIF_P, BIF_ARG_1));
+}
+
/*
seq_trace_print(Message) -> true | false
This function passes Message to the system_tracer
@@ -1817,8 +1852,13 @@ BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
*/
BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1)
{
- if (SEQ_TRACE_TOKEN(BIF_P) == NIL)
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL
+#ifdef USE_VM_PROBES
+ || SEQ_TRACE_TOKEN(BIF_P) == am_have_dt_utag
+#endif
+ ) {
BIF_RET(am_false);
+ }
seq_trace_update_send(BIF_P);
seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_1,
SEQ_TRACE_PRINT, NIL, BIF_P);
@@ -1835,8 +1875,13 @@ BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1)
*/
BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
{
- if (SEQ_TRACE_TOKEN(BIF_P) == NIL)
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL
+#ifdef USE_VM_PROBES
+ || SEQ_TRACE_TOKEN(BIF_P) == am_have_dt_utag
+#endif
+ ) {
BIF_RET(am_false);
+ }
if (!(is_atom(BIF_ARG_1) || is_small(BIF_ARG_1))) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -1852,7 +1897,7 @@ void erts_system_monitor_clear(Process *c_p) {
#ifdef ERTS_SMP
if (c_p) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
}
#endif
erts_set_system_monitor(NIL);
@@ -1862,7 +1907,7 @@ void erts_system_monitor_clear(Process *c_p) {
erts_system_monitor_flags.busy_dist_port = 0;
#ifdef ERTS_SMP
if (c_p) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -1919,23 +1964,35 @@ static Eterm system_monitor_get(Process *p)
}
-BIF_RETTYPE system_monitor_0(Process *p) {
- BIF_RET(system_monitor_get(p));
+BIF_RETTYPE system_monitor_0(BIF_ALIST_0)
+{
+ BIF_RET(system_monitor_get(BIF_P));
}
-BIF_RETTYPE system_monitor_1(Process *p, Eterm spec) {
+BIF_RETTYPE system_monitor_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm spec = BIF_ARG_1;
+
if (spec == am_undefined) {
- BIF_RET(system_monitor_2(p, spec, NIL));
+ BIF_RET(system_monitor(p, spec, NIL));
} else if (is_tuple(spec)) {
Eterm *tp = tuple_val(spec);
if (tp[0] != make_arityval(2)) goto error;
- BIF_RET(system_monitor_2(p, tp[1], tp[2]));
+ BIF_RET(system_monitor(p, tp[1], tp[2]));
}
error:
BIF_ERROR(p, BADARG);
}
-BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
+BIF_RETTYPE system_monitor_2(BIF_ALIST_2)
+{
+ return system_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+static BIF_RETTYPE
+system_monitor(Process *p, Eterm monitor_pid, Eterm list)
+{
Eterm prev;
int system_blocked = 0;
@@ -1951,7 +2008,7 @@ BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
system_blocked = 1;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
goto error;
@@ -1985,7 +2042,7 @@ BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
erts_system_monitor_flags.busy_port = !!busy_port;
erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
}
@@ -1993,7 +2050,7 @@ BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
error:
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
@@ -2006,7 +2063,7 @@ void erts_system_profile_clear(Process *c_p) {
#ifdef ERTS_SMP
if (c_p) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
}
#endif
erts_set_system_profile(NIL);
@@ -2016,7 +2073,7 @@ void erts_system_profile_clear(Process *c_p) {
erts_system_profile_flags.exclusive = 0;
#ifdef ERTS_SMP
if (c_p) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
#endif
@@ -2053,11 +2110,16 @@ static Eterm system_profile_get(Process *p) {
}
}
-BIF_RETTYPE system_profile_0(Process *p) {
- BIF_RET(system_profile_get(p));
+BIF_RETTYPE system_profile_0(BIF_ALIST_0)
+{
+ BIF_RET(system_profile_get(BIF_P));
}
-BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
+BIF_RETTYPE system_profile_2(BIF_ALIST_2)
+{
+ Process *p = BIF_P;
+ Eterm profiler = BIF_ARG_1;
+ Eterm list = BIF_ARG_2;
Eterm prev;
int system_blocked = 0;
Process *profiler_p = NULL;
@@ -2075,7 +2137,7 @@ BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
system_blocked = 1;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* Check if valid process, no locks are taken */
@@ -2117,7 +2179,7 @@ BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
erts_system_profile_flags.runnable_procs = !!runnable_procs;
erts_system_profile_flags.exclusive = !!exclusive;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(prev);
@@ -2126,7 +2188,7 @@ BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
error:
if (system_blocked) {
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
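
The erl_bif_trace.c hunks above convert several trace BIFs from explicit (Process*, Eterm, ...) signatures to the BIF_ALIST_N calling convention and funnel the actual work into shared static helpers (trace_pattern, system_monitor), while the old erts_smp_block_system/erts_smp_release_system pair is replaced by erts_smp_thr_progress_block/erts_smp_thr_progress_unblock. A minimal sketch of the wrapper-plus-helper shape follows; the *_demo names and types are hypothetical stand-ins, not the real ERTS definitions:

    /* Hypothetical *_demo names; the real BIF_ALIST_N macros and the
     * Eterm/Process types live in the ERTS headers. The point is the shape:
     * thin wrappers unpack an argument vector and call one shared helper. */
    typedef long Eterm_demo;
    struct proc_demo { int dummy; };

    static Eterm_demo trace_pattern_demo(struct proc_demo *p, Eterm_demo mfa,
                                         Eterm_demo pattern, Eterm_demo flags)
    {
        (void)p; (void)mfa; (void)pattern; (void)flags;
        return 0; /* the real helper returns make_small(matches) */
    }

    static Eterm_demo trace_pattern_2_demo(struct proc_demo *p, Eterm_demo *args)
    {
        return trace_pattern_demo(p, args[0], args[1], 0 /* NIL stand-in */);
    }

    static Eterm_demo trace_pattern_3_demo(struct proc_demo *p, Eterm_demo *args)
    {
        return trace_pattern_demo(p, args[0], args[1], args[2]);
    }

With the argument-vector convention, both arities share one implementation, which is why the diff can retire trace_pattern_3 as the entry point for trace_pattern_2.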
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index e56084b9cb..6f7309f493 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -76,14 +76,12 @@ struct erl_bits_state ErlBitsState;
#define byte_buf (ErlBitsState.byte_buf_)
#define byte_buf_len (ErlBitsState.byte_buf_len_)
-#ifdef ERTS_SMP
static erts_smp_atomic_t bits_bufs_size;
-#endif
Uint
erts_bits_bufs_size(void)
{
- return 0;
+ return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size);
}
#if !defined(ERTS_SMP)
@@ -109,8 +107,8 @@ erts_bits_destroy_state(ERL_BITS_PROTO_0)
void
erts_init_bits(void)
{
+ erts_smp_atomic_init_nob(&bits_bufs_size, 0);
#if defined(ERTS_SMP)
- erts_smp_atomic_init(&bits_bufs_size, 0);
/* erl_process.c calls erts_bits_init_state() on all state instances */
#else
ERL_BITS_DECLARE_STATEP;
@@ -713,9 +711,7 @@ static void
ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need))
{
if (byte_buf_len < need) {
-#ifdef ERTS_SMP
- erts_smp_atomic_add(&bits_bufs_size, need - byte_buf_len);
-#endif
+ erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len);
byte_buf_len = need;
byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len);
}
@@ -849,8 +845,7 @@ erts_bs_put_utf8(ERL_BITS_PROTO_1(Eterm arg))
dst[1] = 0x80 | (val & 0x3F);
num_bits = 16;
} else if (val < 0x10000UL) {
- if ((0xD800 <= val && val <= 0xDFFF) ||
- val == 0xFFFE || val == 0xFFFF) {
+ if (0xD800 <= val && val <= 0xDFFF) {
return 0;
}
dst[0] = 0xE0 | (val >> 12);
@@ -890,8 +885,7 @@ erts_bs_put_utf16(ERL_BITS_PROTO_2(Eterm arg, Uint flags))
return 0;
}
val = unsigned_val(arg);
- if (val > 0x10FFFF || (0xD800 <= val && val <= 0xDFFF) ||
- val == 0xFFFE || val == 0xFFFF) {
+ if (val > 0x10FFFF || (0xD800 <= val && val <= 0xDFFF)) {
return 0;
}
@@ -1656,8 +1650,7 @@ erts_bs_get_utf8(ErlBinMatchBuffer* mb)
return THE_NON_VALUE;
}
result = (((result << 6) + a) << 6) + b - (Eterm) 0x000E2080UL;
- if ((0xD800 <= result && result <= 0xDFFF) ||
- result == 0xFFFE || result == 0xFFFF) {
+ if (0xD800 <= result && result <= 0xDFFF) {
return THE_NON_VALUE;
}
mb->offset += 24;
@@ -1727,9 +1720,6 @@ erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags)
w1 = (src[0] << 8) | src[1];
}
if (w1 < 0xD800 || w1 > 0xDFFF) {
- if (w1 == 0xFFFE || w1 == 0xFFFF) {
- return THE_NON_VALUE;
- }
mb->offset += 16;
return make_small(w1);
} else if (w1 > 0xDBFF) {
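
The erl_bits.c hunks above also relax the Unicode validation: only the surrogate range U+D800..U+DFFF (and, for UTF-16 encoding, values above U+10FFFF) is rejected, while the noncharacters U+FFFE and U+FFFF are no longer treated as invalid. A minimal stand-alone sketch of the resulting check, with an illustrative name rather than the ERTS functions:

    #include <stdint.h>

    /* Illustrative check only: accept any Unicode scalar value, i.e. reject
     * surrogates and anything above U+10FFFF, but allow U+FFFE/U+FFFF. */
    static int is_unicode_scalar_value(uint32_t cp)
    {
        if (cp > 0x10FFFF)
            return 0;
        if (cp >= 0xD800 && cp <= 0xDFFF)
            return 0;
        return 1;
    }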
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 0f67733fa4..388d943755 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -150,7 +150,7 @@ void erts_bits_destroy_state(ERL_BITS_PROTO_0);
* NBYTES(x) returns the number of bytes needed to store x bits.
*/
-#define NBYTES(x) (((x) + 7) >> 3)
+#define NBYTES(x) (((Uint64)(x) + (Uint64) 7) >> 3)
#define BYTE_OFFSET(ofs) ((Uint) (ofs) >> 3)
#define BIT_OFFSET(ofs) ((ofs) & 7)
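
The NBYTES change above widens the arithmetic to 64 bits before adding 7, so the round-up cannot overflow a 32-bit Uint when the bit count is near the top of its range. A hedged illustration of the same idea with standard types:

    #include <stdint.h>

    /* Illustration of the overflow the cast avoids: with 32-bit arithmetic,
     * bits + 7 wraps for bits close to UINT32_MAX and the byte count comes
     * out tiny; doing the addition in 64 bits keeps the result correct.
     * For example nbytes64(0xFFFFFFFF) is 0x20000000, whereas a 32-bit
     * wraparound would yield 0. */
    static uint64_t nbytes64(uint64_t bits)
    {
        return (bits + (uint64_t)7) >> 3;
    }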
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index bcf8bcf270..fe3693d0ca 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -486,10 +486,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
erts_thr_set_main_status(1, (int) esdp->no);
/* Make sure we check if we should bind to a cpu or not... */
- if (esdp->run_queue->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- erts_smp_atomic32_set(&esdp->chk_cpu_bind, 1);
- else
- esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
#endif
@@ -502,11 +499,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- erts_smp_atomic32_set(&esdp->chk_cpu_bind, 0);
- else {
- esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
- }
+ esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
#endif
erts_smp_runq_unlock(esdp->run_queue);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
@@ -1729,16 +1722,8 @@ erts_init_cpu_topology(void)
scheduler2cpu_map[ix].bound_id = -1;
}
- if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED) {
- int ncpus = erts_get_cpu_configured(cpuinfo);
- if (ncpus < 1 || erts_no_schedulers < ncpus)
- cpu_bind_order = ERTS_CPU_BIND_NONE;
- else
- cpu_bind_order = ((system_cpudata || user_cpudata)
- && (erts_bind_to_cpu(cpuinfo, -1) != -ENOTSUP)
- ? ERTS_CPU_BIND_DEFAULT_BIND
- : ERTS_CPU_BIND_NONE);
- }
+ if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED)
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
reader_groups_map = add_cpu_groups(reader_groups,
reader_groups_callback,
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index e0a6aa05c6..51bdf53823 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -129,8 +129,6 @@ static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed tab
static Uint meta_main_tab_seq_incr;
static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
-
-
/*
** The meta hash table of all NAMED ets tables
*/
@@ -202,12 +200,17 @@ static int free_table_cont(Process *p,
int first,
int clean_meta_tab);
static void print_table(int to, void *to_arg, int show, DbTable* tb);
-static BIF_RETTYPE ets_select_delete_1(Process *p, Eterm a1);
-static BIF_RETTYPE ets_select_count_1(Process *p, Eterm a1);
-static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1);
-static BIF_RETTYPE ets_delete_trap(Process *p, Eterm a1);
+static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
+static BIF_RETTYPE ets_select1(Process* p, Eterm arg1);
+static BIF_RETTYPE ets_select2(Process* p, Eterm arg1, Eterm arg2);
+static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+
+
/*
* Exported global
*/
@@ -224,21 +227,21 @@ static void
free_dbtable(DbTable* tb)
{
#ifdef HARDDEBUG
- if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
+ if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
- erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable),
+ erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
tb->common.id);
erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size));
+ erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
+ erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
#endif
#ifdef ERTS_SMP
@@ -248,6 +251,7 @@ free_dbtable(DbTable* tb)
ASSERT(is_immed(tb->common.heir_data));
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
+ ERTS_SMP_MEMORY_BARRIER;
}
#ifdef ERTS_SMP
@@ -276,8 +280,7 @@ static void schedule_free_dbtable(DbTable* tb)
ASSERT(scheds >= 1);
ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
erts_refc_init(&tb->common.ref, scheds);
- ERTS_THR_MEMORY_BARRIER;
- erts_smp_schedule_misc_aux_work(0, scheds, chk_free_dbtable, tb);
+ erts_schedule_multi_misc_aux_work(0, scheds, chk_free_dbtable, tb);
#else
free_dbtable(tb);
#endif
@@ -338,13 +341,13 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
if (tb->common.type & DB_FINE_LOCKED) {
- if (tb->common.is_thread_safe) {
- ASSERT(kind == LCK_WRITE);
+ if (kind == LCK_WRITE) {
+ ASSERT(tb->common.is_thread_safe);
tb->common.is_thread_safe = 0;
erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
}
else {
- ASSERT(kind != LCK_WRITE);
+ ASSERT(!tb->common.is_thread_safe);
erts_smp_rwmtx_runlock(&tb->common.rwlock);
}
}
@@ -543,9 +546,9 @@ static int remove_named_tab(DbTable *tb, int have_lock)
* We keep our increased refc over this op in order to
* prevent the table from disapearing.
*/
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ db_unlock(tb, LCK_WRITE);
erts_smp_rwmtx_rwlock(rwlock);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ db_lock(tb, LCK_WRITE);
}
#endif
@@ -1295,8 +1298,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
UWord heir_data;
Uint32 status;
Sint keypos;
- int is_named, is_fine_locked, frequent_read, is_compressed;
+ int is_named, is_compressed;
+#ifdef ERTS_SMP
+ int is_fine_locked, frequent_read;
+#endif
+#ifdef DEBUG
int cret;
+#endif
DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
erts_smp_rwmtx_t *mmtl;
@@ -1311,8 +1319,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status = DB_NORMAL | DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
+#ifdef ERTS_SMP
is_fine_locked = 0;
frequent_read = 0;
+#endif
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
@@ -1341,18 +1351,31 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = signed_val(tp[2]);
}
else if (tp[1] == am_write_concurrency) {
+#ifdef ERTS_SMP
if (tp[2] == am_true) {
is_fine_locked = 1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
} else break;
+#else
+ if ((tp[2] != am_true) && (tp[2] != am_false)) {
+ break;
+ }
+#endif
}
else if (tp[1] == am_read_concurrency) {
+#ifdef ERTS_SMP
if (tp[2] == am_true) {
frequent_read = 1;
} else if (tp[2] == am_false) {
frequent_read = 0;
} else break;
+#else
+ if ((tp[2] != am_true) && (tp[2] != am_false)) {
+ break;
+ }
+#endif
+
}
else if (tp[1] == am_heir && tp[2] == am_none) {
heir = am_none;
@@ -1392,11 +1415,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
if (IS_HASH_TABLE(status)) {
meth = &db_hash;
- #ifdef ERTS_SMP
+#ifdef ERTS_SMP
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
}
- #endif
+#endif
}
else if (IS_TREE_TABLE(status)) {
meth = &db_tree;
@@ -1417,12 +1440,12 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable init_tb;
- erts_smp_atomic_init(&init_tb.common.memory_size, 0);
+ erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
- erts_smp_atomic_init(&tb->common.memory_size,
- erts_smp_atomic_read(&init_tb.common.memory_size));
+ erts_smp_atomic_init_nob(&tb->common.memory_size,
+ erts_smp_atomic_read_nob(&init_tb.common.memory_size));
}
tb->common.meth = meth;
@@ -1439,12 +1462,15 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.owner = BIF_P->id;
set_heir(BIF_P, tb, heir, heir_data);
- erts_smp_atomic_init(&tb->common.nitems, 0);
+ erts_smp_atomic_init_nob(&tb->common.nitems, 0);
tb->common.fixations = NULL;
tb->common.compress = is_compressed;
- cret = meth->db_create(BIF_P, tb);
+#ifdef DEBUG
+ cret =
+#endif
+ meth->db_create(BIF_P, tb);
ASSERT(cret == DB_ERROR_NONE);
erts_smp_spin_lock(&meta_main_tab_main_lock);
@@ -1505,9 +1531,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ARG_1, BIF_ARG_2, ret, BIF_P->id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size));
+ erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
+ erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
#endif
UseTmpHeap(3,BIF_P);
@@ -1650,24 +1676,6 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(mmtl);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
- }
-#endif
- /* We must keep the slot, to be found by db_proc_dead() if process dies */
- MARK_SLOT_DEAD(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
- if (is_atom(tb->common.id))
- remove_named_tab(tb, 0);
-
if (tb->common.owner != BIF_P->id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
@@ -1691,6 +1699,25 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
UnUseTmpHeap(3,BIF_P);
}
+
+ mmtl = get_meta_main_tab_lock(tb->common.slot);
+#ifdef ERTS_SMP
+ if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+ /*
+ * We keep our increased refc over this op in order to
+ * prevent the table from disapearing.
+ */
+ db_unlock(tb, LCK_WRITE);
+ erts_smp_rwmtx_rwlock(mmtl);
+ db_lock(tb, LCK_WRITE);
+ }
+#endif
+ /* We must keep the slot, to be found by db_proc_dead() if process dies */
+ MARK_SLOT_DEAD(tb->common.slot);
+ erts_smp_rwmtx_rwunlock(mmtl);
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb, 0);
+
/* disable inheritance */
free_heir_data(tb);
tb->common.heir = am_none;
@@ -1940,8 +1967,10 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
/*
** This is for trapping, cannot be called directly.
*/
-static BIF_RETTYPE ets_select_delete_1(Process *p, Eterm a1)
+static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -1995,7 +2024,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
BIF_ERROR(BIF_P, BADARG);
}
- nitems = erts_smp_atomic_read(&tb->common.nitems);
+ nitems = erts_smp_atomic_read_nob(&tb->common.nitems);
tb->common.meth->db_delete_all_objects(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
BIF_RET(erts_make_integer(nitems,BIF_P));
@@ -2107,7 +2136,7 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
BIF_RETTYPE ets_match_1(BIF_ALIST_1)
{
- return ets_select_1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE ets_match_2(BIF_ALIST_2)
@@ -2123,7 +2152,7 @@ BIF_RETTYPE ets_match_2(BIF_ALIST_2)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2141,7 +2170,7 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2149,6 +2178,12 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_3(BIF_ALIST_3)
{
+ return ets_select3(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+static BIF_RETTYPE
+ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
+{
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2159,22 +2194,22 @@ BIF_RETTYPE ets_select_3(BIF_ALIST_3)
CHECK_TABLES();
/* Chunk size strictly greater than 0 */
- if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_small(arg3) || (chunk_size = signed_val(arg3)) <= 0) {
+ BIF_ERROR(p, BADARG);
}
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- safety = ITERATION_SAFETY(BIF_P,tb);
+ safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(BIF_P, tb,
- BIF_ARG_2, chunk_size,
+ cret = tb->common.meth->db_select_chunk(p, tb,
+ arg2, chunk_size,
0 /* not reversed */,
&ret);
- if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
- fix_table_locked(BIF_P, tb);
+ if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ fix_table_locked(p, tb);
}
if (safety == ITER_UNSAFE) {
local_unfix_table(tb);
@@ -2186,22 +2221,24 @@ BIF_RETTYPE ets_select_3(BIF_ALIST_3)
ERTS_BIF_PREP_RET(result, ret);
break;
case DB_ERROR_SYSRES:
- ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
break;
default:
- ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
break;
}
- erts_match_set_release_result(BIF_P);
+ erts_match_set_release_result(p);
return result;
}
/* We get here instead of in the real BIF when trapping */
-static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1)
+static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2246,6 +2283,11 @@ static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1)
BIF_RETTYPE ets_select_1(BIF_ALIST_1)
{
+ return ets_select1(BIF_P, BIF_ARG_1);
+}
+
+static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
+{
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2259,28 +2301,27 @@ BIF_RETTYPE ets_select_1(BIF_ALIST_1)
* Make sure that the table exists.
*/
- if (!is_tuple(BIF_ARG_1)) {
- if (BIF_ARG_1 == am_EOT) {
+ if (!is_tuple(arg1)) {
+ if (arg1 == am_EOT) {
BIF_RET(am_EOT);
}
- BIF_ERROR(BIF_P, BADARG);
+ BIF_ERROR(p, BADARG);
}
- tptr = tuple_val(BIF_ARG_1);
+ tptr = tuple_val(arg1);
if (arityval(*tptr) < 1 ||
- (tb = db_get_table(BIF_P, tptr[1], DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ (tb = db_get_table(p, tptr[1], DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- safety = ITERATION_SAFETY(BIF_P,tb);
+ safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_continue(BIF_P,tb,
- BIF_ARG_1, &ret);
+ cret = tb->common.meth->db_select_continue(p,tb, arg1, &ret);
- if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
- fix_table_locked(BIF_P, tb);
+ if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ fix_table_locked(p, tb);
}
if (safety == ITER_UNSAFE) {
local_unfix_table(tb);
@@ -2292,20 +2333,26 @@ BIF_RETTYPE ets_select_1(BIF_ALIST_1)
ERTS_BIF_PREP_RET(result, ret);
break;
case DB_ERROR_SYSRES:
- ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
break;
default:
- ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
break;
}
- erts_match_set_release_result(BIF_P);
+ erts_match_set_release_result(p);
return result;
}
BIF_RETTYPE ets_select_2(BIF_ALIST_2)
{
+ return ets_select2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+}
+
+static BIF_RETTYPE
+ets_select2(Process* p, Eterm arg1, Eterm arg2)
+{
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2318,19 +2365,19 @@ BIF_RETTYPE ets_select_2(BIF_ALIST_2)
* Make sure that the table exists.
*/
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
+ if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(p, BADARG);
}
- safety = ITERATION_SAFETY(BIF_P,tb);
+ safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(BIF_P, tb, BIF_ARG_2,
+ cret = tb->common.meth->db_select(p, tb, arg2,
0, &ret);
- if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
- fix_table_locked(BIF_P, tb);
+ if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
+ fix_table_locked(p, tb);
}
if (safety == ITER_UNSAFE) {
local_unfix_table(tb);
@@ -2342,21 +2389,23 @@ BIF_RETTYPE ets_select_2(BIF_ALIST_2)
ERTS_BIF_PREP_RET(result, ret);
break;
case DB_ERROR_SYSRES:
- ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
break;
default:
- ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
break;
}
- erts_match_set_release_result(BIF_P);
+ erts_match_set_release_result(p);
return result;
}
/* We get here instead of in the real BIF when trapping */
-static BIF_RETTYPE ets_select_count_1(Process *p, Eterm a1)
+static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
BIF_RETTYPE result;
DbTable* tb;
int cret;
@@ -2497,7 +2546,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_reverse_1(BIF_ALIST_1)
{
- return ets_select_1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
@@ -2551,7 +2600,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
*/
BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
{
- return ets_select_1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ARG_1);
}
BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
@@ -2567,7 +2616,7 @@ BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2585,7 +2634,7 @@ BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2604,7 +2653,9 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
int i;
Eterm* hp;
/*Process* rp = NULL;*/
+ /* If/when we implement lockless private tables:
Eterm owner;
+ */
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
@@ -2613,7 +2664,9 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+ /* If/when we implement lockless private tables:
owner = tb->common.owner;
+ */
/* If/when we implement lockless private tables:
if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->id) {
@@ -2789,7 +2842,7 @@ void init_db(void)
}
#endif
- erts_smp_atomic_init(&erts_ets_misc_mem_size, 0);
+ erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0);
db_initialize_util();
if (user_requested_db_max_tabs < DB_DEF_MAX_TABS)
@@ -2800,10 +2853,10 @@ void init_db(void)
bits = erts_fit_in_bits(db_max_tabs-1);
if (bits > SMALL_BITS) {
erl_exit(1,"Max limit for ets tabled too high %u (max %u).",
- db_max_tabs, 1L<<SMALL_BITS);
+ db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_main_tab_slot_mask = (1L<<bits) - 1;
- meta_main_tab_seq_incr = (1L<<bits);
+ meta_main_tab_slot_mask = (((Uint)1)<<bits) - 1;
+ meta_main_tab_seq_incr = (((Uint)1)<<bits);
size = sizeof(*meta_main_tab)*db_max_tabs;
meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
@@ -2816,7 +2869,7 @@ void init_db(void)
SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1);
meta_main_tab_first_free = 0;
- meta_name_tab_mask = (1L<<(bits-1)) - 1; /* At least half the size of main tab */
+ meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
meta_name_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
ERTS_ETS_MISC_MEM_ADD(size);
@@ -2831,13 +2884,13 @@ void init_db(void)
/*TT*/
/* Create meta table invertion. */
- erts_smp_atomic_init(&init_tb.common.memory_size, 0);
+ erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb,
sizeof(DbTable));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
- erts_smp_atomic_init(&meta_pid_to_tab->common.memory_size,
- erts_smp_atomic_read(&init_tb.common.memory_size));
+ erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
+ erts_smp_atomic_read_nob(&init_tb.common.memory_size));
meta_pid_to_tab->common.id = NIL;
meta_pid_to_tab->common.the_name = am_true;
@@ -2850,7 +2903,7 @@ void init_db(void)
#endif
meta_pid_to_tab->common.keypos = 1;
meta_pid_to_tab->common.owner = NIL;
- erts_smp_atomic_init(&meta_pid_to_tab->common.nitems, 0);
+ erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0);
meta_pid_to_tab->common.slot = -1;
meta_pid_to_tab->common.meth = &db_hash;
meta_pid_to_tab->common.compress = 0;
@@ -2863,13 +2916,13 @@ void init_db(void)
erl_exit(1,"Unable to create ets metadata tables.");
}
- erts_smp_atomic_set(&init_tb.common.memory_size, 0);
+ erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0);
meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb,
sizeof(DbTable));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
- erts_smp_atomic_init(&meta_pid_to_fixed_tab->common.memory_size,
- erts_smp_atomic_read(&init_tb.common.memory_size));
+ erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
+ erts_smp_atomic_read_nob(&init_tb.common.memory_size));
meta_pid_to_fixed_tab->common.id = NIL;
meta_pid_to_fixed_tab->common.the_name = am_true;
@@ -2882,7 +2935,7 @@ void init_db(void)
#endif
meta_pid_to_fixed_tab->common.keypos = 1;
meta_pid_to_fixed_tab->common.owner = NIL;
- erts_smp_atomic_init(&meta_pid_to_fixed_tab->common.nitems, 0);
+ erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0);
meta_pid_to_fixed_tab->common.slot = -1;
meta_pid_to_fixed_tab->common.meth = &db_hash;
meta_pid_to_fixed_tab->common.compress = 0;
@@ -3421,7 +3474,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
- && erts_smp_atomic_read(&tb->hash.fixdel) != (erts_aint_t)NULL) {
+ && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
@@ -3519,8 +3572,10 @@ static void free_heir_data(DbTable* tb)
#endif
}
-static BIF_RETTYPE ets_delete_trap(Process *p, Eterm cont)
+static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm cont = BIF_ARG_1;
int trap;
Eterm* ptr = big_val(cont);
DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
@@ -3606,7 +3661,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
Eterm ret = THE_NON_VALUE;
if (What == am_size) {
- ret = make_small(erts_smp_atomic_read(&tb->common.nitems));
+ ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems));
} else if (What == am_type) {
if (tb->common.status & DB_SET) {
ret = am_set;
@@ -3619,7 +3674,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_bag;
}
} else if (What == am_memory) {
- Uint words = (Uint) ((erts_smp_atomic_read(&tb->common.memory_size)
+ Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint));
@@ -3657,9 +3712,6 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_true;
else
ret = am_false;
- } else if (What == am_atom_put("kept_objects",12)) {
- ret = make_small(IS_HASH_TABLE(tb->common.status)
- ? db_kept_items_hash(&tb->hash) : 0);
} else if (What == am_atom_put("safe_fixed",10)) {
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
@@ -3701,7 +3753,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
Eterm* hp;
db_calc_stats_hash(&tb->hash, &stats);
- hp = HAlloc(p, 1 + 6 + FLOAT_SIZE_OBJECT*3);
+ hp = HAlloc(p, 1 + 7 + FLOAT_SIZE_OBJECT*3);
f.fd = stats.avg_chain_len;
avg = make_float(hp);
PUT_DOUBLE(f, hp);
@@ -3716,10 +3768,11 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
std_dev_exp = make_float(hp);
PUT_DOUBLE(f, hp);
hp += FLOAT_SIZE_OBJECT;
- ret = TUPLE6(hp, make_small(erts_smp_atomic_read(&tb->hash.nactive)),
+ ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)),
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
- make_small(stats.max_chain_len));
+ make_small(stats.max_chain_len),
+ make_small(db_kept_items_hash(&tb->hash)));
}
else {
ret = am_false;
@@ -3735,9 +3788,9 @@ static void print_table(int to, void *to_arg, int show, DbTable* tb)
tb->common.meth->db_print(to, to_arg, show, tb);
- erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read(&tb->common.nitems));
+ erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems));
erts_print(to, to_arg, "Words: %bpu\n",
- (UWord) ((erts_smp_atomic_read(&tb->common.memory_size)
+ (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
@@ -3763,8 +3816,9 @@ void db_info(int to, void *to_arg, int show) /* Called by break handler */
Uint
erts_get_ets_misc_mem_size(void)
{
+ ERTS_SMP_MEMORY_BARRIER;
/* Memory not allocated in ets_alloc */
- return (Uint) erts_smp_atomic_read(&erts_ets_misc_mem_size);
+ return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size);
}
/* SMP Note: May only be used when system is locked */
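
Throughout erl_db.c the plain erts_smp_atomic_* operations are renamed to explicitly ordered variants (_nob for no barrier, _acqb/_relb for acquire/release), with an explicit ERTS_SMP_MEMORY_BARRIER issued where a full barrier is still wanted. As a rough analogue only (assuming C11 atomics, not the ERTS API), the suffixes map approximately onto these memory orders:

    #include <stdatomic.h>

    /* Rough analogue in C11 atomics; the erts_smp_atomic_*_nob/_acqb/_relb
     * names in the hunks correspond roughly to these orderings. */
    static atomic_long counter;

    static long read_nob_like(void)      /* ~ erts_smp_atomic_read_nob  */
    {
        return atomic_load_explicit(&counter, memory_order_relaxed);
    }

    static long read_acqb_like(void)     /* ~ erts_smp_atomic_read_acqb */
    {
        return atomic_load_explicit(&counter, memory_order_acquire);
    }

    static void add_nob_like(long v)     /* ~ erts_smp_atomic_add_nob   */
    {
        atomic_fetch_add_explicit(&counter, v, memory_order_relaxed);
    }

    static void set_relb_like(long v)    /* ~ erts_smp_atomic_set_relb  */
    {
        atomic_store_explicit(&counter, v, memory_order_release);
    }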
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index e0bdebcb01..2e5deaf338 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -86,11 +86,11 @@ do { \
erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \
- ((erts_aint_t) (FREE_SZ))); \
ASSERT((TAB)); \
- erts_smp_atomic_add(&(TAB)->common.memory_size, sz__); \
+ erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \
} while (0)
#define ERTS_ETS_MISC_MEM_ADD(SZ) \
- erts_smp_atomic_add(&erts_ets_misc_mem_size, (SZ));
+ erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ));
ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type,
DbTable *tab,
@@ -227,7 +227,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size)
ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0);
ASSERT(((void *) tab) != ptr
- || erts_smp_atomic_read(&tab->common.memory_size) == 0);
+ || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0);
erts_free(type, ptr);
}
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index e65d397cfb..2fea4671e1 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -111,12 +111,16 @@
# define DB_USING_FINE_LOCKING(TB) 0
#endif
+#ifdef ETHR_ORDERED_READ_DEPEND
+#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))
+#else
#define SEGTAB(tb) \
(DB_USING_FINE_LOCKING(tb) \
- ? ((struct segment**) erts_smp_atomic_read_acqb(&(tb)->segtab)) \
- : ((struct segment**) erts_smp_atomic_read(&(tb)->segtab)))
-#define NACTIVE(tb) ((int)erts_smp_atomic_read(&(tb)->nactive))
-#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
+ ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \
+ : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)))
+#endif
+#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
+#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK]
@@ -133,11 +137,11 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
{
Uint mask = (DB_USING_FINE_LOCKING(tb)
? erts_smp_atomic_read_acqb(&tb->szm)
- : erts_smp_atomic_read(&tb->szm));
- Uint ix = hval & mask;
- if (ix >= erts_smp_atomic_read(&tb->nactive)) {
+ : erts_smp_atomic_read_nob(&tb->szm));
+ Uint ix = hval & mask;
+ if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) {
ix &= mask>>1;
- ASSERT(ix < erts_smp_atomic_read(&tb->nactive));
+ ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive));
}
return ix;
}
@@ -152,14 +156,14 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
(DbTable *) tb,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
- fixd->slot = ix;
- was_next = erts_smp_atomic_read(&tb->fixdel);
+ fixd->slot = ix;
+ was_next = erts_smp_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic insertion in linked list: */
exp_next = was_next;
fixd->next = (FixedDeletion*) exp_next;
- was_next = erts_smp_atomic_cmpxchg(&tb->fixdel,
- (erts_aint_t) fixd,
- exp_next);
+ was_next = erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ (erts_aint_t) fixd,
+ exp_next);
}while (was_next != exp_next);
}
@@ -319,15 +323,27 @@ struct ext_segment {
struct segment* segtab[1]; /* The segment table */
};
#define SIZEOF_EXTSEG(NSEGS) \
- (sizeof(struct ext_segment) - sizeof(struct segment*) + sizeof(struct segment*)*(NSEGS))
+ (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS))
-#ifdef DEBUG
-# include <stddef.h> /* offsetof */
+#if defined(DEBUG) || defined(VALGRIND)
# define EXTSEG(SEGTAB_PTR) \
((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
#endif
+static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
+ struct segment** segtab)
+{
+ if (DB_USING_FINE_LOCKING(tb))
+ erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+ else
+ erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
+#ifdef VALGRIND
+ tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab);
+#endif
+}
+
+
/* How the table segments relate to each other:
ext_segment: ext_segment: "plain" segment
@@ -551,22 +567,24 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
/*int tries = 0;*/
DEBUG_WAIT();
- if (erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel,
- (erts_aint_t)NULL) != (erts_aint_t)NULL) {
+ if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ (erts_aint_t) fixdel,
+ (erts_aint_t) NULL) != (erts_aint_t) NULL) {
/* Oboy, must join lists */
FixedDeletion* last = fixdel;
erts_aint_t was_tail;
erts_aint_t exp_tail;
- while (last->next != NULL) last = last->next;
- was_tail = erts_smp_atomic_read(&tb->fixdel);
+ while (last->next != NULL) last = last->next;
+ was_tail = erts_smp_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic list insertion */
exp_tail = was_tail;
last->next = (FixedDeletion*) exp_tail;
/*++tries;*/
DEBUG_WAIT();
- was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel,
- exp_tail);
+ was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ (erts_aint_t) fixdel,
+ exp_tail);
}while (was_tail != exp_tail);
}
/*erts_fprintf(stderr,"erl_db_hash: restore_fixdel tries=%d\r\n", tries);*/
@@ -583,7 +601,8 @@ void db_unfix_table_hash(DbTableHash *tb)
|| (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
&& !tb->common.is_thread_safe));
restart:
- fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (erts_aint_t)NULL);
+ fixdel = (FixedDeletion*) erts_smp_atomic_xchg_acqb(&tb->fixdel,
+ (erts_aint_t) NULL);
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
int ix = fx->slot;
@@ -650,14 +669,15 @@ int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_atomic_init(&tb->szm, SEGSZ_MASK);
- erts_smp_atomic_init(&tb->nactive, SEGSZ);
- erts_smp_atomic_init(&tb->fixdel, (erts_aint_t)NULL);
- erts_smp_atomic_init(&tb->segtab, (erts_aint_t) alloc_ext_seg(tb,0,NULL)->segtab);
+ erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK);
+ erts_smp_atomic_init_nob(&tb->nactive, SEGSZ);
+ erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
+ erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
+ SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab);
tb->nsegs = NSEG_1;
tb->nslots = SEGSZ;
- erts_smp_atomic_init(&tb->is_resizing, 0);
+ erts_smp_atomic_init_nob(&tb->is_resizing, 0);
#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
@@ -674,7 +694,7 @@ int db_create_hash(Process *p, DbTable *tbl)
/* This important property is needed to guarantee that the buckets
* involved in a grow/shrink operation it protected by the same lock:
*/
- ASSERT(erts_smp_atomic_read(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
+ ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
}
else { /* coarse locking */
tb->locks = NULL;
@@ -794,7 +814,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
if (tb->common.status & DB_SET) {
HashDbTerm* bnext = b->next;
if (b->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc(&tb->common.nitems);
+ erts_smp_atomic_inc_nob(&tb->common.nitems);
}
else if (key_clash_fail) {
ret = DB_ERROR_BADKEY;
@@ -822,7 +842,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
do {
if (db_eq(&tb->common,obj,&q->dbterm)) {
if (q->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc(&tb->common.nitems);
+ erts_smp_atomic_inc_nob(&tb->common.nitems);
q->hvalue = hval;
if (q != b) { /* must move to preserve key insertion order */
*qp = q->next;
@@ -843,7 +863,7 @@ Lnew:
q->hvalue = hval;
q->next = b;
*bp = q;
- nitems = erts_smp_atomic_inctest(&tb->common.nitems);
+ nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
@@ -1080,7 +1100,7 @@ int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value)
EQ(value, b->dbterm.tpl[2])) {
*bp = b->next;
free_term(tb, b);
- erts_smp_atomic_dec(&tb->common.nitems);
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
b = *bp;
break;
}
@@ -1139,7 +1159,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add(&tb->common.nitems, nitems_diff);
+ erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
*ret = am_true;
@@ -1198,7 +1218,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add(&tb->common.nitems, nitems_diff);
+ erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
*ret = am_true;
@@ -1809,7 +1829,7 @@ static int db_select_delete_hash(Process *p,
free_term(tb, del);
did_erase = 1;
}
- erts_smp_atomic_dec(&tb->common.nitems);
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
++got;
}
--num_left;
@@ -1920,7 +1940,7 @@ static int db_select_delete_continue_hash(Process *p,
free_term(tb, del);
did_erase = 1;
}
- erts_smp_atomic_dec(&tb->common.nitems);
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
++got;
}
@@ -2075,7 +2095,7 @@ int db_mark_all_deleted_hash(DbTable *tbl)
}while(list != NULL);
}
}
- erts_smp_atomic_set(&tb->common.nitems, 0);
+ erts_smp_atomic_set_nob(&tb->common.nitems, 0);
return DB_ERROR_NONE;
}
@@ -2126,7 +2146,7 @@ static int db_free_table_continue_hash(DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
int done;
- FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read(&tb->fixdel);
+ FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel);
ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
done = 0;
@@ -2140,11 +2160,11 @@ static int db_free_table_continue_hash(DbTable *tbl)
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
if (++done >= 2*DELETE_RECORD_LIMIT) {
- erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)fixdel);
+ erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
return 0; /* Not done */
}
}
- erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)NULL);
+ erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
done /= 2;
while(tb->nslots != 0) {
@@ -2168,7 +2188,7 @@ static int db_free_table_continue_hash(DbTable *tbl)
tb->locks = NULL;
}
#endif
- ASSERT(erts_smp_atomic_read(&tb->common.memory_size) == sizeof(DbTable));
+ ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
return 1; /* Done */
}
@@ -2361,10 +2381,7 @@ static int alloc_seg(DbTableHash *tb)
struct ext_segment* eseg;
eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t) eseg->segtab);
- else
- erts_smp_atomic_set(&tb->segtab, (erts_aint_t) eseg->segtab);
+ SET_SEGTAB(tb, eseg->segtab);
tb->nsegs = eseg->nsegs;
}
ASSERT(seg_ix < tb->nsegs);
@@ -2436,12 +2453,7 @@ static int free_seg(DbTableHash *tb, int free_records)
MY_ASSERT(newtop->s.is_ext_segment);
if (newtop->prev_segtab != NULL) {
/* Time to use a smaller segtab */
- if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->segtab,
- (erts_aint_t)newtop->prev_segtab);
- else
- erts_smp_atomic_set(&tb->segtab,
- (erts_aint_t) newtop->prev_segtab);
+ SET_SEGTAB(tb, newtop->prev_segtab);
tb->nsegs = seg_ix;
ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
}
@@ -2458,7 +2470,7 @@ static int free_seg(DbTableHash *tb, int free_records)
if (seg_ix > 0) {
if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
} else {
- erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)NULL);
+ SET_SEGTAB(tb, NULL);
}
#endif
tb->nslots -= SEGSZ;
@@ -2511,11 +2523,11 @@ static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
if (DB_USING_FINE_LOCKING(tb))
- return !erts_smp_atomic_xchg(&tb->is_resizing, 1);
+ return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
else {
- if (erts_smp_atomic_read(&tb->is_resizing))
+ if (erts_smp_atomic_read_nob(&tb->is_resizing))
return 0;
- erts_smp_atomic_set(&tb->is_resizing, 1);
+ erts_smp_atomic_set_nob(&tb->is_resizing, 1);
return 1;
}
}
@@ -2526,7 +2538,7 @@ done_resizing(DbTableHash* tb)
if (DB_USING_FINE_LOCKING(tb))
erts_smp_atomic_set_relb(&tb->is_resizing, 0);
else
- erts_smp_atomic_set(&tb->is_resizing, 0);
+ erts_smp_atomic_set_nob(&tb->is_resizing, 0);
}
/* Grow table with one new bucket.
@@ -2555,7 +2567,7 @@ static void grow(DbTableHash* tb, int nactive)
}
ASSERT(nactive < tb->nslots);
- szm = erts_smp_atomic_read(&tb->szm);
+ szm = erts_smp_atomic_read_nob(&tb->szm);
if (nactive <= szm) {
from_ix = nactive & (szm >> 1);
} else {
@@ -2572,12 +2584,12 @@ static void grow(DbTableHash* tb, int nactive)
WUNLOCK_HASH(lck);
goto abort;
}
- erts_smp_atomic_inc(&tb->nactive);
+ erts_smp_atomic_inc_nob(&tb->nactive);
if (from_ix == 0) {
if (DB_USING_FINE_LOCKING(tb))
erts_smp_atomic_set_relb(&tb->szm, szm);
else
- erts_smp_atomic_set(&tb->szm, szm);
+ erts_smp_atomic_set_nob(&tb->szm, szm);
}
done_resizing(tb);
@@ -2625,7 +2637,7 @@ static void shrink(DbTableHash* tb, int nactive)
if (NACTIVE(tb) == nactive) {
erts_smp_rwmtx_t* lck;
int src_ix = nactive - 1;
- int low_szm = erts_smp_atomic_read(&tb->szm) >> 1;
+ int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
int dst_ix = src_ix & low_szm;
ASSERT(dst_ix < src_ix);
@@ -2652,7 +2664,7 @@ static void shrink(DbTableHash* tb, int nactive)
*dst_bp = *src_bp;
*src_bp = NULL;
- erts_smp_atomic_set(&tb->nactive, src_ix);
+ erts_smp_atomic_set_nob(&tb->nactive, src_ix);
if (dst_ix == 0) {
erts_smp_atomic_set_relb(&tb->szm, low_szm);
}
@@ -2788,7 +2800,7 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl)
} else {
db_free_table_hash(tbl);
db_create_hash(p, tbl);
- erts_smp_atomic_set(&tbl->hash.common.nitems, 0);
+ erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0);
}
return 0;
}
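
The fixdel handling above keeps its lock-free push onto a singly linked list, but now pairs an acquire read of the list head with a release compare-and-swap. A minimal sketch of that push pattern with C11 atomics; the node layout and names are illustrative, not the DbTableHash types:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Minimal sketch of a lock-free push of the kind used for tb->fixdel;
     * the node type and names here are illustrative, not the ERTS ones. */
    struct fixdel_demo { struct fixdel_demo *next; int slot; };

    static _Atomic(struct fixdel_demo *) head;

    static void push_demo(struct fixdel_demo *n)
    {
        struct fixdel_demo *old =
            atomic_load_explicit(&head, memory_order_acquire);
        do {
            n->next = old;   /* link behind the current head */
        } while (!atomic_compare_exchange_weak_explicit(
                     &head, &old, n,
                     memory_order_release,   /* publish the initialized node */
                     memory_order_relaxed)); /* retry with the updated head  */
    }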
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e0285fa5ed..cddd8dfadd 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -58,6 +58,9 @@ typedef struct db_table_hash {
#ifdef ERTS_SMP
DbTableHashFineLocks* locks;
#endif
+#ifdef VALGRIND
+ struct ext_segment* top_ptr_to_segment_with_active_segtab;
+#endif
} DbTableHash;
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 9a0ba3a418..312050b931 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -49,7 +49,7 @@
#include "erl_db_tree.h"
#define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
-#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
+#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
/*
** A stack of this size is enough for an AVL tree with more than
@@ -84,7 +84,7 @@
*/
static DbTreeStack* get_static_stack(DbTableTree* tb)
{
- if (!erts_smp_atomic_xchg(&tb->is_stack_busy, 1)) {
+ if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
return NULL;
@@ -96,7 +96,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb)
static DbTreeStack* get_any_stack(DbTableTree* tb)
{
DbTreeStack* stack;
- if (!erts_smp_atomic_xchg(&tb->is_stack_busy, 1)) {
+ if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
return &tb->static_stack;
}
stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -110,7 +110,7 @@ static DbTreeStack* get_any_stack(DbTableTree* tb)
static void release_stack(DbTableTree* tb, DbTreeStack* stack)
{
if (stack == &tb->static_stack) {
- ASSERT(erts_smp_atomic_read(&tb->is_stack_busy) == 1);
+ ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1);
erts_smp_atomic_set_relb(&tb->is_stack_busy, 0);
}
else {
@@ -344,8 +344,8 @@ static int do_partly_bound_can_match_lesser(Eterm a, Eterm b,
int *done);
static int do_partly_bound_can_match_greater(Eterm a, Eterm b,
int *done);
-static BIF_RETTYPE ets_select_reverse(Process *p, Eterm a1,
- Eterm a2, Eterm a3);
+static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3);
+
/* Method interface functions */
static int db_first_tree(Process *p, DbTable *tbl,
@@ -478,7 +478,7 @@ int db_create_tree(Process *p, DbTable *tbl)
sizeof(TreeDbTerm *) * STACK_NEED);
tb->static_stack.pos = 0;
tb->static_stack.slot = 0;
- erts_smp_atomic_init(&tb->is_stack_busy, 0);
+ erts_smp_atomic_init_nob(&tb->is_stack_busy, 0);
tb->deletion = 0;
return DB_ERROR_NONE;
}
@@ -613,8 +613,8 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
for (;;)
if (!*this) { /* Found our place */
state = 1;
- if (erts_smp_atomic_inctest(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
- erts_smp_atomic_dec(&tb->common.nitems);
+ if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
return DB_ERROR_SYSRES;
}
*this = new_dbterm(tb, obj);
@@ -844,8 +844,12 @@ static int db_slot_tree(Process *p, DbTable *tbl,
-static BIF_RETTYPE ets_select_reverse(Process *p, Eterm a1, Eterm a2, Eterm a3)
+static BIF_RETTYPE ets_select_reverse(BIF_ALIST_3)
{
+ Process *p = BIF_P;
+ Eterm a1 = BIF_ARG_1;
+ Eterm a2 = BIF_ARG_2;
+ Eterm a3 = BIF_ARG_3;
Eterm list;
Eterm result;
Eterm* hp;
@@ -1583,7 +1587,7 @@ static int db_select_delete_continue_tree(Process *p,
sc.max = 1000;
sc.keypos = tb->common.keypos;
- ASSERT(!erts_smp_atomic_read(&tb->is_stack_busy));
+ ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy));
traverse_backwards(tb, &tb->static_stack, lastkey, NULL, &doit_select_delete, &sc);
BUMP_REDS(p, 1000 - sc.max);
@@ -1774,7 +1778,7 @@ static int db_free_table_continue_tree(DbTable *tbl)
(DbTable *) tb,
(void *) tb->static_stack.array,
sizeof(TreeDbTerm *) * STACK_NEED);
- ASSERT(erts_smp_atomic_read(&tb->common.memory_size)
+ ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size)
== sizeof(DbTable));
}
return result;
@@ -1784,7 +1788,7 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
{
db_free_table_tree(tbl);
db_create_tree(p, tbl);
- erts_smp_atomic_set(&tbl->tree.common.nitems, 0);
+ erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0);
return 0;
}
@@ -1866,7 +1870,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_smp_atomic_dec(&tb->common.nitems);
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
break;
}
}
@@ -1933,7 +1937,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
tstack[tpos++] = this;
state = delsub(this);
}
- erts_smp_atomic_dec(&tb->common.nitems);
+ erts_smp_atomic_dec_nob(&tb->common.nitems);
break;
}
}
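
get_static_stack/get_any_stack above claim the tree's static stack with an acquire exchange and hand it back with a release store. A small self-contained sketch of that try-acquire pattern (illustrative names, assuming C11 atomics):

    #include <stdatomic.h>

    /* Illustrative names: an acquire exchange claims the shared static
     * stack, a release store gives it back. */
    static atomic_int stack_busy_demo;

    static int try_claim_stack_demo(void)   /* ~ erts_smp_atomic_xchg_acqb(..,1) */
    {
        return atomic_exchange_explicit(&stack_busy_demo, 1,
                                        memory_order_acquire) == 0;
    }

    static void release_stack_demo(void)    /* ~ erts_smp_atomic_set_relb(..,0)  */
    {
        atomic_store_explicit(&stack_busy_demo, 0, memory_order_release);
    }

Only the caller that saw 0 from the exchange owns the static stack; any other caller falls back to an allocated stack, as the get_any_stack hunk shows.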
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index c3b074f782..c2f6cfa933 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,7 @@
#include "bif.h"
#include "big.h"
#include "erl_binary.h"
+#include "erl_thr_progress.h"
#include "erl_db_util.h"
@@ -491,11 +492,11 @@ erts_match_set_release_result(Process* c_p)
/* The trace control word. */
-static erts_smp_atomic_t trace_control_word;
+static erts_smp_atomic32_t trace_control_word;
/* This needs to be here, before the bif table... */
-static Eterm db_set_trace_control_word_fake_1(Process *p, Eterm val);
+static Eterm db_set_trace_control_word_fake_1(BIF_ALIST_1);
/*
** The table of callable bif's, i e guard bif's and
@@ -908,14 +909,18 @@ static void db_free_tmp_uncompressed(DbTerm* obj);
/*
** Pseudo BIF:s to be callable from the PAM VM.
*/
-
-BIF_RETTYPE db_get_trace_control_word_0(Process *p)
+BIF_RETTYPE db_get_trace_control_word(Process *p)
{
- Uint32 tcw = (Uint32) erts_smp_atomic_read(&trace_control_word);
+ Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word);
BIF_RET(erts_make_integer((Uint) tcw, p));
}
-BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
+BIF_RETTYPE db_get_trace_control_word_0(BIF_ALIST_0)
+{
+ BIF_RET(db_get_trace_control_word(BIF_P));
+}
+
+BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new)
{
Uint val;
Uint32 old_tcw;
@@ -923,19 +928,27 @@ BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
BIF_ERROR(p, BADARG);
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
-
- old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (erts_aint_t) val);
+
+ old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word,
+ (erts_aint32_t) val);
BIF_RET(erts_make_integer((Uint) old_tcw, p));
}
-static Eterm db_set_trace_control_word_fake_1(Process *p, Eterm new)
+BIF_RETTYPE db_set_trace_control_word_1(BIF_ALIST_1)
{
+ BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_1));
+}
+
+static Eterm db_set_trace_control_word_fake_1(BIF_ALIST_1)
+{
+ Process *p = BIF_P;
+ Eterm new = BIF_ARG_1;
Uint val;
if (!term_to_Uint(new, &val))
BIF_ERROR(p, BADARG);
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
- BIF_RET(db_get_trace_control_word_0(p));
+ BIF_RET(db_get_trace_control_word(p));
}
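The BIF_ALIST_0/BIF_ALIST_1 rewrites above reflect the new BIF calling convention: every BIF takes the process pointer plus a pointer to an argument array, while a plain C helper keeps the logic callable without the macros. A compilable sketch with simplified stand-in types (Process, Eterm and the BIF_* macros below are placeholders, not the real ERTS definitions):

typedef struct process { int dummy; } Process;
typedef unsigned long Eterm;

#define BIF_RETTYPE Eterm
#define BIF_ALIST_1 Process *A__p, Eterm *BIF__ARGS
#define BIF_P       A__p
#define BIF_ARG_1   BIF__ARGS[0]
#define BIF_RET(x)  return (x)

/* arity-independent worker, callable from C with a plain signature */
static Eterm do_set_tcw(Process *p, Eterm val)
{
    (void) p;
    return val;
}

/* thin BIF wrapper: unpacks the argument array and delegates */
BIF_RETTYPE set_trace_control_word_1(BIF_ALIST_1)
{
    BIF_RET(do_set_tcw(BIF_P, BIF_ARG_1));
}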
/*
@@ -1249,7 +1262,7 @@ void db_initialize_util(void){
sizeof(DMCGuardBif),
(int (*)(const void *, const void *)) &cmp_guard_bif);
match_pseudo_process_init();
- erts_smp_atomic_init(&trace_control_word, 0);
+ erts_smp_atomic32_init_nob(&trace_control_word, 0);
}
@@ -1703,6 +1716,7 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
Process *current_scheduled;
ErtsSchedulerData *esdp;
Eterm (*bif)(Process*, ...);
+ Eterm bif_args[3];
int fail_label;
int atomic_trace;
#if HALFWORD_HEAP
@@ -1731,16 +1745,20 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
#define BEGIN_ATOMIC_TRACE(p) \
do { \
if (! atomic_trace) { \
+ erts_refc_inc(&bprog->refc, 2); \
erts_smp_proc_unlock((p), ERTS_PROC_LOCK_MAIN); \
- erts_smp_block_system(0); \
+ erts_smp_thr_progress_block(); \
atomic_trace = !0; \
} \
} while (0)
#define END_ATOMIC_TRACE(p) \
do { \
if (atomic_trace) { \
- erts_smp_release_system(); \
+ erts_smp_thr_progress_unblock(); \
erts_smp_proc_lock((p), ERTS_PROC_LOCK_MAIN); \
+ if (erts_refc_dectest(&bprog->refc, 0) == 0) {\
+ erts_bin_free(bprog); \
+ } \
atomic_trace = 0; \
} \
} while (0)
@@ -1952,7 +1970,7 @@ restart:
break;
case matchCall0:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc);
+ t = (*bif)(build_proc, bif_args);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -1963,7 +1981,7 @@ restart:
break;
case matchCall1:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc, esp[-1]);
+ t = (*bif)(build_proc, esp-1);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -1974,7 +1992,9 @@ restart:
break;
case matchCall2:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc, esp[-1], esp[-2]);
+ bif_args[0] = esp[-1];
+ bif_args[1] = esp[-2];
+ t = (*bif)(build_proc, bif_args);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
@@ -1986,7 +2006,10 @@ restart:
break;
case matchCall3:
bif = (Eterm (*)(Process*, ...)) *pc++;
- t = (*bif)(build_proc, esp[-1], esp[-2], esp[-3]);
+ bif_args[0] = esp[-1];
+ bif_args[1] = esp[-2];
+ bif_args[2] = esp[-3];
+ t = (*bif)(build_proc, bif_args);
if (is_non_value(t)) {
if (do_catch)
t = FAIL_TERM;
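With the argument-array convention, the PAM interpreter above no longer needs a different call shape per arity: it fills bif_args[] from its evaluation stack and makes one kind of call. A sketch of that dispatch with simplified types (not the actual PAM code):

typedef unsigned long Eterm;
typedef struct process { int dummy; } Process;
typedef Eterm (*BifFun)(Process *, Eterm *);

static Eterm call_match_bif(BifFun bif, Process *p,
                            const Eterm *esp, int arity)
{
    Eterm bif_args[3];
    int i;
    for (i = 0; i < arity; i++)
        bif_args[i] = esp[-1 - i];   /* esp[-1], esp[-2], esp[-3] as above */
    return (*bif)(p, bif_args);
}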
@@ -2180,7 +2203,11 @@ restart:
*esp++ = am_true;
break;
case matchIsSeqTrace:
- if (SEQ_TRACE_TOKEN(c_p) != NIL)
+ if (SEQ_TRACE_TOKEN(c_p) != NIL
+#ifdef USE_VM_PROBES
+ && SEQ_TRACE_TOKEN(c_p) != am_have_dt_utag
+#endif
+ )
*esp++ = am_true;
else
*esp++ = am_false;
@@ -2204,7 +2231,11 @@ restart:
--esp;
break;
case matchGetSeqToken:
- if (SEQ_TRACE_TOKEN(c_p) == NIL)
+ if (SEQ_TRACE_TOKEN(c_p) == NIL
+#ifdef USE_VM_PROBES
+ || SEQ_TRACE_TOKEN(c_p) == am_have_dt_utag
+#endif
+ )
*esp++ = NIL;
else {
Eterm sender = SEQ_TRACE_TOKEN_SENDER(c_p);
@@ -2841,7 +2872,9 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
Uint new_sz = offset + db_size_dbterm_comp(tb, obj);
byte* basep;
DbTerm* newp;
+#ifdef DEBUG
byte* top;
+#endif
ASSERT(tb->compress);
if (old != 0) {
@@ -2863,7 +2896,10 @@ void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
}
newp->size = size_object(obj);
- top = copy_to_comp(tb, obj, newp, new_sz);
+#ifdef DEBUG
+ top =
+#endif
+ copy_to_comp(tb, obj, newp, new_sz);
ASSERT(top <= basep + new_sz);
/* ToDo: Maybe realloc if ((basep+new_sz) - top) > WASTED_SPACE_LIMIT */
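The #ifdef DEBUG wrapping above is the usual trick for a value that is only consumed by ASSERT(): the assignment target exists only in debug builds, so release builds avoid an unused-variable warning. A small stand-alone sketch of the same pattern (names are illustrative):

#include <assert.h>
#include <string.h>

static size_t copy_payload(char *dst, const char *src)
{
    size_t n = strlen(src) + 1;
    memcpy(dst, src, n);
    return n;
}

static void store_example(char *base, size_t cap, const char *src)
{
#ifdef DEBUG
    size_t used =
#endif
        copy_payload(base, src);
#ifdef DEBUG
    assert(used <= cap);          /* mirrors ASSERT(top <= basep + new_sz) */
#else
    (void) cap;
#endif
}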
@@ -4965,7 +5001,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
static Eterm seq_trace_fake(Process *p, Eterm arg1)
{
- Eterm result = seq_trace_info_1(p,arg1);
+ Eterm result = erl_seq_trace_info(p, arg1);
if (is_tuple(result) && *tuple_val(result) == 2) {
return (tuple_val(result))[2];
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index bb1751d309..6a96e174e1 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -326,8 +326,10 @@ ERTS_GLB_INLINE int db_eq(DbTableCommon* tb, Eterm a, DbTerm* b)
(T)->common.owner == (P)->id)
/* Function prototypes */
-Eterm db_get_trace_control_word_0(Process *p);
-Eterm db_set_trace_control_word_1(Process *p, Eterm val);
+BIF_RETTYPE db_get_trace_control_word(Process* p);
+BIF_RETTYPE db_set_trace_control_word(Process* p, Eterm tcw);
+BIF_RETTYPE db_get_trace_control_word_0(BIF_ALIST_0);
+BIF_RETTYPE db_set_trace_control_word_1(BIF_ALIST_1);
void db_initialize_util(void);
Eterm db_getkey(int keypos, Eterm obj);
diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h
index bdfbaddbbf..c49354a2b3 100644
--- a/erts/emulator/beam/erl_debug.h
+++ b/erts/emulator/beam/erl_debug.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -51,7 +51,7 @@
extern Uint32 verbose;
-void upp(byte*, int);
+void upp(byte*, size_t);
void pat(Eterm);
void pinfo(void);
void pp(Process*);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 401967a8de..1ae9a211d7 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -28,6 +28,14 @@
# include "config.h"
#endif
+#define ERL_DRV_DEPRECATED_FUNC
+#ifdef __GNUC__
+# if __GNUC__ >= 3
+# undef ERL_DRV_DEPRECATED_FUNC
+# define ERL_DRV_DEPRECATED_FUNC __attribute__((deprecated))
+# endif
+#endif
+
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
# undef SIZEOF_CHAR
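The block above defines ERL_DRV_DEPRECATED_FUNC with the usual compiler feature test: default to nothing, then redefine when the attribute is known to be supported. The same pattern in isolation, with an illustrative macro and function name (not part of erl_driver.h):

#define MY_DEPRECATED
#ifdef __GNUC__
# if __GNUC__ >= 3
#  undef MY_DEPRECATED
#  define MY_DEPRECATED __attribute__((deprecated))
# endif
#endif

int old_api(int x) MY_DEPRECATED;   /* callers get a warning on GCC >= 3 */

int old_api(int x)
{
    return x + 1;
}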
@@ -77,6 +85,7 @@
#include "erl_drv_nif.h"
#include <stdlib.h>
+#include <string.h> /* ssize_t on Mac OS X */
#if defined(VXWORKS)
# include <ioLib.h>
@@ -126,8 +135,8 @@ typedef struct {
#define DO_WRITE ERL_DRV_WRITE
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
-#define ERL_DRV_EXTENDED_MAJOR_VERSION 1
-#define ERL_DRV_EXTENDED_MINOR_VERSION 5
+#define ERL_DRV_EXTENDED_MAJOR_VERSION 2
+#define ERL_DRV_EXTENDED_MINOR_VERSION 0
/*
* The emulator will refuse to load a driver with different major
@@ -152,10 +161,15 @@ typedef struct {
/*
* Integer types
*/
-
+#if defined(__WIN32__) && (SIZEOF_VOID_P == 8)
+typedef unsigned __int64 ErlDrvTermData;
+typedef unsigned __int64 ErlDrvUInt;
+typedef signed __int64 ErlDrvSInt;
+#else
typedef unsigned long ErlDrvTermData;
typedef unsigned long ErlDrvUInt;
typedef signed long ErlDrvSInt;
+#endif
#if defined(__WIN32__)
typedef unsigned __int64 ErlDrvUInt64;
@@ -170,13 +184,21 @@ typedef long long ErlDrvSInt64;
#error No 64-bit integer type
#endif
+#if defined(__WIN32__)
+typedef ErlDrvUInt ErlDrvSizeT;
+typedef ErlDrvSInt ErlDrvSSizeT;
+#else
+typedef size_t ErlDrvSizeT;
+typedef ssize_t ErlDrvSSizeT;
+#endif
+
/*
* A binary as seen in a driver. Note that a binary should never be
* altered by the driver when it has been sent to Erlang.
*/
typedef struct erl_drv_binary {
- long orig_size; /* total length of binary */
+ ErlDrvSInt orig_size; /* total length of binary */
char orig_bytes[1]; /* the data (char instead of byte!) */
} ErlDrvBinary;
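The new ErlDrvSizeT/ErlDrvSSizeT types exist because 64-bit Windows is LLP64: unsigned long stays 32 bits there, so the driver API falls back to __int64, while Unix can use size_t/ssize_t directly. A sketch of the intent as a compile-time check (my_drv_size_t is illustrative and uses the MSVC _WIN64 macro rather than the configure-time test above):

#include <assert.h>
#include <stddef.h>

#if defined(_WIN64)
typedef unsigned __int64 my_drv_size_t;
#else
typedef size_t my_drv_size_t;
#endif

/* driver lengths and queue sizes should be pointer-sized on every target */
static_assert(sizeof(my_drv_size_t) == sizeof(void *),
              "driver size type must be pointer-sized");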
@@ -236,7 +258,7 @@ typedef struct {
typedef struct erl_io_vec {
int vsize; /* length of vectors */
- int size; /* total size in bytes */
+ ErlDrvSizeT size; /* total size in bytes */
SysIOVec* iov;
ErlDrvBinary** binv;
} ErlIOVec;
@@ -277,8 +299,8 @@ typedef struct erl_drv_entry {
void (*stop)(ErlDrvData drv_data);
/* called when port is closed, and when the
emulator is halted. */
- void (*output)(ErlDrvData drv_data, char *buf, int len);
- /* called when we have output from erlang to
+ void (*output)(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
+ /* called when we have output from erlang to
the port */
void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
/* called when we have input from one of
@@ -291,10 +313,10 @@ typedef struct erl_drv_entry {
void (*finish)(void); /* called before unloading the driver -
DYNAMIC DRIVERS ONLY */
void *handle; /* Reserved -- Used by emulator internally */
- int (*control)(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen);
- /* "ioctl" for drivers - invoked by
- port_control/3 */
+ ErlDrvSSizeT (*control)(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len, char **rbuf,
+ ErlDrvSizeT rlen); /* "ioctl" for drivers - invoked by
+ port_control/3 */
void (*timeout)(ErlDrvData drv_data); /* Handling of timeout in driver */
void (*outputv)(ErlDrvData drv_data, ErlIOVec *ev);
/* called when we have output from erlang
@@ -305,10 +327,12 @@ typedef struct erl_drv_entry {
closed, and there is data in the
driver queue that needs to be flushed
before 'stop' can be called */
- int (*call)(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned int *flags);
- /* Works mostly like 'control', a synchronous
- call into the driver. */
+ ErlDrvSSizeT (*call)(ErlDrvData drv_data,
+ unsigned int command, char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen,
+ unsigned int *flags); /* Works mostly like 'control',
+ a synchronous
+ call into the driver. */
void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
ErlDrvEventData event_data);
/* Called when an event selected by
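The control and call entries above now use ErlDrvSizeT/ErlDrvSSizeT throughout. A sketch of a control callback under the widened signature (echo_control is a made-up driver-internal name; the rest of the driver boilerplate and allocation-failure handling are omitted):

#include <string.h>
#include <erl_driver.h>

static ErlDrvSSizeT echo_control(ErlDrvData drv_data, unsigned int command,
                                 char *buf, ErlDrvSizeT len,
                                 char **rbuf, ErlDrvSizeT rlen)
{
    (void) drv_data;
    (void) command;
    if (len > rlen)                    /* result does not fit: hand back a
                                          driver_alloc'ed buffer instead */
        *rbuf = driver_alloc(len);
    memcpy(*rbuf, buf, len);
    return (ErlDrvSSizeT) len;         /* number of bytes placed in *rbuf */
}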
@@ -347,11 +371,17 @@ typedef struct erl_drv_entry {
#ifndef ERL_DRIVER_TYPES_ONLY
#if defined(VXWORKS)
-# define DRIVER_INIT(DRIVER_NAME) ErlDrvEntry* DRIVER_NAME ## _init(void)
+# define DRIVER_INIT(DRIVER_NAME) \
+ ErlDrvEntry* DRIVER_NAME ## _init(void); \
+ ErlDrvEntry* DRIVER_NAME ## _init(void)
#elif defined(__WIN32__)
-# define DRIVER_INIT(DRIVER_NAME) __declspec(dllexport) ErlDrvEntry* driver_init(void)
+# define DRIVER_INIT(DRIVER_NAME) \
+ __declspec(dllexport) ErlDrvEntry* driver_init(void); \
+ __declspec(dllexport) ErlDrvEntry* driver_init(void)
#else
-# define DRIVER_INIT(DRIVER_NAME) ErlDrvEntry* driver_init(void)
+# define DRIVER_INIT(DRIVER_NAME) \
+ ErlDrvEntry* driver_init(void); \
+ ErlDrvEntry* driver_init(void)
#endif
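Each DRIVER_INIT variant above now expands to a declaration immediately followed by the definition, which silences -Wmissing-prototypes for the exported init function. A reduced sketch of the same expansion (EXPORTED_INIT and mydrv are made-up names):

#define EXPORTED_INIT(NAME) \
    int NAME ## _init(void); \
    int NAME ## _init(void)

EXPORTED_INIT(mydrv)
{
    /* expands to: int mydrv_init(void); int mydrv_init(void) { ... } */
    return 0;
}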
/*
@@ -360,14 +390,16 @@ typedef struct erl_drv_entry {
EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
ErlDrvEventData event_data);
-EXTERN int driver_output(ErlDrvPort port, char *buf, int len);
-EXTERN int driver_output2(ErlDrvPort port, char *hbuf, int hlen,
- char *buf, int len);
-EXTERN int driver_output_binary(ErlDrvPort port, char *hbuf, int hlen,
- ErlDrvBinary* bin, int offset, int len);
-EXTERN int driver_outputv(ErlDrvPort port, char* hbuf, int hlen, ErlIOVec *ev,
- int skip);
-EXTERN int driver_vec_to_buf(ErlIOVec *ev, char *buf, int len);
+
+EXTERN int driver_output(ErlDrvPort port, char *buf, ErlDrvSizeT len);
+EXTERN int driver_output2(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen,
+ char *buf, ErlDrvSizeT len);
+EXTERN int driver_output_binary(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen,
+ ErlDrvBinary* bin,
+ ErlDrvSizeT offset, ErlDrvSizeT len);
+EXTERN int driver_outputv(ErlDrvPort port, char* hbuf, ErlDrvSizeT hlen,
+ ErlIOVec *ev, ErlDrvSizeT skip);
+EXTERN ErlDrvSizeT driver_vec_to_buf(ErlIOVec *ev, char *buf, ErlDrvSizeT len);
EXTERN int driver_set_timer(ErlDrvPort port, unsigned long time);
EXTERN int driver_cancel_timer(ErlDrvPort port);
EXTERN int driver_read_timer(ErlDrvPort port, unsigned long *time_left);
@@ -428,8 +460,8 @@ EXTERN int get_port_flags(ErlDrvPort port);
* since the binary is a shared object it MUST be written once.
*/
-EXTERN ErlDrvBinary* driver_alloc_binary(int size);
-EXTERN ErlDrvBinary* driver_realloc_binary(ErlDrvBinary *bin, int size);
+EXTERN ErlDrvBinary* driver_alloc_binary(ErlDrvSizeT size);
+EXTERN ErlDrvBinary* driver_realloc_binary(ErlDrvBinary *bin, ErlDrvSizeT size);
EXTERN void driver_free_binary(ErlDrvBinary *bin);
/* Reference count on driver binaries */
@@ -438,24 +470,24 @@ EXTERN ErlDrvSInt driver_binary_inc_refc(ErlDrvBinary *dbp);
EXTERN ErlDrvSInt driver_binary_dec_refc(ErlDrvBinary *dbp);
/* Allocation interface */
-EXTERN void *driver_alloc(size_t size);
-EXTERN void *driver_realloc(void *ptr, size_t size);
+EXTERN void *driver_alloc(ErlDrvSizeT size);
+EXTERN void *driver_realloc(void *ptr, ErlDrvSizeT size);
EXTERN void driver_free(void *ptr);
/* Queue interface */
-EXTERN int driver_enq(ErlDrvPort port, char* buf, int len);
-EXTERN int driver_pushq(ErlDrvPort port, char* buf, int len);
-EXTERN int driver_deq(ErlDrvPort port, int size);
-EXTERN int driver_sizeq(ErlDrvPort port);
-EXTERN int driver_enq_bin(ErlDrvPort port, ErlDrvBinary *bin, int offset,
- int len);
-EXTERN int driver_pushq_bin(ErlDrvPort port, ErlDrvBinary *bin, int offset,
- int len);
-
-EXTERN int driver_peekqv(ErlDrvPort port, ErlIOVec *ev);
+EXTERN int driver_enq(ErlDrvPort port, char* buf, ErlDrvSizeT len);
+EXTERN int driver_pushq(ErlDrvPort port, char* buf, ErlDrvSizeT len);
+EXTERN ErlDrvSizeT driver_deq(ErlDrvPort port, ErlDrvSizeT size);
+EXTERN ErlDrvSizeT driver_sizeq(ErlDrvPort port);
+EXTERN int driver_enq_bin(ErlDrvPort port, ErlDrvBinary *bin, ErlDrvSizeT offset,
+ ErlDrvSizeT len);
+EXTERN int driver_pushq_bin(ErlDrvPort port, ErlDrvBinary *bin, ErlDrvSizeT offset,
+ ErlDrvSizeT len);
+
+EXTERN ErlDrvSizeT driver_peekqv(ErlDrvPort port, ErlIOVec *ev);
EXTERN SysIOVec* driver_peekq(ErlDrvPort port, int *vlen);
-EXTERN int driver_enqv(ErlDrvPort port, ErlIOVec *ev, int skip);
-EXTERN int driver_pushqv(ErlDrvPort port, ErlIOVec *ev, int skip);
+EXTERN int driver_enqv(ErlDrvPort port, ErlIOVec *ev, ErlDrvSizeT skip);
+EXTERN int driver_pushqv(ErlDrvPort port, ErlIOVec *ev, ErlDrvSizeT skip);
/*
* Add and remove driver entries.
@@ -582,8 +614,11 @@ EXTERN long driver_async(ErlDrvPort ix,
void* async_data,
void (*async_free)(void*));
-
-EXTERN int driver_async_cancel(unsigned int key);
+/*
+ * driver_async_cancel() is deprecated. It is scheduled for removal
+ * in OTP-R16. For more information see the erl_driver(3) documentation.
+ */
+EXTERN int driver_async_cancel(unsigned int key) ERL_DRV_DEPRECATED_FUNC;
/* Locks the driver in the machine "forever", there is
no unlock function. Note that this is almost never useful, as an open
@@ -614,6 +649,8 @@ EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
#endif
+/* also in global.h, but drivers can't include global.h */
+void dtrace_drvport_str(ErlDrvPort port, char *port_buf);
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index dc578f6d2a..a49a155701 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -158,7 +158,9 @@ erl_drv_mutex_create(char *name)
(sizeof(ErlDrvMutex)
+ (name ? sys_strlen(name) + 1 : 0)));
if (dmtx) {
- if (ethr_mutex_init(&dmtx->mtx) != 0) {
+ ethr_mutex_opt opt = ETHR_MUTEX_OPT_DEFAULT_INITER;
+ opt.posix_compliant = 1;
+ if (ethr_mutex_init_opt(&dmtx->mtx, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
dmtx = NULL;
}
@@ -226,7 +228,9 @@ erl_drv_cond_create(char *name)
(sizeof(ErlDrvCond)
+ (name ? sys_strlen(name) + 1 : 0)));
if (dcnd) {
- if (ethr_cond_init(&dcnd->cnd) != 0) {
+ ethr_cond_opt opt = ETHR_COND_OPT_DEFAULT_INITER;
+ opt.posix_compliant = 1;
+ if (ethr_cond_init_opt(&dcnd->cnd, &opt) != 0) {
erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
dcnd = NULL;
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 5edcd667e7..52a6e52e6c 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,7 @@
#include "hipe_stack.h"
#include "hipe_mode_switch.h"
#endif
+#include "dtrace-wrapper.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
@@ -100,14 +101,14 @@ static Uint combined_message_size(Process* p);
static void remove_message_buffers(Process* p);
static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
-static void do_minor(Process *p, int new_sz, Eterm* objv, int nobj);
+static void do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj);
static Eterm* sweep_rootset(Rootset *rootset, Eterm* htop, char* src, Uint src_size);
static Eterm* sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size);
static Eterm* sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
char* src, Uint src_size);
static Eterm* collect_heap_frags(Process* p, Eterm* heap,
Eterm* htop, Eterm* objv, int nobj);
-static Uint adjust_after_fullsweep(Process *p, int size_before,
+static Uint adjust_after_fullsweep(Process *p, Uint size_before,
int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
@@ -315,7 +316,12 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
if (is_non_value(result)) {
if (p->freason == TRAP) {
- cost = erts_garbage_collect(p, 0, p->def_arg_reg, p->arity);
+ #if HIPE
+ if (regs == NULL) {
+ regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
+ }
+ #endif
+ cost = erts_garbage_collect(p, 0, regs, p->arity);
} else {
cost = erts_garbage_collect(p, 0, regs, arity);
}
@@ -344,7 +350,9 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
Uint reclaimed_now = 0;
int done = 0;
Uint ms1, s1, us1;
-
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
+#endif
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_start);
}
@@ -352,12 +360,11 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
p->gcstatus = p->status;
p->status = P_GARBING;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (erts_system_monitor_long_gc != 0) {
get_now(&ms1, &s1, &us1);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-
- erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
ERTS_CHK_OFFHEAP(p);
@@ -365,15 +372,27 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
if (GEN_GCS(p) >= MAX_GEN_GCS(p)) {
FLAGS(p) |= F_NEED_FULLSWEEP;
}
-
+#ifdef USE_VM_PROBES
+ *pidbuf = '\0';
+ if (DTRACE_ENABLED(gc_major_start)
+ || DTRACE_ENABLED(gc_major_end)
+ || DTRACE_ENABLED(gc_minor_start)
+ || DTRACE_ENABLED(gc_minor_end)) {
+ dtrace_proc_str(p, pidbuf);
+ }
+#endif
/*
* Test which type of GC to do.
*/
while (!done) {
if ((FLAGS(p) & F_NEED_FULLSWEEP) != 0) {
+ DTRACE2(gc_major_start, pidbuf, need);
done = major_collection(p, need, objv, nobj, &reclaimed_now);
+ DTRACE2(gc_major_end, pidbuf, reclaimed_now);
} else {
+ DTRACE2(gc_minor_start, pidbuf, need);
done = minor_collection(p, need, objv, nobj, &reclaimed_now);
+ DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
}
}
@@ -392,8 +411,6 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
trace_gc(p, am_gc_end);
}
- erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
-
if (erts_system_monitor_long_gc != 0) {
Uint ms2, s2, us2;
Sint t;
@@ -441,7 +458,15 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
p->last_old_htop = p->old_htop;
#endif
- return ((int) (HEAP_TOP(p) - HEAP_START(p)) / 10);
+ /* FIXME: This function should really return an Sint, i.e., a possibly
+ 64 bit wide signed integer, but that requires updating all the code
+ that calls it. For now, we just return INT_MAX if the result is too
+ large for an int. */
+ {
+ Sint result = (HEAP_TOP(p) - HEAP_START(p)) / 10;
+ if (result >= INT_MAX) return INT_MAX;
+ else return (int) result;
+ }
}
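The replacement code above saturates at INT_MAX instead of silently truncating when the heap is very large. The same arithmetic in isolation (illustrative, assumes a nonnegative word count):

#include <limits.h>
#include <stdint.h>

static int clamped_cost(intptr_t heap_words)
{
    intptr_t result = heap_words / 10;     /* may exceed INT_MAX on 64-bit */
    return result >= INT_MAX ? INT_MAX : (int) result;
}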
/*
@@ -469,7 +494,6 @@ erts_garbage_collect_hibernate(Process* p)
p->gcstatus = p->status;
p->status = P_GARBING;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
ErtsGcQuickSanityCheck(p);
ASSERT(p->mbuf_sz == 0);
ASSERT(p->mbuf == 0);
@@ -583,12 +607,13 @@ erts_garbage_collect_hibernate(Process* p)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
p->status = p->gcstatus;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
}
void
-erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
+erts_garbage_collect_literals(Process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh)
{
Uint byte_lit_size = sizeof(Eterm)*lit_size;
Uint old_heap_size;
@@ -599,7 +624,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
char* area;
Uint area_size;
Eterm* old_htop;
- int n;
+ Uint n;
+ struct erl_off_heap_header** prev;
/*
* Set GC state.
@@ -608,7 +634,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
p->gcstatus = p->status;
p->status = P_GARBING;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
/*
* We assume that the caller has already done a major collection
@@ -634,6 +659,9 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
offset_heap(temp_lit, lit_size, offs, (char *) literals, byte_lit_size);
offset_heap(p->heap, p->htop - p->heap, offs, (char *) literals, byte_lit_size);
offset_rootset(p, offs, (char *) literals, byte_lit_size, p->arg_reg, p->arity);
+ if (oh) {
+ oh = (struct erl_off_heap_header *) ((Eterm *)(void *) oh + offs);
+ }
/*
* Now the literals are placed in memory that is safe to write into,
@@ -701,6 +729,45 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
p->old_htop = old_htop;
/*
+ * Prepare to sweep binaries. Since all MSOs on the new heap
+ * must come before MSOs on the old heap, find the end of
+ * current MSO list and use that as a starting point.
+ */
+
+ if (oh) {
+ prev = &MSO(p).first;
+ while (*prev) {
+ prev = &(*prev)->next;
+ }
+ }
+
+ /*
+ * Sweep through all binaries in the temporary literal area.
+ */
+
+ while (oh) {
+ if (IS_MOVED_BOXED(oh->thing_word)) {
+ Binary* bptr;
+ struct erl_off_heap_header* ptr;
+
+ ptr = (struct erl_off_heap_header*) boxed_val(oh->thing_word);
+ ASSERT(thing_subtag(ptr->thing_word) == REFC_BINARY_SUBTAG);
+ bptr = ((ProcBin*)ptr)->val;
+
+ /*
+ * This binary has been copied to the heap.
+ * We must increment its reference count and
+ * link it into the MSO list for the process.
+ */
+
+ erts_refc_inc(&bptr->refc, 1);
+ *prev = ptr;
+ prev = &ptr->next;
+ }
+ oh = oh->next;
+ }
+
+ /*
* We no longer need this temporary area.
*/
erts_free(ERTS_ALC_T_TMP, (void *) temp_lit);
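The sweep above uses the classic pointer-to-pointer list idiom: prev is first walked to the end of the existing MSO list, and every moved binary is then linked in by storing through *prev and advancing it. A generic stand-alone sketch (struct node is illustrative, not the ERTS off-heap header):

#include <stddef.h>

struct node { struct node *next; int val; };

static void append_all(struct node **list, struct node *extra)
{
    struct node **prev = list;
    while (*prev)                    /* find the end of the current list */
        prev = &(*prev)->next;
    while (extra) {
        struct node *n = extra;
        extra = extra->next;
        n->next = NULL;
        *prev = n;                   /* link in and advance the tail slot */
        prev = &n->next;
    }
}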
@@ -711,7 +778,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
p->status = p->gcstatus;
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
}
static int
@@ -731,7 +797,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* This improved Estone by more than 1200 estones on my computer
* (Ultra Sparc 10).
*/
- size_t new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
+ Uint new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
/* Create new, empty old_heap */
n_old = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
@@ -860,7 +926,18 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
* XXX: WARNING: If HiPE starts storing other non-Erlang values on the
* nstack, such as floats, then this will have to be changed.
*/
-#define offset_nstack(p,offs,area,area_size) offset_heap_ptr(hipe_nstack_start((p)),hipe_nstack_used((p)),(offs),(area),(area_size))
+static ERTS_INLINE void offset_nstack(Process* p, Sint offs,
+ char* area, Uint area_size)
+{
+ if (p->hipe.nstack) {
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
+ offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p),
+ offs, area, area_size);
+ }
+ else {
+ ASSERT(!p->hipe.nsp && !p->hipe.nstend);
+ }
+}
#else /* !HIPE */
@@ -871,12 +948,12 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
#endif /* HIPE */
static void
-do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
+do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
{
Rootset rootset; /* Rootset for GC (stack, dictionary, etc). */
Roots* roots;
Eterm* n_htop;
- int n;
+ Uint n;
Eterm* ptr;
Eterm val;
Eterm gval;
@@ -1056,6 +1133,15 @@ do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
sys_memcpy(n_heap + new_sz - n, p->stop, n * sizeof(Eterm));
p->stop = n_heap + new_sz - n;
+#ifdef USE_VM_PROBES
+ if (HEAP_SIZE(p) != new_sz && DTRACE_ENABLED(process_heap_grow)) {
+ DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, pidbuf);
+ DTRACE3(process_heap_grow, pidbuf, HEAP_SIZE(p), new_sz);
+ }
+#endif
+
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
(void*)HEAP_START(p),
HEAP_SIZE(p) * sizeof(Eterm));
@@ -1079,14 +1165,14 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
{
Rootset rootset;
Roots* roots;
- int size_before;
+ Uint size_before;
Eterm* n_heap;
Eterm* n_htop;
char* src = (char *) HEAP_START(p);
Uint src_size = (char *) HEAP_TOP(p) - src;
char* oh = (char *) OLD_HEAP(p);
Uint oh_size = (char *) OLD_HTOP(p) - oh;
- int n;
+ Uint n;
Uint new_sz;
Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
ErlMessage *msgp;
@@ -1277,6 +1363,15 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
sys_memcpy(n_heap + new_sz - n, p->stop, n * sizeof(Eterm));
p->stop = n_heap + new_sz - n;
+#ifdef USE_VM_PROBES
+ if (HEAP_SIZE(p) != new_sz && DTRACE_ENABLED(process_heap_grow)) {
+ DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, pidbuf);
+ DTRACE3(process_heap_grow, pidbuf, HEAP_SIZE(p), new_sz);
+ }
+#endif
+
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
(void *) HEAP_START(p),
(HEAP_END(p) - HEAP_START(p)) * sizeof(Eterm));
@@ -1312,10 +1407,10 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
static Uint
-adjust_after_fullsweep(Process *p, int size_before, int need, Eterm *objv, int nobj)
+adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int nobj)
{
- int wanted, sz, size_after, need_after;
- int stack_size = STACK_SZ_ON_HEAP(p);
+ Uint wanted, sz, size_after, need_after;
+ Uint stack_size = STACK_SZ_ON_HEAP(p);
Uint reclaimed_now;
size_after = (HEAP_TOP(p) - HEAP_START(p));
@@ -1845,7 +1940,13 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
-
+#ifdef USE_VM_PROBES
+ if (is_not_immed(p->dt_utag)) {
+ roots[n].v = &p->dt_utag;
+ roots[n].sz = 1;
+ n++;
+ }
+#endif
ASSERT(is_nil(p->tracer_proc) ||
is_internal_pid(p->tracer_proc) ||
is_internal_port(p->tracer_proc));
@@ -1915,8 +2016,8 @@ static void
grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj)
{
Eterm* new_heap;
- int heap_size = HEAP_TOP(p) - HEAP_START(p);
- int stack_size = p->hend - p->stop;
+ Uint heap_size = HEAP_TOP(p) - HEAP_START(p);
+ Uint stack_size = p->hend - p->stop;
Sint offs;
ASSERT(HEAP_SIZE(p) < new_sz);
@@ -1947,6 +2048,16 @@ grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj)
HEAP_TOP(p) = new_heap + heap_size;
HEAP_START(p) = new_heap;
}
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_heap_grow)) {
+ DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, pidbuf);
+ DTRACE3(process_heap_grow, pidbuf, HEAP_SIZE(p), new_sz);
+ }
+#endif
+
HEAP_SIZE(p) = new_sz;
}
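The DTRACE_ENABLED guards used throughout this file mean the pid string is only formatted when a probe is actually attached, so the common case pays nothing beyond the is-enabled test. A sketch of that pattern with placeholder macros (MY_PROBE_ENABLED and MY_PROBE3 stand in for the generated probe macros and are not real APIs):

#include <stdio.h>

#define MY_PROBE_ENABLED() (0)                        /* placeholder */
#define MY_PROBE3(a, b, c) ((void) (a), (void) (b), (void) (c))

static void note_heap_grow(unsigned long pid, size_t old_sz, size_t new_sz)
{
    if (MY_PROBE_ENABLED()) {
        char pidbuf[64];
        /* only pay for formatting when someone is listening */
        snprintf(pidbuf, sizeof(pidbuf), "<0.%lu.0>", pid);
        MY_PROBE3(pidbuf, old_sz, new_sz);
    }
}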
@@ -1954,10 +2065,9 @@ static void
shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj)
{
Eterm* new_heap;
- int heap_size = HEAP_TOP(p) - HEAP_START(p);
+ Uint heap_size = HEAP_TOP(p) - HEAP_START(p);
Sint offs;
-
- int stack_size = p->hend - p->stop;
+ Uint stack_size = p->hend - p->stop;
ASSERT(new_sz < p->heap_sz);
sys_memmove(p->heap + new_sz - stack_size, p->stop, stack_size *
@@ -1985,6 +2095,16 @@ shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj)
HEAP_TOP(p) = new_heap + heap_size;
HEAP_START(p) = new_heap;
}
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_heap_shrink)) {
+ DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, pidbuf);
+ DTRACE3(process_heap_shrink, pidbuf, HEAP_SIZE(p), new_sz);
+ }
+#endif
+
HEAP_SIZE(p) = new_sz;
}
@@ -2367,6 +2487,13 @@ offset_mqueue(Process *p, Sint offs, char* area, Uint area_size)
if (is_boxed(mesg) && in_area(ptr_val(mesg), area, area_size)) {
ERL_MESSAGE_TOKEN(mp) = offset_ptr(mesg, offs);
}
+#ifdef USE_VM_PROBES
+ mesg = ERL_MESSAGE_DT_UTAG(mp);
+ if (is_boxed(mesg) && in_area(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_DT_UTAG(mp) = offset_ptr(mesg, offs);
+ }
+#endif
+
ASSERT((is_nil(ERL_MESSAGE_TOKEN(mp)) ||
is_tuple(ERL_MESSAGE_TOKEN(mp)) ||
is_atom(ERL_MESSAGE_TOKEN(mp))));
@@ -2386,6 +2513,9 @@ offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
offset_heap_ptr(&p->fvalue, 1, offs, area, area_size);
offset_heap_ptr(&p->ftrace, 1, offs, area, area_size);
offset_heap_ptr(&p->seq_trace_token, 1, offs, area, area_size);
+#ifdef USE_VM_PROBES
+ offset_heap_ptr(&p->dt_utag, 1, offs, area, area_size);
+#endif
offset_heap_ptr(&p->group_leader, 1, offs, area, area_size);
offset_mqueue(p, offs, area, area_size);
offset_heap_ptr(p->stop, (STACK_START(p) - p->stop), offs, area, area_size);
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 807ef8ae8d..1801df359a 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -62,7 +62,7 @@ do { \
} while(0)
#define in_area(ptr,start,nbytes) \
- ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
extern Uint erts_test_long_gc_sleep;
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index 76b206d76f..e7d4ac2b67 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -163,10 +163,10 @@ BKT_MIN_SZ(GFAllctr_t *gfallctr, int ix)
/* Prototypes of callback functions */
static Block_t * get_free_block (Allctr_t *, Uint,
- Block_t *, Uint);
-static void link_free_block (Allctr_t *, Block_t *);
-static void unlink_free_block (Allctr_t *, Block_t *);
-static void update_last_aux_mbc (Allctr_t *, Carrier_t *);
+ Block_t *, Uint, Uint32);
+static void link_free_block (Allctr_t *, Block_t *, Uint32);
+static void unlink_free_block (Allctr_t *, Block_t *, Uint32);
+static void update_last_aux_mbc (Allctr_t *, Carrier_t *, Uint32);
static Eterm info_options (Allctr_t *, char *, int *,
void *, Uint **, Uint *);
static void init_atoms (void);
@@ -190,14 +190,20 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
GFAllctrInit_t *gfinit,
AllctrInit_t *init)
{
- GFAllctr_t nulled_state = {{0}};
- /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
- warning. gcc warns if {0} is used as initializer of a struct when
- the first member is a struct (not if, for example, the third member
- is a struct). */
+ struct {
+ int dummy;
+ GFAllctr_t allctr;
+ } zero = {0};
+ /* The struct with a dummy element first is used in order to avoid (an
+ incorrect) gcc warning. gcc warns if {0} is used as initializer of
+ a struct when the first member is a struct (not if, for example,
+ the third member is a struct). */
+
Allctr_t *allctr = (Allctr_t *) gfallctr;
- sys_memcpy((void *) gfallctr, (void *) &nulled_state, sizeof(GFAllctr_t));
+ sys_memcpy((void *) gfallctr, (void *) &zero.allctr, sizeof(GFAllctr_t));
+
+ init->sbmbct = 0; /* Small mbc not yet supported by goodfit */
allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
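The zero-struct workaround above avoids a spurious gcc warning for "= {0}" when the first member of the target is itself a struct. The same trick in a self-contained form (Inner and Allocator are illustrative types):

#include <string.h>

typedef struct { int a, b; } Inner;
typedef struct { Inner common; int extra; } Allocator;

static void init_allocator(Allocator *al)
{
    struct {
        int dummy;               /* scalar first member keeps gcc quiet */
        Allocator allctr;
    } zero = {0};

    memcpy(al, &zero.allctr, sizeof(Allocator));
}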
@@ -379,7 +385,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size)
static Block_t *
get_free_block(Allctr_t *allctr, Uint size,
- Block_t *cand_blk, Uint cand_size)
+ Block_t *cand_blk, Uint cand_size, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
int unsafe_bi, min_bi;
@@ -398,7 +404,7 @@ get_free_block(Allctr_t *allctr, Uint size,
if (blk) {
if (cand_blk && cand_size <= BLK_SZ(blk))
return NULL; /* cand_blk was better */
- unlink_free_block(allctr, blk);
+ unlink_free_block(allctr, blk, flags);
return blk;
}
if (min_bi < NO_OF_BKTS - 1) {
@@ -418,14 +424,14 @@ get_free_block(Allctr_t *allctr, Uint size,
ASSERT(blk);
if (cand_blk && cand_size <= BLK_SZ(blk))
return NULL; /* cand_blk was better */
- unlink_free_block(allctr, blk);
+ unlink_free_block(allctr, blk, flags);
return blk;
}
static void
-link_free_block(Allctr_t *allctr, Block_t *block)
+link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
@@ -446,7 +452,7 @@ link_free_block(Allctr_t *allctr, Block_t *block)
}
static void
-unlink_free_block(Allctr_t *allctr, Block_t *block)
+unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
@@ -467,7 +473,7 @@ unlink_free_block(Allctr_t *allctr, Block_t *block)
}
static void
-update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc)
+update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 0a57eb6d88..ca4385dd3a 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,6 +42,9 @@
#include "erl_misc_utils.h"
#include "packet_parser.h"
#include "erl_cpu_topology.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -68,8 +71,11 @@ static void erl_init(int ncpu);
#define ERTS_MIN_COMPAT_REL 7
+static erts_atomic_t exiting;
+
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_writing_erl_crash_dump;
+erts_smp_atomic32_t erts_writing_erl_crash_dump;
+erts_tsd_key_t erts_is_crash_dumping_key;
#else
volatile int erts_writing_erl_crash_dump = 0;
#endif
@@ -86,7 +92,6 @@ int erts_use_sender_punish;
*/
Uint display_items; /* no of items to display in traces etc */
-Uint display_loads; /* print info about loaded modules */
int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
@@ -98,8 +103,6 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace
* in error codes.
*/
-int erts_async_max_threads; /* number of threads for async support */
-int erts_async_thread_suggested_stack_size;
erts_smp_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
@@ -108,7 +111,6 @@ Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
int erts_compat_rel;
-static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
@@ -126,6 +128,8 @@ int erts_modified_timing_level;
int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
+int erts_no_line_info = 0; /* -L: Don't load line information */
+
/*
* Other global variables.
*/
@@ -244,19 +248,16 @@ erl_init(int ncpu)
{
init_benchmarking();
-#ifdef ERTS_SMP
- erts_system_block_init();
-#endif
-
erts_init_monitors();
erts_init_gc();
erts_init_time();
erts_init_sys_common_misc();
erts_init_process(ncpu);
- erts_init_scheduling(use_multi_run_queue,
- no_schedulers,
+ erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
+ erts_alloc_late_init();
+
H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0);
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
@@ -278,6 +279,7 @@ erl_init(int ncpu)
erts_init_node_tables();
init_dist();
erl_drv_thr_init();
+ erts_init_async();
init_io();
init_copy();
init_load();
@@ -323,7 +325,7 @@ init_shared_memory(int argc, char **argv)
#endif
global_gen_gcs = 0;
- global_max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
global_gc_flags = erts_default_process_flags;
erts_global_offheap.mso = NULL;
@@ -432,7 +434,7 @@ static void
load_preloaded(void)
{
int i;
- int res;
+ Eterm res;
Preload* preload_p;
Eterm module_name;
byte* code;
@@ -451,8 +453,9 @@ load_preloaded(void)
name);
res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
sys_preload_end(&preload_p[i]);
- if (res < 0)
- erl_exit(1,"Failed loading preloaded module %s\n", name);
+ if (res != NIL)
+ erl_exit(1,"Failed loading preloaded module %s (%T)\n",
+ name, res);
i++;
}
}
@@ -494,8 +497,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
- erts_fprintf(stderr, "-l turn on auto load tracing\n");
-
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
@@ -510,6 +511,8 @@ void erts_usage(void)
erts_fprintf(stderr, "-rg amount set reader groups limit\n");
erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
+ erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-sct cput set cpu topology,\n");
erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
@@ -604,12 +607,13 @@ early_init(int *argc, char **argv) /*
int max_main_threads;
int max_reader_groups;
int reader_groups;
+ char envbuf[21]; /* enough for any 64-bit integer */
+ size_t envbufsz;
- use_multi_run_queue = 1;
+ erts_sched_compact_load = 1;
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
- display_loads = 0;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = 0;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
@@ -641,17 +645,23 @@ early_init(int *argc, char **argv) /*
erts_use_r9_pids_ports = 0;
erts_sys_pre_init();
+ erts_atomic_init_nob(&exiting, 0);
+#ifdef ERTS_SMP
+ erts_thr_progress_pre_init();
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_init();
#endif
#ifdef ERTS_SMP
- erts_smp_atomic_init(&erts_writing_erl_crash_dump, 0L);
+ erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
+ erts_tsd_key_create(&erts_is_crash_dumping_key);
#else
erts_writing_erl_crash_dump = 0;
#endif
- erts_smp_atomic32_init(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1));
+ erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
+ (erts_aint32_t) ((Uint16) -1));
erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
@@ -670,6 +680,16 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ envbufsz = sizeof(envbuf);
+
+ /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
+ if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
+ erts_async_max_threads = atoi(envbuf);
+ else
+ erts_async_max_threads = 0;
+ if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
+ erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -697,6 +717,20 @@ early_init(int *argc, char **argv) /*
}
break;
}
+ case 'A': {
+ /* set number of threads in thread pool */
+ char *arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (((erts_async_max_threads = atoi(arg)) < 0) ||
+ (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
+ erts_fprintf(stderr,
+ "bad number of async threads %s\n",
+ arg);
+ erts_usage();
+ VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
+ erts_async_max_threads));
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -762,11 +796,29 @@ early_init(int *argc, char **argv) /*
erts_no_schedulers = (Uint) no_schedulers;
#endif
+ erts_early_init_scheduling(no_schedulers);
+ alloc_opts.ncpu = ncpu;
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
- erts_early_init_scheduling();
+#ifdef ERTS_SMP
+ /*
+ * Thread progress management:
+ *
+ * * Managed threads:
+ * ** Scheduler threads (see erl_process.c)
+ * ** Aux thread (see erl_process.c)
+ * ** Sys message dispatcher thread (see erl_trace.c)
+ *
+ * * Unmanaged threads that need to register:
+ * ** Async threads (see erl_async.c)
+ */
+ erts_thr_progress_init(no_schedulers,
+ no_schedulers+2,
+ erts_async_max_threads);
+#endif
+ erts_thr_q_init();
erts_init_utils();
erts_early_init_cpu_topology(no_schedulers,
&max_main_threads,
@@ -803,10 +855,12 @@ early_init(int *argc, char **argv) /*
#if defined(HIPE)
hipe_signal_init(); /* must be done very early */
#endif
- erl_sys_init();
erl_sys_args(argc, argv);
+ /* Creates threads on Windows that depend on the arguments, so has to be after erl_sys_args */
+ erl_sys_init();
+
erts_ets_realloc_always_moves = 0;
erts_ets_always_compress = 0;
erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;
@@ -844,7 +898,6 @@ erl_start(int argc, char **argv)
int have_break_handler = 1;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
- int async_max_threads = erts_async_max_threads;
int ncpu = early_init(&argc, argv);
envbufsz = sizeof(envbuf);
@@ -856,12 +909,8 @@ erl_start(int argc, char **argv)
envbufsz = sizeof(envbuf);
if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
Uint16 max_gen_gcs = atoi(envbuf);
- erts_smp_atomic32_set(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs);
- }
-
- envbufsz = sizeof(envbuf);
- if (erts_sys_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) {
- async_max_threads = atoi(envbuf);
+ erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
+ (erts_aint32_t) max_gen_gcs);
}
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
@@ -929,10 +978,9 @@ erl_start(int argc, char **argv)
erts_fprintf(stderr, "%s unknown flag %s\n", argv[0], argv[i]);
erts_usage();
}
- case 'l':
- display_loads++;
+ case 'L':
+ erts_no_line_info = 1;
break;
-
case 'v':
#ifdef DEBUG
if (argv[i][2] == '\0') {
@@ -1150,6 +1198,19 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("cl", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sys_strcmp("true", arg) == 0)
+ erts_sched_compact_load = 1;
+ else if (sys_strcmp("false", arg) == 0)
+ erts_sched_compact_load = 0;
+ else {
+ erts_fprintf(stderr,
+ "bad scheduler compact load value '%s'\n",
+ arg);
+ erts_usage();
+ }
+ }
else if (has_prefix("ct", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
res = erts_init_cpu_topology_string(arg);
@@ -1193,12 +1254,8 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (sys_strcmp("mrq", sub_param) == 0)
- use_multi_run_queue = 1;
else if (sys_strcmp("nsp", sub_param) == 0)
erts_use_sender_punish = 0;
- else if (sys_strcmp("srq", sub_param) == 0)
- use_multi_run_queue = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (erts_sched_set_wakeup_limit(arg) != 0) {
@@ -1291,17 +1348,8 @@ erl_start(int argc, char **argv)
break;
}
- case 'A':
- /* set number of threads in thread pool */
- arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (((async_max_threads = atoi(arg)) < 0) ||
- (async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
- erts_fprintf(stderr, "bad number of async threads %s\n", arg);
- erts_usage();
- }
-
- VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
- async_max_threads));
+ case 'A': /* Was handled in early init just read past it */
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
break;
case 'a':
@@ -1390,10 +1438,6 @@ erl_start(int argc, char **argv)
i++;
}
-#ifdef USE_THREADS
- erts_async_max_threads = async_max_threads;
-#endif
-
/* Delayed check of +P flag */
if (erts_max_processes < ERTS_MIN_PROCESSES
|| erts_max_processes > ERTS_MAX_PROCESSES
@@ -1439,6 +1483,10 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
erts_thr_set_main_status(1, 1);
+#if ERTS_USE_ASYNC_READY_Q
+ erts_get_scheduler_data()->aux_work_data.async_ready.queue
+ = erts_get_async_ready_queue(1);
+#endif
set_main_stack_size();
process_main();
#endif
@@ -1462,8 +1510,31 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
#endif
static void
-system_cleanup(int exit_code)
+system_cleanup(int flush_async)
{
+ /*
+ * Make sure only one thread exits the runtime system.
+ */
+ if (erts_atomic_inc_read_nob(&exiting) != 1) {
+ /*
+ * Another thread is currently exiting the system;
+ * wait for it to do its job.
+ */
+#ifdef ERTS_SMP
+ if (erts_thr_progress_is_managed_thread()) {
+ /*
+ * The exiting thread might be waiting for
+ * us to block; need to update status...
+ */
+ erts_thr_progress_active(NULL, 0);
+ erts_thr_progress_prepare_wait(NULL);
+ }
+#endif
+ /* Wait forever... */
+ while (1)
+ erts_milli_sleep(10000000);
+ }
+
/* No cleanup wanted if ...
* 1. we are about to do an abnormal exit
* 2. we haven't finished initializing, or
@@ -1471,7 +1542,7 @@ system_cleanup(int exit_code)
* (in threaded non smp case).
*/
- if (exit_code != 0
+ if (!flush_async
|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
|| !erts_equal_tids(main_thread, erts_thr_self())
@@ -1483,7 +1554,6 @@ system_cleanup(int exit_code)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
#endif
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
#endif
#ifdef HYBRID
@@ -1512,103 +1582,60 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS)
- exit_async();
-#endif
-#if HAVE_ERTS_MSEG
- erts_mseg_exit();
-#endif
-
- /*
- * A lot more cleaning could/should have been done...
- */
-
+ erts_exit_flush_async();
}
-/*
- * Common exit function, all exits from the system go through here.
- * n <= 0 -> normal exit with status n;
- * n = 127 -> Erlang crash dump produced, exit with status 1;
- * other positive n -> Erlang crash dump and core dump produced.
- */
-
-__decl_noreturn void erl_exit0(char *file, int line, int n, char *fmt,...)
+static __decl_noreturn void __noreturn
+erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
{
unsigned int an;
- va_list args;
- va_start(args, fmt);
+ system_cleanup(flush_async);
save_statistics();
- system_cleanup(n);
-
an = abs(n);
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
/* Produce an Erlang core dump if error */
- if (n > 0 && erts_initialized &&
- (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
- erl_crash_dump_v(file, line, fmt, args);
+ if (((n > 0 && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT)
+ && erts_initialized) {
+ erl_crash_dump_v((char*) NULL, 0, fmt, args1);
}
- /* need to reinitialize va_args thing */
- va_end(args);
- va_start(args, fmt);
-
if (fmt != NULL && *fmt != '\0')
- erl_error(fmt, args); /* Print error message. */
- va_end(args);
+ erl_error(fmt, args2); /* Print error message. */
sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
exit(0);
- else if (n == 127)
+ else if (n == ERTS_DUMP_EXIT)
ERTS_EXIT_AFTER_DUMP(1);
else if (n > 0 || n == ERTS_ABORT_EXIT)
abort();
exit(an);
}
-__decl_noreturn void erl_exit(int n, char *fmt,...)
+/* Exit without flushing async threads */
+__decl_noreturn void __noreturn erl_exit(int n, char *fmt, ...)
{
- unsigned int an;
- va_list args;
-
- va_start(args, fmt);
-
- save_statistics();
-
- system_cleanup(n);
-
- an = abs(n);
-
- if (erts_mtrace_enabled)
- erts_mtrace_exit((Uint32) an);
-
- /* Produce an Erlang core dump if error */
- if (n > 0 && erts_initialized &&
- (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
- erl_crash_dump_v((char*) NULL, 0, fmt, args);
- }
-
- /* need to reinitialize va_args thing */
- va_end(args);
- va_start(args, fmt);
-
- if (fmt != NULL && *fmt != '\0')
- erl_error(fmt, args); /* Print error message. */
- va_end(args);
- sys_tty_reset(n);
-
- if (n == ERTS_INTR_EXIT)
- exit(0);
- else if (n == ERTS_DUMP_EXIT)
- ERTS_EXIT_AFTER_DUMP(1);
- else if (n > 0 || n == ERTS_ABORT_EXIT)
- abort();
- exit(an);
+ va_list args1, args2;
+ va_start(args1, fmt);
+ va_start(args2, fmt);
+ erl_exit_vv(n, 0, fmt, args1, args2);
+ va_end(args2);
+ va_end(args1);
}
+/* Exit after flushing async threads */
+__decl_noreturn void __noreturn erl_exit_flush_async(int n, char *fmt, ...)
+{
+ va_list args1, args2;
+ va_start(args1, fmt);
+ va_start(args2, fmt);
+ erl_exit_vv(n, 1, fmt, args1, args2);
+ va_end(args2);
+ va_end(args1);
+}
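erl_exit_vv above takes two va_lists because a va_list may only be traversed once, and the arguments are needed twice: once for the crash dump and once for the error printout. The same constraint handled with va_copy in a stand-alone sketch:

#include <stdarg.h>
#include <stdio.h>

static void report_twice(const char *fmt, ...)
{
    va_list args1, args2;
    va_start(args1, fmt);
    va_copy(args2, args1);        /* the second traversal needs its own copy */
    vfprintf(stderr, fmt, args1); /* first consumer */
    vfprintf(stdout, fmt, args2); /* second consumer */
    va_end(args2);
    va_end(args1);
}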
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index f3f3c22933..963c8b3c58 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1186,6 +1186,8 @@ erts_instr_init(int stat, int map_stat)
sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i))
+ continue;
if (erts_allctrs_info[i].enabled)
stats->ap[i] = &stats->a[i];
else
@@ -1199,6 +1201,8 @@ erts_instr_init(int stat, int map_stat)
erts_instr_memory_map = 1;
erts_instr_stat = 1;
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i))
+ continue;
erts_allctrs[i].alloc = map_stat_alloc;
erts_allctrs[i].realloc = map_stat_realloc;
erts_allctrs[i].free = map_stat_free;
@@ -1209,6 +1213,8 @@ erts_instr_init(int stat, int map_stat)
else {
erts_instr_stat = 1;
for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (ERTS_IS_SBMBC_ALLOCATOR_NO__(i))
+ continue;
erts_allctrs[i].alloc = stat_alloc;
erts_allctrs[i].realloc = stat_realloc;
erts_allctrs[i].free = stat_free;
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 9180508a49..a0f744be9d 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -110,10 +110,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "fun_tab", NULL },
{ "environ", NULL },
#endif
- { "asyncq", "address" },
-#ifndef ERTS_SMP
- { "async_ready", NULL },
-#endif
{ "efile_drv", "address" },
#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
{ "child_status", NULL },
@@ -125,7 +121,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
{ "pollset_rm_list", NULL },
- { "removed_fd_pre_alloc_lock", NULL },
+ { "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
{ "run_queue", "address" },
@@ -138,6 +134,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "alcu_init_atoms", NULL },
{ "mseg_init_atoms", NULL },
{ "drv_tsd", NULL },
+ { "async_enq_mtx", NULL },
#ifdef ERTS_SMP
{ "sys_msg_q", NULL },
{ "atom_tab", NULL },
@@ -151,9 +148,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
- { "fix_alloc", "index" },
{ "alcu_allocator", "index" },
- { "alcu_delayed_free", "index" },
+ { "sbmbc_alloc", "index" },
{ "mseg", NULL },
#if HALFWORD_HEAP
{ "pmmap", NULL },
@@ -174,15 +170,11 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "timeofday", NULL },
{ "breakpoints", NULL },
{ "pollsets_lock", NULL },
- { "async_id", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
- { "misc_aux_work_queue", "index" },
- { "misc_aux_work_pre_alloc_lock", "address" },
{ "sched_stat", NULL },
- { "run_queue_sleep_list", "address" },
#endif
- { "alloc_thr_ix_lock", NULL },
+ { "async_init_mtx", NULL },
#ifdef ERTS_SMP
{ "proc_lck_qs_alloc", NULL },
#endif
@@ -191,6 +183,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "save_ops_lock", NULL },
#endif
#endif
+#ifdef USE_VM_PROBES
+ { "efile_drv dtrace mutex", NULL },
+#endif
{ "mtrace_buf", NULL },
{ "erts_alloc_hard_debug", NULL }
};
@@ -1260,7 +1255,7 @@ erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
{
lck->id = erts_lc_get_lock_order_id(name);
- lck->extra = &lck->extra;
+ lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
lck->inited = ERTS_LC_INITITALIZED;
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 82f272d28a..bd86e3ea9e 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,6 +31,7 @@
#include "erl_process.h"
#include "erl_nmgc.h"
#include "erl_binary.h"
+#include "dtrace-wrapper.h"
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
ErlMessage,
@@ -240,7 +241,7 @@ erts_msg_distext2heap(Process *pp,
Sint sz;
*bpp = NULL;
- sz = erts_decode_dist_ext_size(dist_extp, 0);
+ sz = erts_decode_dist_ext_size(dist_extp);
if (sz < 0)
goto decode_error;
if (is_not_nil(*tokenp)) {
@@ -335,6 +336,11 @@ erts_queue_dist_message(Process *rcvr,
Eterm token)
{
ErlMessage* mp;
+#ifdef USE_VM_PROBES
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+#endif
#ifdef ERTS_SMP
ErtsProcLocks need_locks;
#endif
@@ -376,15 +382,61 @@ erts_queue_dist_message(Process *rcvr,
message_free(mp);
msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
if (is_value(msg))
- erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(rcvr, receiver_name);
+ if (token != NIL && token != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
+ }
+ DTRACE6(message_queued,
+ receiver_name, size_object(msg), rcvr->msg.len,
+ tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
+ erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
else {
/* Enqueue message on external format */
ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
- ERL_MESSAGE_TOKEN(mp) = token;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+ if (token == am_have_dt_utag) {
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ } else {
+#endif
+ ERL_MESSAGE_TOKEN(mp) = token;
+#ifdef USE_VM_PROBES
+ }
+#endif
mp->next = NULL;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(rcvr, receiver_name);
+ if (token != NIL && token != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
+ }
+ /*
+ * TODO: We don't know the real size of the external message here.
+ * -1 will appear to a D script as 4294967295.
+ */
+ DTRACE6(message_queued, receiver_name, -1, rcvr->msg.len + 1,
+ tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
mp->data.dist_ext = dist_ext;
LINK_MESSAGE(rcvr, mp);
@@ -398,7 +450,11 @@ erts_queue_message(Process* receiver,
ErtsProcLocks *receiver_locks,
ErlHeapFragment* bp,
Eterm message,
- Eterm seq_trace_token)
+ Eterm seq_trace_token
+#ifdef USE_VM_PROBES
+ , Eterm dt_utag
+#endif
+)
{
ErlMessage* mp;
#ifdef ERTS_SMP
@@ -439,6 +495,9 @@ erts_queue_message(Process* receiver,
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
+#endif
mp->next = NULL;
mp->data.heap_frag = bp;
@@ -462,12 +521,30 @@ erts_queue_message(Process* receiver,
LINK_MESSAGE(receiver, mp);
#endif
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+
+ dtrace_proc_str(receiver, receiver_name);
+ if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ }
+ DTRACE6(message_queued,
+ receiver_name, size_object(message), receiver->msg.len,
+ tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
notify_new_message(receiver);
if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
trace_receive(receiver, message);
}
-
+
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
#endif
@@ -497,6 +574,9 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
Sint offs;
Uint sz;
ErlHeapFragment *bp;
+#ifdef USE_VM_PROBES
+ Eterm utag;
+#endif
#ifdef HARD_DEBUG
ProcBin *dbg_mso_start = off_heap->mso;
@@ -506,32 +586,56 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
ErlHeapFragment *dbg_bp;
Uint *dbg_hp, *dbg_thp_start;
Uint dbg_term_sz, dbg_token_sz;
+#ifdef USE_VM_PROBES
+ Eterm dbg_utag;
+ Uint dbg_utag_sz;
+#endif
#endif
bp = msg->data.heap_frag;
term = ERL_MESSAGE_TERM(msg);
token = ERL_MESSAGE_TOKEN(msg);
+#ifdef USE_VM_PROBES
+ utag = ERL_MESSAGE_DT_UTAG(msg);
+#endif
if (!bp) {
+#ifdef USE_VM_PROBES
+ ASSERT(is_immed(term) && is_immed(token) && is_immed(utag));
+#else
ASSERT(is_immed(term) && is_immed(token));
+#endif
return;
}
#ifdef HARD_DEBUG
dbg_term_sz = size_object(term);
dbg_token_sz = size_object(token);
+#ifdef USE_VM_PROBES
+ dbg_utag_sz = size_object(utag);
+ dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz + dbg_utag_sz);
+#else
+ dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
+#endif
/*ASSERT(dbg_term_sz + dbg_token_sz == erts_msg_used_frag_sz(msg));
Copied size may be smaller due to removed SubBins's or garbage.
Copied size may be larger due to duplicated shared terms.
*/
- dbg_bp = new_message_buffer(dbg_term_sz + dbg_token_sz);
dbg_hp = dbg_bp->mem;
dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
- dbg_thp_start = *hpp;
+#ifdef USE_VM_PROBES
+ dbg_utag = copy_struct(utag, dbg_utag_sz, &dbg_hp, &dbg_bp->off_heap);
+#endif
+ dbg_thp_start = *hpp;
#endif
if (bp->next != NULL) {
- move_multi_frags(hpp, off_heap, bp, msg->m, 2);
+ move_multi_frags(hpp, off_heap, bp, msg->m,
+#ifdef USE_VM_PROBES
+ 3
+#else
+ 2
+#endif
+ );
goto copy_done;
}
@@ -633,6 +737,16 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
#endif
}
+#ifdef USE_VM_PROBES
+ if (is_not_immed(utag)) {
+ ASSERT(in_heapfrag(ptr_val(utag), bp));
+ ERL_MESSAGE_DT_UTAG(msg) = offset_ptr(utag, offs);
+#ifdef HARD_DEBUG
+ ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
+ ASSERT(hp > ptr_val(ERL_MESSAGE_DT_UTAG(msg)));
+#endif
+ }
+#endif
copy_done:
@@ -699,6 +813,9 @@ copy_done:
#ifdef HARD_DEBUG
ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
+#ifdef USE_VM_PROBES
+ ASSERT(eq(ERL_MESSAGE_DT_UTAG(msg), dbg_utag));
+#endif
free_message_buffer(dbg_bp);
#endif
@@ -713,7 +830,7 @@ erts_msg_attached_data_size_aux(ErlMessage *msg)
ASSERT(msg->data.dist_ext);
ASSERT(msg->data.dist_ext->heap_size < 0);
- sz = erts_decode_dist_ext_size(msg->data.dist_ext, 0);
+ sz = erts_decode_dist_ext_size(msg->data.dist_ext);
if (sz < 0) {
/* Bad external; remove it */
if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
@@ -774,39 +891,101 @@ erts_send_message(Process* sender,
Uint msize;
ErlHeapFragment* bp = NULL;
Eterm token = NIL;
-
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(sender_name, 64);
+ DTRACE_CHARBUF(receiver_name, 64);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+#endif
BM_STOP_TIMER(system);
BM_MESSAGE(message,sender,receiver);
BM_START_TIMER(send);
+#ifdef USE_VM_PROBES
+ *sender_name = *receiver_name = '\0';
+ if (DTRACE_ENABLED(message_send)) {
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(receiver_name, sizeof(receiver_name), "%T", receiver->id);
+ }
+#endif
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
Eterm* hp;
+ Eterm stoken = SEQ_TRACE_TOKEN(sender);
+ Uint seq_trace_size = 0;
+#ifdef USE_VM_PROBES
+ Uint dt_utag_size = 0;
+ Eterm utag = NIL;
+#endif
- BM_SWAP_TIMER(send,size);
+ BM_SWAP_TIMER(send,size);
msize = size_object(message);
- BM_SWAP_TIMER(size,send);
+ BM_SWAP_TIMER(size,send);
+
+#ifdef USE_VM_PROBES
+ if (stoken != am_have_dt_utag) {
+#endif
+
+ seq_trace_update_send(sender);
+ seq_trace_output(stoken, message, SEQ_TRACE_SEND,
+ receiver->id, sender);
+ seq_trace_size = 6; /* TUPLE5 */
+#ifdef USE_VM_PROBES
+ }
+ if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
+ dt_utag_size = size_object(DT_UTAG(sender));
+ } else if (stoken == am_have_dt_utag ) {
+ stoken = NIL;
+ }
+#endif
- seq_trace_update_send(sender);
- seq_trace_output(SEQ_TRACE_TOKEN(sender), message, SEQ_TRACE_SEND,
- receiver->id, sender);
- bp = new_message_buffer(msize + 6 /* TUPLE5 */);
+ bp = new_message_buffer(msize + seq_trace_size
+#ifdef USE_VM_PROBES
+ + dt_utag_size
+#endif
+ );
hp = bp->mem;
BM_SWAP_TIMER(send,copy);
- token = copy_struct(SEQ_TRACE_TOKEN(sender),
- 6 /* TUPLE5 */,
+ token = copy_struct(stoken,
+ seq_trace_size,
&hp,
&bp->off_heap);
message = copy_struct(message, msize, &hp, &bp->off_heap);
+#ifdef USE_VM_PROBES
+ if (DT_UTAG_FLAGS(sender) & DT_UTAG_SPREADING) {
+ utag = copy_struct(DT_UTAG(sender), dt_utag_size, &hp, &bp->off_heap);
+#ifdef DTRACE_TAG_HARDDEBUG
+ erts_fprintf(stderr,
+ "Dtrace -> (%T) Spreading tag (%T) with "
+ "message %T!\r\n",sender->id, utag, message);
+#endif
+ }
+#endif
BM_MESSAGE_COPIED(msize);
BM_SWAP_TIMER(copy,send);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(message_send)) {
+ if (stoken != NIL && stoken != am_have_dt_utag) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(stoken));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
+ }
+ DTRACE6(message_send, sender_name, receiver_name,
+ msize, tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
erts_queue_message(receiver,
receiver_locks,
bp,
message,
- token);
+ token
+#ifdef USE_VM_PROBES
+ , utag
+#endif
+ );
BM_SWAP_TIMER(send,system);
#ifdef HYBRID
} else {
@@ -835,8 +1014,13 @@ erts_send_message(Process* sender,
#endif
LAZY_COPY(sender,message);
BM_SWAP_TIMER(copy,send);
+ DTRACE6(message_send, sender_name, receiver_name,
+ size_object(message), tok_label, tok_lastcnt, tok_serial);
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = NIL;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+#endif
mp->next = NULL;
LINK_MESSAGE(receiver, mp);
ACTIVATE(receiver);
@@ -874,9 +1058,14 @@ erts_send_message(Process* sender,
{
ErlMessage* mp = message_alloc();
+ DTRACE6(message_send, sender_name, receiver_name,
+ size_object(message), tok_label, tok_lastcnt, tok_serial);
mp->data.attached = NULL;
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = NIL;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+#endif
mp->next = NULL;
/*
* We move 'in queue' to 'private queue' and place
@@ -908,7 +1097,13 @@ erts_send_message(Process* sender,
message = copy_struct(message, msize, &hp, ohp);
BM_MESSAGE_COPIED(msz);
BM_SWAP_TIMER(copy,send);
- erts_queue_message(receiver, receiver_locks, bp, message, token);
+ DTRACE6(message_send, sender_name, receiver_name,
+ msize, tok_label, tok_lastcnt, tok_serial);
+ erts_queue_message(receiver, receiver_locks, bp, message, token
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
BM_SWAP_TIMER(send,system);
#else
ErlMessage* mp = message_alloc();
@@ -928,8 +1123,13 @@ erts_send_message(Process* sender,
message = copy_struct(message, msize, &hp, &receiver->off_heap);
BM_MESSAGE_COPIED(msize);
BM_SWAP_TIMER(copy,send);
+ DTRACE6(message_send, sender_name, receiver_name,
+ (uint32_t)msize, tok_label, tok_lastcnt, tok_serial);
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = NIL;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+#endif
mp->next = NULL;
mp->data.attached = NULL;
LINK_MESSAGE(receiver, mp);
@@ -968,7 +1168,11 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
Eterm temptoken;
ErlHeapFragment* bp = NULL;
- if (token != NIL) {
+ if (token != NIL
+#ifdef USE_VM_PROBES
+ && token != am_have_dt_utag
+#endif
+ ) {
ASSERT(is_tuple(token));
sz_reason = size_object(reason);
@@ -983,7 +1187,11 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
/* the trace token must in this case be updated by the caller */
seq_trace_output(token, save, SEQ_TRACE_SEND, to->id, NULL);
temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
- erts_queue_message(to, to_locksp, bp, save, temptoken);
+ erts_queue_message(to, to_locksp, bp, save, temptoken
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
} else {
ErlOffHeap *ohp;
sz_reason = size_object(reason);
@@ -1000,7 +1208,11 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
? from
: copy_struct(from, sz_from, &hp, ohp));
save = TUPLE3(hp, am_EXIT, from_copy, mess);
- erts_queue_message(to, to_locksp, bp, save, NIL);
+ erts_queue_message(to, to_locksp, bp, save, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
}
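
The hunks above repeat one pattern at every probe site: test DTRACE_ENABLED(probe) first so the term-to-string formatting only runs when the probe is actually attached, then fire DTRACE6 with the receiver name, message size, queue length and the three seq-trace fields. A minimal sketch of that pattern, assuming the dtrace-wrapper macros and the seq-trace accessors used in the hunks (not a drop-in copy of the ERTS functions):

static void probe_message_queued(Process *rcvr, Eterm msg, Eterm token)
{
    if (DTRACE_ENABLED(message_queued)) {          /* cheap check when the probe is off */
        DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
        Sint tok_label = 0, tok_lastcnt = 0, tok_serial = 0;

        dtrace_proc_str(rcvr, receiver_name);      /* "<0.42.0>"-style process string */
        if (token != NIL && token != am_have_dt_utag) {
            tok_label   = signed_val(SEQ_TRACE_T_LABEL(token));
            tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
            tok_serial  = signed_val(SEQ_TRACE_T_SERIAL(token));
        }
        DTRACE6(message_queued, receiver_name, size_object(msg),
                rcvr->msg.len, tok_label, tok_lastcnt, tok_serial);
    }
}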
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 5aca0db6fe..3e9a24ee81 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -70,11 +70,18 @@ typedef struct erl_mesg {
ErlHeapFragment *heap_frag;
void *attached;
} data;
+#ifdef USE_VM_PROBES
+ Eterm m[3]; /* m[0] = message, m[1] = seq trace token, m[2] = dynamic trace user tag */
+#else
Eterm m[2]; /* m[0] = message, m[1] = seq trace token */
+#endif
} ErlMessage;
#define ERL_MESSAGE_TERM(mp) ((mp)->m[0])
#define ERL_MESSAGE_TOKEN(mp) ((mp)->m[1])
+#ifdef USE_VM_PROBES
+#define ERL_MESSAGE_DT_UTAG(mp) ((mp)->m[2])
+#endif
/* Size of default message buffer (erl_message.c) */
#define ERL_MESSAGE_BUF_SZ 500
@@ -221,7 +228,11 @@ ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm);
-void erts_queue_message(Process*, ErtsProcLocks*, ErlHeapFragment*, Eterm, Eterm);
+void erts_queue_message(Process*, ErtsProcLocks*, ErlHeapFragment*, Eterm, Eterm
+#ifdef USE_VM_PROBES
+ , Eterm dt_utag
+#endif
+);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
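
Because the extra dt_utag parameter only exists when USE_VM_PROBES is defined, every caller has to pass it inside the same conditional, as the erl_message.c and erl_nif.c hunks do. A call site with no user tag to forward therefore looks like this (sketch, using the declaration above):

    erts_queue_message(rcvr, &rcvr_locks, bp, msg, token
#ifdef USE_VM_PROBES
                       , NIL   /* no dynamic-trace user tag to propagate */
#endif
                       );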
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 9751b5d77c..1a84950120 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -125,7 +125,7 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
} else {
n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
mon_size*sizeof(Uint));
- erts_smp_atomic_add(&tot_link_lh_size, mon_size*sizeof(Uint));
+ erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
}
hp = n->heap;
@@ -156,7 +156,7 @@ static ErtsLink *create_link(Uint type, Eterm pid)
} else {
n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
lnk_size*sizeof(Uint));
- erts_smp_atomic_add(&tot_link_lh_size, lnk_size*sizeof(Uint));
+ erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
}
hp = n->heap;
@@ -191,13 +191,13 @@ static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid)
void
erts_init_monitors(void)
{
- erts_smp_atomic_init(&tot_link_lh_size, 0);
+ erts_smp_atomic_init_nob(&tot_link_lh_size, 0);
}
Uint
erts_tot_link_lh_size(void)
{
- return (Uint) erts_smp_atomic_read(&tot_link_lh_size);
+ return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size);
}
void erts_destroy_monitor(ErtsMonitor *mon)
@@ -222,7 +222,7 @@ void erts_destroy_monitor(ErtsMonitor *mon)
erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon);
} else {
erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon);
- erts_smp_atomic_add(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
+ erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
}
}
@@ -244,7 +244,7 @@ void erts_destroy_link(ErtsLink *lnk)
erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk);
} else {
erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk);
- erts_smp_atomic_add(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
+ erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
}
}
@@ -948,8 +948,10 @@ static void erts_dump_links(ErtsLink *root, int indent)
erts_destroy_tmp_dsbuf(dsbufp);
}
-Eterm erts_debug_dump_monitors_1(Process *p, Eterm pid)
+Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm pid = BIF_ARG_1;
Process *rp;
DistEntry *dep;
rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
@@ -976,8 +978,10 @@ Eterm erts_debug_dump_monitors_1(Process *p, Eterm pid)
}
}
-Eterm erts_debug_dump_links_1(Process *p, Eterm pid)
+Eterm erts_debug_dump_links_1(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm pid = BIF_ARG_1;
Process *rp;
DistEntry *dep;
if (is_internal_port(pid)) {
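
The signature change above moves these debug BIFs onto the BIF_ALIST_n calling convention, where the macro hides the real parameter list and the arguments are picked up through BIF_P and BIF_ARG_n. A skeleton of a one-argument BIF written against that convention (hypothetical function name, for illustration only):

Eterm erts_debug_example_1(BIF_ALIST_1)
{
    Process *p   = BIF_P;      /* the calling process */
    Eterm    arg = BIF_ARG_1;  /* the single Erlang argument */

    if (is_not_pid(arg))
        BIF_ERROR(p, BADARG);  /* raise badarg in the caller */
    BIF_RET(am_ok);
}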
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index b1478758a1..358c67bf20 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -503,12 +503,6 @@ write_trace_header(char *nodename, char *pid, char *hostname)
case ERTS_ALC_A_SYSTEM:
PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
break;
- case ERTS_ALC_A_FIXED_SIZE:
- if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
- PUT_UI16(tracep, ERTS_FIX_CORE_ALLOCATOR);
- else
- PUT_UI16(tracep, ERTS_ALC_A_SYSTEM);
- break;
default:
PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
break;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 68421b4387..b323bc7f69 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,6 +32,7 @@
#include "error.h"
#include "big.h"
#include "beam_bp.h"
+#include "erl_thr_progress.h"
#include <limits.h>
#include <stddef.h> /* offsetof */
@@ -65,6 +66,9 @@ static void add_readonly_check(ErlNifEnv*, unsigned char* ptr, unsigned sz);
static int is_offheap(const ErlOffHeap* off_heap);
#endif
+#ifdef USE_VM_PROBES
+void dtrace_nifenv_str(ErlNifEnv *, char *);
+#endif
#define MIN_HEAP_FRAG_SZ 200
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp);
@@ -130,10 +134,13 @@ static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
env->tmp_obj_list = NULL;
}
-/* Temporary object header, auto-deallocated when NIF returns. */
+/* Temporary object header, auto-deallocated when NIF returns
+ * or when independent environment is cleared.
+ */
struct enif_tmp_obj_t {
struct enif_tmp_obj_t* next;
void (*dtor)(struct enif_tmp_obj_t*);
+ ErtsAlcType_t allocator;
/*char data[];*/
};
@@ -244,7 +251,7 @@ ErlNifEnv* enif_alloc_env(void)
msg_env->env.hp_end = phony_heap;
msg_env->env.heap_frag = NULL;
msg_env->env.mod_nif = NULL;
- msg_env->env.tmp_obj_list = (struct enif_tmp_obj_t*) 1; /* invalid non-NULL */
+ msg_env->env.tmp_obj_list = NULL;
msg_env->env.proc = &msg_env->phony_proc;
memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
@@ -289,6 +296,7 @@ void enif_clear_env(ErlNifEnv* env)
menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
ASSERT(!is_offheap(&MSO(p)));
+ free_tmp_objs(env);
}
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
@@ -345,7 +353,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
if (flush_me) {
flush_env(env); /* Needed for ERTS_HOLE_CHECK */
}
- erts_queue_message(rp, &rp_locks, frags, msg, am_undefined);
+ erts_queue_message(rp, &rp_locks, frags, msg, am_undefined
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
if (rp_locks) {
ERTS_SMP_LC_ASSERT(rp_locks == (rp_had_locks | (ERTS_PROC_LOCK_MSGQ |
ERTS_PROC_LOCK_STATUS)));
@@ -435,24 +447,36 @@ int enif_is_exception(ErlNifEnv* env, ERL_NIF_TERM term)
return term == THE_NON_VALUE;
}
+int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ return is_number(term);
+}
+
+static ERTS_INLINE int is_proc_bound(ErlNifEnv* env)
+{
+ return env->mod_nif != NULL;
+}
+
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
- erts_free_aligned_binary_bytes_extra((byte*)obj,ERTS_ALC_T_TMP);
+ erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
}
int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
{
+ ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
union {
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
u.tmp = NULL;
- bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, ERTS_ALC_T_TMP,
+ bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
if (bin->data == NULL) {
return 0;
}
if (u.tmp != NULL) {
+ u.tmp->allocator = allocator;
u.tmp->next = env->tmp_obj_list;
u.tmp->dtor = &aligned_binary_dtor;
env->tmp_obj_list = u.tmp;
@@ -466,12 +490,13 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
static void tmp_alloc_dtor(struct enif_tmp_obj_t* obj)
{
- erts_free(ERTS_ALC_T_TMP, obj);
+ erts_free(obj->allocator, obj);
}
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
struct enif_tmp_obj_t* tobj;
+ ErtsAlcType_t allocator;
Uint sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
@@ -486,8 +511,10 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
if (erts_iolist_size(term, &sz)) {
return 0;
}
-
- tobj = erts_alloc(ERTS_ALC_T_TMP, sz + sizeof(struct enif_tmp_obj_t));
+
+ allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+ tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t));
+ tobj->allocator = allocator;
tobj->next = env->tmp_obj_list;
tobj->dtor = &tmp_alloc_dtor;
env->tmp_obj_list = tobj;
@@ -511,7 +538,7 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
}
refbin->flags = BIN_FLAG_DRV; /* BUGBUG: Flag? */
erts_refc_init(&refbin->refc, 1);
- refbin->orig_size = (long) size;
+ refbin->orig_size = (SWord) size;
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
@@ -578,7 +605,15 @@ int enif_is_identical(Eterm lhs, Eterm rhs)
int enif_compare(Eterm lhs, Eterm rhs)
{
- return CMP(lhs,rhs);
+ Sint result = CMP(lhs,rhs);
+
+ if (result < 0) {
+ return -1;
+ } else if (result > 0) {
+ return 1;
+ }
+
+ return result;
}
int enif_get_tuple(ErlNifEnv* env, Eterm tpl, int* arity, const Eterm** array)
@@ -668,6 +703,7 @@ Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
ErlSubBin* sb;
Eterm orig;
Uint offset, bit_offset, bit_size;
+#ifdef DEBUG
unsigned src_size;
ASSERT(is_binary(bin_term));
@@ -675,6 +711,7 @@ Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
ASSERT(pos <= src_size);
ASSERT(size <= src_size);
ASSERT(pos + size <= src_size);
+#endif
sb = (ErlSubBin*) alloc_heap(env, ERL_SUB_BIN_SIZE);
ERTS_GET_REAL_BIN(bin_term, orig, offset, bit_offset, bit_size);
sb->thing_word = HEADER_SUB_BIN;
@@ -714,7 +751,8 @@ int enif_get_int(ErlNifEnv* env, Eterm term, int* ip)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return term_to_Sint(term, (Sint*)ip);
-#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
+#elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
+ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM)
Sint i;
if (!term_to_Sint(term, &i) || i < INT_MIN || i > INT_MAX) {
return 0;
@@ -730,7 +768,8 @@ int enif_get_uint(ErlNifEnv* env, Eterm term, unsigned* ip)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return term_to_Uint(term, (Uint*)ip);
-#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
+#elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
+ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM)
Uint i;
if (!term_to_Uint(term, &i) || i > UINT_MAX) {
return 0;
@@ -746,6 +785,13 @@ int enif_get_long(ErlNifEnv* env, Eterm term, long* ip)
return term_to_Sint(term, ip);
#elif SIZEOF_LONG == 8
return term_to_Sint64(term, ip);
+#elif SIZEOF_LONG == SIZEOF_INT
+ int tmp,ret;
+ ret = enif_get_int(env,term,&tmp);
+ if (ret) {
+ *ip = (long) tmp;
+ }
+ return ret;
#else
# error Unknown long word size
#endif
@@ -757,6 +803,14 @@ int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
return term_to_Uint(term, ip);
#elif SIZEOF_LONG == 8
return term_to_Uint64(term, ip);
+#elif SIZEOF_LONG == SIZEOF_INT
+ int ret;
+ unsigned int tmp;
+ ret = enif_get_uint(env,term,&tmp);
+ if (ret) {
+ *ip = (unsigned long) tmp;
+ }
+ return ret;
#else
# error Unknown long word size
#endif
@@ -817,7 +871,8 @@ ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2));
-#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
+#elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
+ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM)
return make_small(i);
#endif
}
@@ -826,15 +881,21 @@ ERL_NIF_TERM enif_make_uint(ErlNifEnv* env, unsigned i)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
-#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
+#elif (SIZEOF_LONG == ERTS_SIZEOF_ETERM) || \
+ (SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM)
return make_small(i);
#endif
}
ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i)
{
+ if (IS_SSMALL(i)) {
+ return make_small(i);
+ }
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
- return IS_SSMALL(i) ? make_small(i) : small_to_big(i, alloc_heap(env,2));
+ return small_to_big(i, alloc_heap(env,2));
+#elif SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM
+ return make_small(i);
#elif SIZEOF_LONG == 8
ensure_heap(env,3);
return erts_sint64_to_big(i, &env->hp);
@@ -843,8 +904,13 @@ ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i)
ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
{
+ if (IS_USMALL(0,i)) {
+ return make_small(i);
+ }
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
- return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
+ return uint_to_big(i,alloc_heap(env,2));
+#elif SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM
+ return make_small(i);
#elif SIZEOF_LONG == 8
ensure_heap(env,3);
return erts_uint64_to_big(i, &env->hp);
@@ -1007,6 +1073,29 @@ void enif_system_info(ErlNifSysInfo *sip, size_t si_size)
driver_system_info(sip, si_size);
}
+int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) {
+ Eterm *listptr, ret = NIL, *hp;
+
+ if (is_nil(term)) {
+ *list = term;
+ return 1;
+ }
+
+ ret = NIL;
+
+ while (is_not_nil(term)) {
+ if (is_not_list(term)) {
+ return 0;
+ }
+ hp = alloc_heap(env, 2);
+ listptr = list_val(term);
+ ret = CONS(hp, CAR(listptr), ret);
+ term = CDR(listptr);
+ }
+ *list = ret;
+ return 1;
+}
+
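
A hypothetical NIF using the new enif_make_reverse_list (module boilerplate omitted): it reverses its list argument in a single C-side pass, and the 0 return value rejects non-lists and improper lists:

static ERL_NIF_TERM reverse_nif(ErlNifEnv* env, int argc,
                                const ERL_NIF_TERM argv[])
{
    ERL_NIF_TERM reversed;

    if (argc != 1 || !enif_make_reverse_list(env, argv[0], &reversed))
        return enif_make_badarg(env);   /* not a (proper) list */
    return reversed;
}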
ErlNifMutex* enif_mutex_create(char *name) { return erl_drv_mutex_create(name); }
void enif_mutex_destroy(ErlNifMutex *mtx) { erl_drv_mutex_destroy(mtx); }
@@ -1098,7 +1187,7 @@ static ErlNifResourceType* find_resource_type(Eterm module, Eterm name)
}
#define in_area(ptr,start,nbytes) \
- ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
static void close_lib(struct erl_module_nif* lib)
@@ -1146,7 +1235,7 @@ enif_open_resource_type(ErlNifEnv* env,
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(module_str == NULL); /* for now... */
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -1425,6 +1514,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
Eterm ret = am_ok;
int veto;
struct erl_module_nif* lib = NULL;
+ int reload_warning = 0;
len = list_length(BIF_ARG_1);
if (len < 0) {
@@ -1440,7 +1530,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/* Block system (is this the right place to do it?) */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* Find calling module */
ASSERT(BIF_P->current != NULL);
@@ -1564,6 +1654,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
else {
mod->nif->entry = NULL; /* to prevent 'unload' callback */
erts_unload_nif(mod->nif);
+ reload_warning = 1;
}
}
else {
@@ -1610,7 +1701,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else { /* Function traced, patch the original instruction word */
BpData** bps = (BpData**) code_ptr[1];
- BpData* bp = (BpData*) bps[bp_sched2ix()];
+ BpData* bp = (BpData*) bps[erts_bp_sched2ix()];
bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
@@ -1629,9 +1720,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_sys_ddll_free_error(&errdesc);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_free(ERTS_ALC_T_TMP, lib_name);
+
+ if (reload_warning) {
+ erts_dsprintf_buf_t* dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Repeated calls to erlang:load_nif from module '%T'.\n\n"
+ "The NIF reload mechanism is deprecated and must not "
+ "be used in production systems.\n", mod_atom);
+ erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
+ }
BIF_RET(ret);
}
@@ -1641,7 +1741,7 @@ erts_unload_nif(struct erl_module_nif* lib)
{
ErlNifResourceType* rt;
ErlNifResourceType* next;
- ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
for (rt = resource_type_list.next;
@@ -1686,6 +1786,13 @@ void erl_nif_init()
resource_type_list.name = THE_NON_VALUE;
}
+#ifdef USE_VM_PROBES
+void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf)
+{
+ dtrace_pid_str(env->proc->id, process_buf);
+}
+#endif
+
#ifdef READONLY_CHECK
/* Use checksums to assert that NIFs do not write into inspected binaries
*/
@@ -1701,8 +1808,10 @@ struct readonly_check_t
};
static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
{
- struct readonly_check_t* obj = erts_alloc(ERTS_ALC_T_TMP,
+ ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+ struct readonly_check_t* obj = erts_alloc(allocator,
sizeof(struct readonly_check_t));
+ obj->hdr.allocator = allocator;
obj->hdr.next = env->tmp_obj_list;
env->tmp_obj_list = &obj->hdr;
obj->hdr.dtor = &readonly_check_dtor;
@@ -1719,7 +1828,7 @@ static void readonly_check_dtor(struct enif_tmp_obj_t* o)
" %x != %x\r\nABORTING\r\n", chksum, obj->checksum);
abort();
}
- erts_free(ERTS_ALC_T_TMP, obj);
+ erts_free(obj->hdr.allocator, obj);
}
static unsigned calc_checksum(unsigned char* ptr, unsigned size)
{
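
enif_clear_env above now calls free_tmp_objs(env), which is defined outside these hunks; assuming it has the obvious shape, each temporary object is unlinked and handed to its own destructor, and because every object now records its allocator (ERTS_ALC_T_TMP for process-bound environments, ERTS_ALC_T_NIF for independent ones) the destructor frees it with the matching type:

/* Sketch only; the real free_tmp_objs() lives elsewhere in erl_nif.c. */
static void free_tmp_objs(ErlNifEnv* env)
{
    while (env->tmp_obj_list != NULL) {
        struct enif_tmp_obj_t* obj = env->tmp_obj_list;
        env->tmp_obj_list = obj->next;
        obj->dtor(obj);   /* e.g. tmp_alloc_dtor: erts_free(obj->allocator, obj) */
    }
}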
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index d028567faf..e5d99dc4f1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -32,9 +32,10 @@
** 2.0: R14A
** 2.1: R14B02 "vm_variant"
** 2.2: R14B03 enif_is_exception
+** 2.3: R15 enif_make_reverse_list
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 2
+#define ERL_NIF_MINOR_VERSION 3
#include <stdlib.h>
@@ -86,7 +87,11 @@ typedef long long ErlNifSInt64;
typedef unsigned int ERL_NIF_TERM;
#else
# define ERL_NIF_VM_VARIANT "beam.vanilla"
+# if SIZEOF_LONG == SIZEOF_VOID_P
typedef unsigned long ERL_NIF_TERM;
+# elif SIZEOF_LONG_LONG == SIZEOF_VOID_P
+typedef unsigned long long ERL_NIF_TERM;
+# endif
#endif
struct enif_environment_t;
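
On the standard "beam.vanilla" variant the #if chain above keeps ERL_NIF_TERM pointer-sized even on LLP64 platforms such as 64-bit Windows, where long is only 32 bits; the halfword branch above it deliberately keeps a 32-bit term type. A NIF built for the vanilla variant could state that invariant explicitly (sketch, assuming a C11 compiler):

#include <assert.h>
/* Fails to compile if the term word ever diverges from the pointer width. */
static_assert(sizeof(ERL_NIF_TERM) == sizeof(void *),
              "ERL_NIF_TERM must be exactly one machine word");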
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index c991b61abe..6396af09d0 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -136,6 +136,8 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int64,(ErlNifEnv*, ErlNifSInt64));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
#endif
ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term, ERL_NIF_TERM *list));
+ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
/*
** Add new entries here to keep compatibility on Windows!!!
@@ -256,12 +258,207 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
#endif
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
+# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
+# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
/*
** Add new entries here
*/
#endif
+
+#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+
+/* Inline functions for compile time type checking of arguments to
+ variadic functions.
+*/
+
+# define ERL_NIF_INLINE __inline__
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple1(ErlNifEnv* env,
+ ERL_NIF_TERM e1)
+{
+ return enif_make_tuple(env, 1, e1);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple2(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2)
+{
+ return enif_make_tuple(env, 2, e1, e2);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple3(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3)
+{
+ return enif_make_tuple(env, 3, e1, e2, e3);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple4(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4)
+{
+ return enif_make_tuple(env, 4, e1, e2, e3, e4);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple5(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5)
+{
+ return enif_make_tuple(env, 5, e1, e2, e3, e4, e5);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple6(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6)
+{
+ return enif_make_tuple(env, 6, e1, e2, e3, e4, e5, e6);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple7(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7)
+{
+ return enif_make_tuple(env, 7, e1, e2, e3, e4, e5, e6, e7);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple8(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8)
+{
+ return enif_make_tuple(env, 8, e1, e2, e3, e4, e5, e6, e7, e8);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_tuple9(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8,
+ ERL_NIF_TERM e9)
+{
+ return enif_make_tuple(env, 9, e1, e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list1(ErlNifEnv* env,
+ ERL_NIF_TERM e1)
+{
+ return enif_make_list(env, 1, e1);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list2(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2)
+{
+ return enif_make_list(env, 2, e1, e2);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list3(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3)
+{
+ return enif_make_list(env, 3, e1, e2, e3);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list4(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4)
+{
+ return enif_make_list(env, 4, e1, e2, e3, e4);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list5(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5)
+{
+ return enif_make_list(env, 5, e1, e2, e3, e4, e5);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list6(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6)
+{
+ return enif_make_list(env, 6, e1, e2, e3, e4, e5, e6);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list7(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7)
+{
+ return enif_make_list(env, 7, e1, e2, e3, e4, e5, e6, e7);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list8(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8)
+{
+ return enif_make_list(env, 8, e1, e2, e3, e4, e5, e6, e7, e8);
+}
+
+static ERL_NIF_INLINE ERL_NIF_TERM enif_make_list9(ErlNifEnv* env,
+ ERL_NIF_TERM e1,
+ ERL_NIF_TERM e2,
+ ERL_NIF_TERM e3,
+ ERL_NIF_TERM e4,
+ ERL_NIF_TERM e5,
+ ERL_NIF_TERM e6,
+ ERL_NIF_TERM e7,
+ ERL_NIF_TERM e8,
+ ERL_NIF_TERM e9)
+{
+ return enif_make_list(env, 9, e1, e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+# undef ERL_NIF_INLINE
+
+#else /* fallback with macros */
+
#ifndef enif_make_list1
# define enif_make_list1(ENV,E1) enif_make_list(ENV,1,E1)
# define enif_make_list2(ENV,E1,E2) enif_make_list(ENV,2,E1,E2)
@@ -281,6 +478,11 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
# define enif_make_tuple7(ENV,E1,E2,E3,E4,E5,E6,E7) enif_make_tuple(ENV,7,E1,E2,E3,E4,E5,E6,E7)
# define enif_make_tuple8(ENV,E1,E2,E3,E4,E5,E6,E7,E8) enif_make_tuple(ENV,8,E1,E2,E3,E4,E5,E6,E7,E8)
# define enif_make_tuple9(ENV,E1,E2,E3,E4,E5,E6,E7,E8,E9) enif_make_tuple(ENV,9,E1,E2,E3,E4,E5,E6,E7,E8,E9)
+#endif
+
+#endif /* __GNUC__ && !WIN32 */
+
+#ifndef enif_make_pid
# define enif_make_pid(ENV, PID) ((const ERL_NIF_TERM)((PID)->pid))
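
The new GNU C branch replaces the enif_make_tupleN/enif_make_listN convenience macros with real inline functions, so every element is checked against ERL_NIF_TERM at compile time instead of being funneled unchecked through the variadic enif_make_tuple/enif_make_list. For example (sketch; make_pair is a hypothetical helper):

ERL_NIF_TERM make_pair(ErlNifEnv* env, ERL_NIF_TERM a, ERL_NIF_TERM b)
{
    return enif_make_tuple2(env, a, b);        /* prototype-checked call */
}

/* With the inline wrapper the compiler diagnoses a stray pointer argument,
 * e.g. enif_make_tuple2(env, a, (ERL_NIF_TERM *) NULL); the old variadic
 * macro would have accepted it silently. */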
diff --git a/erts/emulator/beam/erl_nmgc.c b/erts/emulator/beam/erl_nmgc.c
index d7bfb2ab12..2a8c819360 100644
--- a/erts/emulator/beam/erl_nmgc.c
+++ b/erts/emulator/beam/erl_nmgc.c
@@ -1391,7 +1391,7 @@ Eterm *erts_inc_alloc(int need)
if (ma_gc_flags & GC_MAJOR) {
if (need > 254) {
blackmap[(Eterm*)this - global_old_heap] = 255;
- *(int*)((long)(&blackmap[(Eterm*)this - global_old_heap]+4) & ~3) =
+ *(int*)((UWord)(&blackmap[(Eterm*)this - global_old_heap]+4) & ~3) =
need;
} else
blackmap[(Eterm*)this - global_old_heap] = need;
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 2c67e781e0..329a2204cc 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -176,7 +176,7 @@ extern int erts_use_r9_pids_ports;
* 32-bit CPU.
*/
-#define ERTS_MAX_PROCESSES ((1L << 27)-1)
+#define ERTS_MAX_PROCESSES ((SWORD_CONSTANT(1) << 27)-1)
#if (ERTS_MAX_PROCESSES > MAX_SMALL)
# error "The maximum number of processes must fit in a SMALL."
#endif
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 6daa127d23..1481f66b55 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -26,6 +26,8 @@
#include "dist.h"
#include "big.h"
#include "error.h"
+#include "erl_thr_progress.h"
+#include "dtrace-wrapper.h"
Hash erts_dist_table;
Hash erts_node_table;
@@ -41,6 +43,8 @@ Sint erts_no_of_not_connected_dist_entries;
DistEntry *erts_this_dist_entry;
ErlNode *erts_this_node;
+char erts_this_node_sysname_BUFFER[256],
+ *erts_this_node_sysname = "uninitialized yet";
static Uint node_entries;
static Uint dist_entries;
@@ -118,7 +122,7 @@ dist_table_alloc(void *dep_tmpl)
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
- erts_smp_atomic_init(&dep->dist_cmd_scheduled, 0);
+ erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0);
erts_port_task_handle_init(&dep->dist_cmd);
dep->send = NULL;
dep->cache = NULL;
@@ -701,6 +705,9 @@ erts_set_this_node(Eterm sysname, Uint creation)
(void) hash_erase(&erts_node_table, (void *) erts_this_node);
erts_this_node->sysname = sysname;
erts_this_node->creation = creation;
+ erts_this_node_sysname = erts_this_node_sysname_BUFFER;
+ erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
+ "%T", sysname);
(void) hash_put(&erts_node_table, (void *) erts_this_node);
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
@@ -767,7 +774,7 @@ void erts_init_node_tables(void)
erts_this_dist_entry->finalized_out_queue.first = NULL;
erts_this_dist_entry->finalized_out_queue.last = NULL;
- erts_smp_atomic_init(&erts_this_dist_entry->dist_cmd_scheduled, 0);
+ erts_smp_atomic_init_nob(&erts_this_dist_entry->dist_cmd_scheduled, 0);
erts_port_task_handle_init(&erts_this_dist_entry->dist_cmd);
erts_this_dist_entry->send = NULL;
erts_this_dist_entry->cache = NULL;
@@ -788,6 +795,9 @@ void erts_init_node_tables(void)
erts_this_node->sysname = am_Noname;
erts_this_node->creation = 0;
erts_this_node->dist_entry = erts_this_dist_entry;
+ erts_this_node_sysname = erts_this_node_sysname_BUFFER;
+ erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
+ "%T", erts_this_node->sysname);
(void) hash_put(&erts_node_table, (void *) erts_this_node);
@@ -907,7 +917,7 @@ erts_get_node_and_dist_references(struct process *proc)
#endif
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* No need to lock any thing since we are alone... */
if (references_atoms_need_init) {
@@ -951,7 +961,7 @@ erts_get_node_and_dist_references(struct process *proc)
delete_reference_table();
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
return res;
}
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index b0a63ae035..4a015bdef9 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -169,6 +169,7 @@ extern Sint erts_no_of_not_connected_dist_entries;
extern DistEntry *erts_this_dist_entry;
extern ErlNode *erts_this_node;
+extern char *erts_this_node_sysname; /* must match erl_node_tables.c */
DistEntry *erts_channel_no_to_dist_entry(Uint);
DistEntry *erts_sysname_to_connected_dist_entry(Eterm);
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index e6b55c45e4..0f1a0d441a 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,6 +32,7 @@
#include "global.h"
#include "erl_port_task.h"
#include "dist.h"
+#include "dtrace-wrapper.h"
#if defined(DEBUG) && 0
#define HARD_DEBUG
@@ -61,6 +62,20 @@ do { \
(P)->sched.next = NULL; \
} while (0)
+#ifdef USE_VM_PROBES
+#define DTRACE_DRIVER(PROBE_NAME, PP) \
+ if (DTRACE_ENABLED(driver_ready_input)) { \
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); \
+ \
+ dtrace_pid_str(PP->connected, process_str); \
+ dtrace_port_str(PP, port_str); \
+ DTRACE3(PROBE_NAME, process_str, port_str, PP->name); \
+ }
+#else
+#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
+#endif
+
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
struct ErtsPortTaskQueue_ {
@@ -121,7 +136,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_taskq,
static ERTS_INLINE ErtsPortTask *
handle2task(ErtsPortTaskHandle *pthp)
{
- return (ErtsPortTask *) erts_smp_atomic_read(pthp);
+ return (ErtsPortTask *) erts_smp_atomic_read_nob(pthp);
}
static ERTS_INLINE void
@@ -129,7 +144,7 @@ reset_handle(ErtsPortTask *ptp)
{
if (ptp->handle) {
ASSERT(ptp == handle2task(ptp->handle));
- erts_smp_atomic_set(ptp->handle, (erts_aint_t) NULL);
+ erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL);
}
}
@@ -138,7 +153,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->handle = pthp;
if (pthp) {
- erts_smp_atomic_set(pthp, (erts_aint_t) ptp);
+ erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp);
ASSERT(ptp == handle2task(ptp->handle));
}
}
@@ -479,8 +494,8 @@ erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
case ERTS_PORT_TASK_INPUT:
case ERTS_PORT_TASK_OUTPUT:
case ERTS_PORT_TASK_EVENT:
- ASSERT(erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) > 0);
- erts_smp_atomic_dec(&erts_port_task_outstanding_io_tasks);
+ ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
+ erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
break;
default:
break;
@@ -568,7 +583,7 @@ erts_port_task_schedule(Eterm id,
ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
if (xrunq) {
/* Port emigrated ... */
- erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = xrunq;
}
@@ -594,7 +609,7 @@ erts_port_task_schedule(Eterm id,
case ERTS_PORT_TASK_INPUT:
case ERTS_PORT_TASK_OUTPUT:
case ERTS_PORT_TASK_EVENT:
- erts_smp_atomic_inc(&erts_port_task_outstanding_io_tasks);
+ erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
/* Fall through... */
default:
enqueue_task(pp->sched.taskq, ptp);
@@ -662,7 +677,7 @@ erts_port_task_free_port(Port *pp)
pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
erts_may_save_closed_port(pp);
erts_smp_port_state_unlock(pp);
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 1);
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1);
ptp->type = ERTS_PORT_TASK_FREE;
ptp->event = (ErlDrvEvent) -1;
ptp->event_data = NULL;
@@ -684,9 +699,9 @@ erts_port_task_free_port(Port *pp)
erts_may_save_closed_port(pp);
erts_smp_port_state_unlock(pp);
#ifdef ERTS_SMP
- erts_smp_atomic_dec(&pp->refc); /* Not alive */
+ erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
#endif
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 0); /* Lock */
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
handle_remaining_tasks(runq, pp); /* May release runq lock */
ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first));
pp->sched.taskq = NULL;
@@ -711,23 +726,6 @@ typedef struct {
int *resp;
} ErtsPortTaskExeBlockData;
-static void
-prepare_for_block(void *vd)
-{
- ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
- erts_smp_runq_unlock(d->runq);
-}
-
-static void
-resume_after_block(void *vd)
-{
- ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
- erts_smp_runq_lock(d->runq);
- if (d->resp)
- *d->resp = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
-}
-
/*
* Run all scheduled tasks for the first port in run queue. If
* new tasks appear while running reschedule port (free task is
@@ -748,15 +746,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
int reds = ERTS_PORT_REDS_EXECUTE;
erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
- ErtsPortTaskExeBlockData blk_data = {runq, NULL};
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- erts_smp_activity_begin(ERTS_ACTIVITY_IO,
- prepare_for_block,
- resume_after_block,
- (void *) &blk_data);
-
ERTS_PT_CHK_PORTQ(runq);
pp = pop_port(runq);
@@ -832,8 +824,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
ASSERT(!ptqp->first
&& (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
- erts_smp_atomic_dec(&pp->refc); /* Not alive */
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 0); /* Lock */
+ erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);
#endif
@@ -846,12 +838,15 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto tasks_done;
case ERTS_PORT_TASK_TIMEOUT:
reds += ERTS_PORT_REDS_TIMEOUT;
- if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
+ if (!(pp->status & ERTS_PORT_SFLGS_DEAD)) {
+ DTRACE_DRIVER(driver_timeout, pp);
(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
+ }
break;
case ERTS_PORT_TASK_INPUT:
reds += ERTS_PORT_REDS_INPUT;
ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ DTRACE_DRIVER(driver_ready_input, pp);
/* NOTE some windows drivers use ->ready_input for input and output */
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
io_tasks_executed++;
@@ -859,12 +854,14 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
case ERTS_PORT_TASK_OUTPUT:
reds += ERTS_PORT_REDS_OUTPUT;
ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ DTRACE_DRIVER(driver_ready_output, pp);
(*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_EVENT:
reds += ERTS_PORT_REDS_EVENT;
ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ DTRACE_DRIVER(driver_event, pp);
(*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
io_tasks_executed++;
break;
@@ -906,14 +903,16 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
if (io_tasks_executed) {
- ASSERT(erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) >= io_tasks_executed);
- erts_smp_atomic_add(&erts_port_task_outstanding_io_tasks, -1*io_tasks_executed);
+ ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ >= io_tasks_executed);
+ erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
+ -1*io_tasks_executed);
}
*curr_port_pp = NULL;
#ifdef ERTS_SMP
- ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read(&pp->run_queue));
+ ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif
if (!pp->sched.taskq) {
@@ -940,7 +939,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
else {
/* Port emigrated ... */
- erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
@@ -951,7 +950,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
port_was_enqueued = 1;
}
- res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
!= (erts_aint_t) 0);
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
@@ -972,25 +971,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
erts_aint_t refc;
erts_smp_mtx_unlock(pp->lock);
- refc = erts_smp_atomic_dectest(&pp->refc);
+ refc = erts_smp_atomic_dec_read_nob(&pp->refc);
ASSERT(refc >= 0);
if (refc == 0) {
erts_smp_runq_unlock(runq);
erts_port_cleanup(pp); /* Might aquire runq lock */
erts_smp_runq_lock(runq);
- res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
!= (erts_aint_t) 0);
}
}
#endif
done:
- blk_data.resp = &res;
- erts_smp_activity_end(ERTS_ACTIVITY_IO,
- prepare_for_block,
- resume_after_block,
- (void *) &blk_data);
-
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);
@@ -1072,8 +1065,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
- ASSERT(!erts_common_run_queue);
-
if (!*from_locked || !*to_locked) {
if (from_rq < to_rq) {
if (!*to_locked) {
@@ -1107,12 +1098,12 @@ erts_port_migrate(Port *prt, int *prt_locked,
/* Refuse to migrate to a suspended run queue */
if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
- if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue))
+ if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue))
return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
dequeue_port(from_rq, prt);
- erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) to_rq);
+ erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) to_rq);
enqueue_port(to_rq, prt);
return ERTS_MIGRATE_SUCCESS;
}
@@ -1125,7 +1116,8 @@ erts_port_migrate(Port *prt, int *prt_locked,
void
erts_port_task_init(void)
{
- erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0);
+ erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
+ (erts_aint_t) 0);
init_port_task_alloc();
init_port_taskq_alloc();
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 3e2c5f07ab..d7104e1143 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -79,13 +79,13 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
ERTS_GLB_INLINE void
erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_init(pthp, (erts_aint_t) NULL);
+ erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL);
}
ERTS_GLB_INLINE int
erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
{
- return ((void *) erts_smp_atomic_read(pthp)) != NULL;
+ return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL;
}
ERTS_GLB_INLINE void
@@ -102,8 +102,8 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp)
ERTS_GLB_INLINE int
erts_port_task_have_outstanding_io_tasks(void)
{
- ERTS_THR_MEMORY_BARRIER;
- return erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != 0;
+ return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks)
+ != 0);
}
#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
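
Replacing the explicit ERTS_THR_MEMORY_BARRIER with _acqb/_relb operations pairs a release on the writer side (erts_smp_atomic_add_relb / erts_smp_atomic_dec_relb when I/O tasks complete or are aborted) with an acquire on the reader side shown above. In C11 terms the pairing looks roughly like this (illustrative analogue, not ERTS code):

#include <stdatomic.h>

static atomic_long outstanding_io_tasks;

/* writer: publish completion with release ordering */
static void io_tasks_done(long n)
{
    atomic_fetch_sub_explicit(&outstanding_io_tasks, n, memory_order_release);
}

/* reader: acquire, so everything written before the release is visible */
static int have_outstanding_io_tasks(void)
{
    return atomic_load_explicit(&outstanding_io_tasks, memory_order_acquire) != 0;
}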
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 2704359a8f..95d408f79d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -39,6 +39,10 @@
#include "erl_binary.h"
#include "beam_bp.h"
#include "erl_cpu_topology.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
+#include "dtrace-wrapper.h"
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -101,6 +105,9 @@ do { \
#define ERTS_EMPTY_RUNQ(RQ) \
((RQ)->len == 0 && (RQ)->misc.start == NULL)
+#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
+ ((RQ)->ports.info.len == 0 && (RQ)->misc.start == NULL)
+
extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
@@ -111,6 +118,7 @@ static Sint p_serial;
static Uint p_serial_mask;
static Uint p_serial_shift;
+int erts_sched_compact_load;
Uint erts_no_schedulers;
Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
Uint erts_process_tab_index_mask;
@@ -124,9 +132,10 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
#ifdef ERTS_SMP
-
int erts_disable_proc_not_running_opt;
+static ErtsAuxWorkData *aux_thread_aux_work_data;
+
#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((erts_aint32_t) 1) << 0)
#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
@@ -134,15 +143,15 @@ int erts_disable_proc_not_running_opt;
#ifndef DEBUG
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic32_set(&schdlr_sspnd.changing, (VAL))
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
do { \
erts_aint32_t old_val__; \
- old_val__ = erts_smp_atomic32_xchg(&schdlr_sspnd.changing, \
- (VAL)); \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.changing, \
+ (VAL)); \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
@@ -158,7 +167,7 @@ static struct {
erts_smp_atomic32_t changing;
erts_smp_atomic32_t active;
struct {
- erts_smp_atomic32_t ongoing;
+ int ongoing;
long wait_active;
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
@@ -191,8 +200,6 @@ do { \
erts_sched_stat_t erts_sched_stat;
-ErtsRunQueue *erts_common_run_queue;
-
#ifdef USE_THREADS
static erts_tsd_key_t sched_data_key;
#endif
@@ -213,8 +220,6 @@ Uint erts_no_run_queues;
ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_SMP
-
typedef union {
ErtsSchedulerSleepInfo ssi;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerSleepInfo))];
@@ -222,18 +227,11 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-#endif
-
-#ifndef BM_COUNTERS
-static int processes_busy;
-#endif
-
Process** process_tab;
static Uint last_reductions;
static Uint last_exact_reductions;
Uint erts_default_process_flags;
Eterm erts_system_monitor;
-Eterm erts_system_monitor_msg_queue_len;
Eterm erts_system_monitor_long_gc;
Eterm erts_system_monitor_large_heap;
struct erts_system_monitor_flags_t erts_system_monitor_flags;
@@ -285,8 +283,9 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
ERTS_ALC_T_PROC_LIST)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
- (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_schedulers), \
- &aligned_sched_sleep_info[(IX)].ssi)
+ (ASSERT_EXPR(-1 <= ((int) (IX)) \
+ && ((int) (IX)) < ((int) erts_no_schedulers)), \
+ &aligned_sched_sleep_info[(IX)].ssi)
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -339,6 +338,54 @@ static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
int yreg);
+
+static void aux_work_timeout(void *unused);
+static void aux_work_timeout_early_init(int no_schedulers);
+static void aux_work_timeout_late_init(void);
+static void setup_aux_work_timer(void);
+
+#if defined(DEBUG) || 0
+#define ERTS_DBG_CHK_AUX_WORK_VAL(V) dbg_chk_aux_work_val((V))
+static void
+dbg_chk_aux_work_val(erts_aint32_t value)
+{
+ erts_aint32_t valid = 0;
+
+ valid |= ERTS_SSI_AUX_WORK_SET_TMO;
+ valid |= ERTS_SSI_AUX_WORK_MISC;
+ valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM;
+ valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
+#if ERTS_USE_ASYNC_READY_Q
+ valid |= ERTS_SSI_AUX_WORK_ASYNC_READY;
+ valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+#endif
+#ifdef ERTS_SMP
+ valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
+ valid |= ERTS_SSI_AUX_WORK_DD;
+ valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+#endif
+#if HAVE_ERTS_MSEG
+ valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
+#endif
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ valid |= ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+#endif
+#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
+ valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
+#endif
+
+ if (~valid & value)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Invalid aux_work value found: 0x%x\n",
+ ~valid & value);
+}
+#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI) \
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&(SSI)->aux_work))
+#else
+#define ERTS_DBG_CHK_AUX_WORK_VAL(V)
+#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
+#endif
+
#ifdef ERTS_SMP
static void handle_pending_exiters(ErtsProcList *);
@@ -410,7 +457,7 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic32_init(&process_count, 0);
+ erts_smp_atomic32_init_nob(&process_count, 0);
if (erts_use_r9_pids_ports) {
proc_bits = ERTS_R9_PROC_BITS;
@@ -435,9 +482,6 @@ erts_init_process(int ncpu)
p_serial_shift = erts_fit_in_bits(erts_max_processes - 1);
p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift);
erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift);
-#ifndef BM_COUNTERS
- processes_busy = 0;
-#endif
last_reductions = 0;
last_exact_reductions = 0;
erts_default_process_flags = 0;
@@ -484,6 +528,213 @@ erts_late_init_process(void)
}
+static void
+init_sched_wall_time(ErtsSchedWallTime *swtp)
+{
+ swtp->enabled = 0;
+ swtp->start = 0;
+ swtp->working.total = 0;
+ swtp->working.start = 0;
+ swtp->working.currently = 0;
+}
+
+static ERTS_INLINE Uint64
+sched_wall_time_ts(void)
+{
+#ifdef HAVE_GETHRTIME
+ return (Uint64) sys_gethrtime();
+#else
+ Uint64 res;
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ res = (Uint64) tv.tv_sec*1000000;
+ res += (Uint64) tv.tv_usec;
+ return res;
+#endif
+}
+
+static ERTS_INLINE void
+sched_wall_time_change(ErtsSchedulerData *esdp, int working)
+{
+ if (esdp->sched_wall_time.enabled) {
+ Uint64 ts = sched_wall_time_ts();
+ if (working) {
+#ifdef DEBUG
+ ASSERT(!esdp->sched_wall_time.working.currently);
+ esdp->sched_wall_time.working.currently = 1;
+#endif
+ ts -= esdp->sched_wall_time.start;
+ esdp->sched_wall_time.working.start = ts;
+ }
+ else {
+#ifdef DEBUG
+ ASSERT(esdp->sched_wall_time.working.currently);
+ esdp->sched_wall_time.working.currently = 0;
+#endif
+ ts -= esdp->sched_wall_time.start;
+ ts -= esdp->sched_wall_time.working.start;
+ esdp->sched_wall_time.working.total += ts;
+ }
+ }
+}
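
The working/total arithmetic above is easier to follow outside the diff. Below is a minimal standalone sketch of the same bookkeeping, assuming a monotonic microsecond clock; now_us(), work_begin() and work_end() are illustrative names, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <time.h>

typedef struct {
    int enabled;
    uint64_t start;              /* clock value when measurement was enabled */
    struct {
        uint64_t total;          /* accumulated busy time                    */
        uint64_t start;          /* busy-period start, as offset from start  */
        int currently;
    } working;
} wall_time;

static uint64_t now_us(void)     /* stand-in for sched_wall_time_ts() */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t) ts.tv_sec * 1000000 + (uint64_t) ts.tv_nsec / 1000;
}

static void work_begin(wall_time *w)     /* cf. sched_wall_time_change(esdp, 1) */
{
    if (w->enabled) {
        assert(!w->working.currently);
        w->working.currently = 1;
        w->working.start = now_us() - w->start;      /* offset, not absolute */
    }
}

static void work_end(wall_time *w)       /* cf. sched_wall_time_change(esdp, 0) */
{
    if (w->enabled) {
        assert(w->working.currently);
        w->working.currently = 0;
        w->working.total += (now_us() - w->start) - w->working.start;
    }
}

When a reading is taken while the scheduler is still busy, reply_sched_wall_time() below accounts for the open interval the same way: working = working.total + ((now - start) - working.start).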
+
+typedef struct {
+ int set;
+ int enable;
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ Uint req_sched;
+ erts_smp_atomic32_t refc;
+} ErtsSchedWallTimeReq;
+
+#if !HALFWORD_HEAP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(swtreq,
+ ErtsSchedWallTimeReq,
+ 5,
+ ERTS_ALC_T_SCHED_WTIME_REQ)
+#else
+static ERTS_INLINE ErtsSchedWallTimeReq *
+swtreq_alloc(void)
+{
+ return erts_alloc(ERTS_ALC_T_SCHED_WTIME_REQ,
+ sizeof(ErtsSchedWallTimeReq));
+}
+
+static ERTS_INLINE void
+swtreq_free(ErtsSchedWallTimeReq *ptr)
+{
+ erts_free(ERTS_ALC_T_SCHED_WTIME_REQ, ptr);
+}
+#endif
+
+static void
+reply_sched_wall_time(void *vswtrp)
+{
+ Uint64 working = 0, total = 0;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsSchedWallTimeReq *swtrp = (ErtsSchedWallTimeReq *) vswtrp;
+ ErtsProcLocks rp_locks = (swtrp->req_sched == esdp->no
+ ? ERTS_PROC_LOCK_MAIN
+ : 0);
+ Process *rp = swtrp->proc;
+ Eterm ref_copy = NIL, msg;
+ Eterm *hp = NULL;
+ Eterm **hpp;
+ Uint sz, *szp;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+
+ ASSERT(esdp);
+
+ if (swtrp->set) {
+ if (!swtrp->enable && esdp->sched_wall_time.enabled)
+ esdp->sched_wall_time.enabled = 0;
+ else if (swtrp->enable && !esdp->sched_wall_time.enabled) {
+ Uint64 ts = sched_wall_time_ts();
+ esdp->sched_wall_time.enabled = 1;
+ esdp->sched_wall_time.start = ts;
+ esdp->sched_wall_time.working.total = 0;
+ esdp->sched_wall_time.working.start = 0;
+ esdp->sched_wall_time.working.currently = 1;
+ }
+ }
+
+ if (esdp->sched_wall_time.enabled) {
+ Uint64 ts = sched_wall_time_ts();
+ ASSERT(esdp->sched_wall_time.working.currently);
+ ts -= esdp->sched_wall_time.start;
+ total = ts;
+ ts -= esdp->sched_wall_time.working.start;
+ working = esdp->sched_wall_time.working.total + ts;
+ }
+
+ sz = 0;
+ hpp = NULL;
+ szp = &sz;
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, swtrp->ref);
+ else
+ *szp += REF_THING_SIZE;
+
+ if (swtrp->set)
+ msg = ref_copy;
+ else {
+ msg = (!esdp->sched_wall_time.enabled
+ ? am_notsup
+ : erts_bld_tuple(hpp, szp, 3,
+ make_small(esdp->no),
+ erts_bld_uint64(hpp, szp, working),
+ erts_bld_uint64(hpp, szp, total)));
+
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ }
+ if (hpp)
+ break;
+
+ hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ szp = NULL;
+ hpp = &hp;
+ }
+
+ erts_queue_message(rp, &rp_locks, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+
+ if (swtrp->req_sched == esdp->no)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+ erts_smp_proc_dec_refc(rp);
+
+ if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
+ swtreq_free(vswtrp);
+}
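
reply_sched_wall_time() above uses a common ERTS idiom: the same term-building code runs twice, first with hpp == NULL and szp pointing at a size accumulator, then again after a heap of exactly that size has been allocated. A rough standalone model of the size-then-build pattern, using snprintf() instead of the Eterm builders (names and the message format are only illustrative):

#include <stdio.h>
#include <stdlib.h>

/* One builder serves both passes: with buf == NULL it only reports the size. */
static size_t build_reply(char *buf, size_t cap, int sched, long busy, long total)
{
    int n = snprintf(buf, buf ? cap : 0, "{%d,%ld,%ld}", sched, busy, total);
    return (size_t) n + 1;                          /* include terminating NUL */
}

int main(void)
{
    size_t sz = build_reply(NULL, 0, 1, 42, 100);   /* pass 1: size only  */
    char *msg = malloc(sz);
    if (!msg)
        return 1;
    build_reply(msg, sz, 1, 42, 100);               /* pass 2: real build */
    puts(msg);                                      /* prints {1,42,100}  */
    free(msg);
    return 0;
}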
+
+Eterm
+erts_sched_wall_time_request(Process *c_p, int set, int enable)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ Eterm ref;
+ ErtsSchedWallTimeReq *swtrp;
+ Eterm *hp;
+
+ if (!set && !esdp->sched_wall_time.enabled)
+ return THE_NON_VALUE;
+
+ swtrp = swtreq_alloc();
+ ref = erts_make_ref(c_p);
+ hp = &swtrp->ref_heap[0];
+
+ swtrp->set = set;
+ swtrp->enable = enable;
+ swtrp->proc = c_p;
+ swtrp->ref = STORE_NC(&hp, NULL, ref);
+ swtrp->req_sched = esdp->no;
+ erts_smp_atomic32_init_nob(&swtrp->refc,
+ (erts_aint32_t) erts_no_schedulers);
+
+ erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+
+#ifdef ERTS_SMP
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_sched_wall_time,
+ (void *) swtrp);
+#endif
+
+ reply_sched_wall_time((void *) swtrp);
+
+ return ref;
+}
+
static ERTS_INLINE ErtsProcList *
proclist_create(Process *p)
{
@@ -577,6 +828,13 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
case ERTS_SSI_FLG_POLL_SLEEPING:
erts_sys_schedule_interrupt(1);
break;
+ case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
+ /*
+ * Thread progress blocking while poll sleeping; need
+ * to signal on both...
+ */
+ erts_sys_schedule_interrupt(1);
+ /* fall through */
case ERTS_SSI_FLG_TSE_SLEEPING:
erts_tse_set(ssi->event);
break;
@@ -589,189 +847,827 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
}
}
+#endif
+
+static ERTS_INLINE void
+set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flgs)
+{
+ erts_aint32_t old_flgs;
+
+ ERTS_DBG_CHK_SSI_AUX_WORK(ssi);
+
+ old_flgs = erts_atomic32_read_nob(&ssi->aux_work);
+ if ((old_flgs & flgs) == 0) {
+
+ old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
+
+ if ((old_flgs & flgs) == 0) {
+#ifdef ERTS_SMP
+ erts_sched_poke(ssi);
+#else
+ erts_sys_schedule_interrupt(1);
+#endif
+ }
+ }
+}
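
set_aux_work_flags_wakeup_nob() wakes the sleeper only when the caller is the one that turns a previously clear flag on, and the initial plain read skips the atomic read-modify-write whenever one of the requested flags is already set (an earlier caller has then already issued the wakeup). A compile-and-run sketch of the same pattern with C11 atomics; wake() is a hypothetical stand-in for erts_sched_poke()/erts_sys_schedule_interrupt().

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint aux_work;

static void wake(void)                 /* stand-in for poking the sleeper */
{
    puts("wake sleeper");
}

static void set_flags_wakeup(unsigned flgs)
{
    /* Cheap plain read first: if one of the requested flags is already set,
       the thread that set it has already taken care of the wakeup. */
    unsigned old = atomic_load_explicit(&aux_work, memory_order_relaxed);
    if ((old & flgs) == 0) {
        old = atomic_fetch_or_explicit(&aux_work, flgs, memory_order_relaxed);
        if ((old & flgs) == 0)
            wake();                    /* we made the 0 -> 1 transition */
    }
}

int main(void)
{
    set_flags_wakeup(1u << 3);         /* flag was clear: wakes           */
    set_flags_wakeup(1u << 3);         /* flag already set: nothing to do */
    return 0;
}

The _relb variant that follows skips the pre-read and performs the fetch-or with release ordering instead.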
+
+static ERTS_INLINE void
+set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
+ erts_aint32_t flgs)
+{
+ erts_aint32_t old_flgs;
+
+ ERTS_DBG_CHK_SSI_AUX_WORK(ssi);
+
+ old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);
+
+ if ((old_flgs & flgs) == 0) {
+#ifdef ERTS_SMP
+ erts_sched_poke(ssi);
+#else
+ erts_sys_schedule_interrupt(1);
+#endif
+ }
+}
+
+static ERTS_INLINE erts_aint32_t
+set_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
+{
+ return erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
+}
+
+static ERTS_INLINE erts_aint32_t
+unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
+{
+ return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs);
+}
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE void
+thr_prgr_current_reset(ErtsAuxWorkData *awdp)
+{
+ awdp->current_thr_prgr = ERTS_THR_PRGR_INVALID;
+}
+
+static ERTS_INLINE ErtsThrPrgrVal
+thr_prgr_current(ErtsAuxWorkData *awdp)
+{
+ ErtsThrPrgrVal current = awdp->current_thr_prgr;
+ if (current == ERTS_THR_PRGR_INVALID) {
+ current = erts_thr_progress_current();
+ awdp->current_thr_prgr = current;
+ }
+ return current;
+}
+
+#endif
+
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
struct erts_misc_aux_work_t_ {
- erts_misc_aux_work_t *next;
void (*func)(void *);
void *arg;
};
-typedef struct {
- erts_smp_mtx_t mtx;
- erts_misc_aux_work_t *first;
- erts_misc_aux_work_t *last;
-} erts_misc_aux_work_q_t;
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
+ erts_misc_aux_work_t,
+ 200,
+ ERTS_ALC_T_MISC_AUX_WORK)
typedef union {
- erts_misc_aux_work_q_t data;
- char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_misc_aux_work_q_t))];
+ ErtsThrQ_t q;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQ_t))];
} erts_algnd_misc_aux_work_q_t;
static erts_algnd_misc_aux_work_q_t *misc_aux_work_queues;
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
- erts_misc_aux_work_t,
- 200,
- ERTS_ALC_T_MISC_AUX_WORK)
+static void
+notify_aux_work(void *vssi)
+{
+ set_aux_work_flags_wakeup_nob((ErtsSchedulerSleepInfo *) vssi,
+ ERTS_SSI_AUX_WORK_MISC);
+}
static void
init_misc_aux_work(void)
{
int ix;
+ ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
+ qinit.notify = notify_aux_work;
init_misc_aux_work_alloc();
misc_aux_work_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MISC_AUX_WORK_Q,
- erts_no_schedulers *
- sizeof(erts_algnd_misc_aux_work_q_t));
+ sizeof(erts_algnd_misc_aux_work_q_t)
+ * (erts_no_schedulers+1));
- for (ix = 0; ix < erts_no_schedulers; ix++) {
- erts_smp_mtx_init_x(&misc_aux_work_queues[ix].data.mtx,
- "misc_aux_work_queue",
- make_small(ix + 1));
- misc_aux_work_queues[ix].data.first = NULL;
- misc_aux_work_queues[ix].data.last = NULL;
+#ifdef ERTS_SMP
+ ix = 0; /* aux_thread + schedulers */
+#else
+ ix = 1; /* scheduler only */
+#endif
+
+ for (; ix <= erts_no_schedulers; ix++) {
+ qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1);
+ erts_thr_q_initialize(&misc_aux_work_queues[ix].q, &qinit);
}
}
-static void
-handle_misc_aux_work(ErtsSchedulerData *esdp)
-{
- int ix = (int) esdp->no - 1;
- erts_misc_aux_work_t *mawp;
+static erts_aint32_t
+misc_aux_work_clean(ErtsThrQ_t *q,
+ ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ switch (erts_thr_q_clean(q)) {
+ case ERTS_THR_Q_DIRTY:
+ set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
+ return aux_work | ERTS_SSI_AUX_WORK_MISC;
+ case ERTS_THR_Q_NEED_THR_PRGR:
+#ifdef ERTS_SMP
+ set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
+ erts_thr_progress_wakeup(awdp->esdp,
+ erts_thr_q_need_thr_progress(q));
+#endif
+ case ERTS_THR_Q_CLEAN:
+ break;
+ }
+ return aux_work;
+}
- erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
- mawp = misc_aux_work_queues[ix].data.first;
- misc_aux_work_queues[ix].data.first = NULL;
- misc_aux_work_queues[ix].data.last = NULL;
- erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
+static ERTS_INLINE erts_aint32_t
+handle_misc_aux_work(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ ErtsThrQ_t *q = &misc_aux_work_queues[awdp->sched_id].q;
- while (mawp) {
- erts_misc_aux_work_t *free_mawp;
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
+ while (1) {
+ erts_misc_aux_work_t *mawp = erts_thr_q_dequeue(q);
+ if (!mawp)
+ break;
mawp->func(mawp->arg);
- free_mawp = mawp;
- mawp = mawp->next;
- misc_aux_work_free(free_mawp);
+ misc_aux_work_free(mawp);
}
+
+ return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC);
+}
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE erts_aint32_t
+handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ if (!erts_thr_progress_has_reached_this(thr_prgr_current(awdp),
+ awdp->misc.thr_prgr))
+ return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
+
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
+
+ return misc_aux_work_clean(&misc_aux_work_queues[awdp->sched_id].q,
+ awdp,
+ aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
+}
+
+#endif
+
+static ERTS_INLINE void
+schedule_misc_aux_work(int sched_id,
+ void (*func)(void *),
+ void *arg)
+{
+ ErtsThrQ_t *q;
+ erts_misc_aux_work_t *mawp;
+
+#ifdef ERTS_SMP
+ ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers);
+#else
+ ASSERT(sched_id == 1);
+#endif
+
+ q = &misc_aux_work_queues[sched_id].q;
+ mawp = misc_aux_work_alloc();
+ mawp->func = func;
+ mawp->arg = arg;
+ erts_thr_q_enqueue(q, mawp);
}
void
-erts_smp_schedule_misc_aux_work(int ignore_self,
- int max_sched,
- void (*func)(void *),
- void *arg)
+erts_schedule_misc_aux_work(int sched_id,
+ void (*func)(void *),
+ void *arg)
{
- int ix, ignore_ix = -1;
+ schedule_misc_aux_work(sched_id, func, arg);
+}
+
+void
+erts_schedule_multi_misc_aux_work(int ignore_self,
+ int max_sched,
+ void (*func)(void *),
+ void *arg)
+{
+ int id, self = 0;
if (ignore_self) {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
if (esdp)
- ignore_ix = (int) esdp->no - 1;
+ self = (int) esdp->no;
}
- ASSERT(0 <= max_sched && max_sched <= erts_no_schedulers);
+ ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
- for (ix = 0; ix < max_sched; ix++) {
- erts_aint32_t aux_work;
- erts_misc_aux_work_t *mawp;
- ErtsSchedulerSleepInfo *ssi;
- if (ix == ignore_ix)
+ for (id = 1; id <= max_sched; id++) {
+ if (id == self)
continue;
+ schedule_misc_aux_work(id, func, arg);
+ }
+}
- mawp = misc_aux_work_alloc();
+#if ERTS_USE_ASYNC_READY_Q
- mawp->func = func;
- mawp->arg = arg;
- mawp->next = NULL;
+void
+erts_notify_check_async_ready_queue(void *vno)
+{
+ int ix = ((int) (SWord) vno) -1;
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_SSI_AUX_WORK_ASYNC_READY);
+}
- erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
- if (!misc_aux_work_queues[ix].data.last)
- misc_aux_work_queues[ix].data.first = mawp;
- else
- misc_aux_work_queues[ix].data.last->next = mawp;
- misc_aux_work_queues[ix].data.last = mawp;
- erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
-
- ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_MISC);
- if ((aux_work & ERTS_SSI_AUX_WORK_MISC) == 0)
- erts_sched_poke(ssi);
- }
+static ERTS_INLINE erts_aint32_t
+handle_async_ready(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
+ if (erts_check_async_ready(awdp->async_ready.queue)) {
+ if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY)
+ & ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN) {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+ aux_work &= ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+ }
+ return aux_work;
+ }
+#ifdef ERTS_SMP
+ awdp->async_ready.need_thr_prgr = 0;
+#endif
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+ return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY)
+ | ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_async_ready_clean(ErtsAuxWorkData *awdp,
+ erts_aint32_t aux_work)
+{
+ void *thr_prgr_p;
+
+#ifdef ERTS_SMP
+ if (awdp->async_ready.need_thr_prgr
+ && !erts_thr_progress_has_reached_this(thr_prgr_current(awdp),
+ awdp->async_ready.thr_prgr)) {
+ return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+ }
+
+ awdp->async_ready.need_thr_prgr = 0;
+ thr_prgr_p = (void *) &awdp->async_ready.thr_prgr;
+#else
+ thr_prgr_p = NULL;
+#endif
+
+ switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) {
+ case ERTS_ASYNC_READY_CLEAN:
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
+ return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+#ifdef ERTS_SMP
+ case ERTS_ASYNC_READY_NEED_THR_PRGR:
+ erts_thr_progress_wakeup(awdp->esdp,
+ awdp->async_ready.thr_prgr);
+ awdp->async_ready.need_thr_prgr = 1;
+ return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
+#endif
+ default:
+ return aux_work;
+ }
+}
+
+#endif
+
+static ERTS_INLINE erts_aint32_t
+handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ erts_aint32_t res;
+
+ unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC));
+ aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC);
+ res = erts_alloc_fix_alloc_shrink(awdp->sched_id, aux_work);
+ if (res) {
+ set_aux_work_flags(ssi, res);
+ aux_work |= res;
+ }
+
+ return aux_work;
+}
+
+#ifdef ERTS_SMP
+
+void
+erts_alloc_notify_delayed_dealloc(int ix)
+{
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1),
+ ERTS_SSI_AUX_WORK_DD);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ int need_thr_progress = 0;
+ ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+ int more_work = 0;
+
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
+ &need_thr_progress,
+ &wakeup,
+ &more_work);
+ if (more_work) {
+ if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD)
+ & ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ aux_work &= ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+ }
+ return aux_work;
+ }
+
+ if (need_thr_progress) {
+ if (wakeup == ERTS_THR_PRGR_INVALID)
+ wakeup = erts_thr_progress_later_than(thr_prgr_current(awdp));
+	awdp->dd.thr_prgr = wakeup;
+	set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ erts_thr_progress_wakeup(awdp->esdp, wakeup);
+ }
+ else if (awdp->dd.completed_callback) {
+ awdp->dd.completed_callback(awdp->dd.completed_arg);
+ awdp->dd.completed_callback = NULL;
+ awdp->dd.completed_arg = NULL;
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_DD;
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ ErtsSchedulerSleepInfo *ssi;
+ int need_thr_progress;
+ int more_work;
+ ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+ ErtsThrPrgrVal current = thr_prgr_current(awdp);
+
+ if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr))
+ return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+
+ ssi = awdp->ssi;
+ need_thr_progress = 0;
+ more_work = 0;
+
+ erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
+ &need_thr_progress,
+ &wakeup,
+ &more_work);
+ if (more_work) {
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ return ((aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR)
+ | ERTS_SSI_AUX_WORK_DD);
+ }
+
+ if (need_thr_progress) {
+ if (wakeup == ERTS_THR_PRGR_INVALID)
+ wakeup = erts_thr_progress_later_than(current);
+ awdp->dd.thr_prgr = wakeup;
+ erts_thr_progress_wakeup(awdp->esdp, wakeup);
+ }
+ else {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
+ if (awdp->dd.completed_callback) {
+ awdp->dd.completed_callback(awdp->dd.completed_arg);
+ awdp->dd.completed_callback = NULL;
+ awdp->dd.completed_arg = NULL;
+ }
+ }
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+}
+
+static erts_atomic32_t completed_dealloc_count;
+
+static void
+completed_dealloc(void *vproc)
+{
+ if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
+ erts_resume((Process *) vproc, (ErtsProcLocks) 0);
+ erts_smp_proc_dec_refc((Process *) vproc);
+ }
+}
+
+static void
+setup_completed_dealloc(void *vproc)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsAuxWorkData *awdp = (esdp
+ ? &esdp->aux_work_data
+ : aux_thread_aux_work_data);
+ erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
+ set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD);
+ awdp->dd.completed_callback = completed_dealloc;
+ awdp->dd.completed_arg = vproc;
+}
+
+static void
+prep_setup_completed_dealloc(void *vproc)
+{
+ erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1);
+ if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) {
+ /* scheduler threads */
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ setup_completed_dealloc,
+ vproc);
+ /* aux_thread */
+ erts_schedule_misc_aux_work(0,
+ setup_completed_dealloc,
+ vproc);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+int
+erts_debug_wait_deallocations(Process *c_p)
+{
+#ifndef ERTS_SMP
+ erts_alloc_fix_alloc_shrink(1, 0);
+ return 1;
+#else
+ /* Only one process at a time can do this */
+ erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1));
+ if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count,
+ count,
+ 0)) {
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_smp_proc_inc_refc(c_p);
+ /* scheduler threads */
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ prep_setup_completed_dealloc,
+ (void *) c_p);
+ /* aux_thread */
+ erts_schedule_misc_aux_work(0,
+ prep_setup_completed_dealloc,
+ (void *) c_p);
+ return 1;
+ }
+ return 0;
+#endif
}
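
The counter used by erts_debug_wait_deallocations() is worth spelling out: with n schedulers plus the aux thread there are n+1 helper threads, and each of them decrements completed_dealloc_count twice, once in the prep phase and once when its delayed deallocations have drained, so the calling process is resumed exactly when the initial value 2*(n+1) reaches zero. A tiny worked example (the scheduler count is hypothetical):

#include <assert.h>

int main(void)
{
    int n = 4;                    /* hypothetical number of schedulers         */
    int count = 2 * (n + 1);      /* value installed by the cmpxchg: 10        */

    count -= n + 1;               /* prep_setup_completed_dealloc, n+1 threads */
    assert(count == n + 1);       /* the last prep decrement fans out setup    */

    count -= n + 1;               /* completed_dealloc, n+1 threads            */
    assert(count == 0);           /* the final decrement resumes the process   */
    return 0;
}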
+
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
void
erts_smp_notify_check_children_needed(void)
{
int i;
+ for (i = 0; i < erts_no_schedulers; i++)
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+}
+static ERTS_INLINE erts_aint32_t
+handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ erts_check_children();
+ return aux_work & ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
+}
+
+#endif
+
+static void
+notify_reap_ports_relb(void)
+{
+ int i;
for (i = 0; i < erts_no_schedulers; i++) {
- erts_aint32_t aux_work;
- ErtsSchedulerSleepInfo *ssi;
- ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
- aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
- erts_sched_poke(ssi);
+ set_aux_work_flags_wakeup_relb(ERTS_SCHED_SLEEP_INFO_IX(i),
+ ERTS_SSI_AUX_WORK_REAP_PORTS);
}
}
-#endif
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
+erts_smp_atomic32_t erts_halt_progress;
+int erts_halt_code;
+
static ERTS_INLINE erts_aint32_t
-blockable_aux_work(ErtsSchedulerData *esdp,
- ErtsSchedulerSleepInfo *ssi,
- erts_aint32_t aux_work)
+handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
- if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
- if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
- aux_work = erts_smp_atomic32_band(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_MISC);
- aux_work &= ~ERTS_SSI_AUX_WORK_MISC;
- handle_misc_aux_work(esdp);
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
+ awdp->esdp->run_queue->halt_in_progress = 1;
+ if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
+ int i;
+ erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
+ for (i = 0; i < erts_max_ports; i++) {
+ Port *prt = &erts_port[i];
+ erts_smp_port_state_lock(prt);
+ if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ | ERTS_PORT_SFLG_HALT))) {
+ erts_smp_port_state_unlock(prt);
+ continue;
+ }
+ /* We need to set the halt flag - get the port lock */
+#ifdef ERTS_SMP
+ erts_smp_atomic_inc_nob(&prt->refc);
+#endif
+ erts_smp_port_state_unlock(prt);
+#ifdef ERTS_SMP
+ erts_smp_mtx_lock(prt->lock);
+#endif
+ if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ | ERTS_PORT_SFLG_HALT))) {
+ erts_port_release(prt);
+ continue;
+ }
+ erts_port_status_bor_set(prt, ERTS_PORT_SFLG_HALT);
+ erts_smp_atomic32_inc_nob(&erts_halt_progress);
+ if (prt->status & (ERTS_PORT_SFLG_EXITING
+ | ERTS_PORT_SFLG_CLOSING)) {
+ erts_port_release(prt);
+ continue;
+ }
+ erts_do_exit_port(prt, prt->id, am_killed);
+ erts_port_release(prt);
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
- aux_work = erts_smp_atomic32_band(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
- erts_check_children();
+ if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
+ erl_exit_flush_async(erts_halt_code, "");
}
-#endif
}
- return aux_work;
+ return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS;
+}
+
+#if HAVE_ERTS_MSEG
+
+static ERTS_INLINE erts_aint32_t
+handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+{
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
+ erts_mseg_cache_check();
+ return aux_work & ~ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
}
#endif
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
static ERTS_INLINE erts_aint32_t
-nonblockable_aux_work(ErtsSchedulerData *esdp,
- ErtsSchedulerSleepInfo *ssi,
- erts_aint32_t aux_work)
+handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
- if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
+ setup_aux_work_timer();
+ return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO;
+}
+static erts_aint32_t
+handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work)
+{
+#undef HANDLE_AUX_WORK
+#define HANDLE_AUX_WORK(FLG, HNDLR) \
+ ignore |= FLG; \
+ if (aux_work & FLG) { \
+ aux_work = HNDLR(awdp, aux_work); \
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
+ if (!(aux_work & ~ignore)) { \
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
+ return aux_work; \
+ } \
}
-}
+
+ erts_aint32_t aux_work = orig_aux_work;
+ erts_aint32_t ignore = 0;
+
+#ifdef ERTS_SMP
+ thr_prgr_current_reset(awdp);
+#endif
+
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+ ASSERT(aux_work);
+
+ /*
+ * Handlers are *only* allowed to modify flags in return value
+     * and ssi flags that are explicitly handled by the handler.
+ * Handlers are, e.g., not allowed to read the ssi flag field and
+ * then unconditionally return that value.
+ *
+ * Flag field returned should only contain flags for work that
+ * can continue immediately.
+ */
+
+ /*
+ * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
+     * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
+     * to each other. Most frequent first.
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD,
+ handle_delayed_dealloc);
+ /* DD must be before DD_THR_PRGR */
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR,
+ handle_delayed_dealloc_thr_prgr);
+#endif
+
+ HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
+ | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
+ handle_fix_alloc);
+
+#if ERTS_USE_ASYNC_READY_Q
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY,
+ handle_async_ready);
+ /* ASYNC_READY must be before ASYNC_READY_CLEAN */
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN,
+ handle_async_ready_clean);
+#endif
+
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR,
+ handle_misc_aux_work_thr_prgr);
#endif
+ /* MISC_THR_PRGR must be before MISC */
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
+ handle_misc_aux_work);
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CHECK_CHILDREN,
+ handle_check_children);
+#endif
+
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
+ handle_setup_aux_work_timer);
+
+#if HAVE_ERTS_MSEG
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ handle_mseg_cache_check);
+#endif
+
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
+ handle_reap_ports);
+
+ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
+
+ return aux_work;
+
+#undef HANDLE_AUX_WORK
+
+}
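
HANDLE_AUX_WORK above is a small dispatch loop: flags are visited in expected frequency order, every visited flag is added to an ignore mask, and the function returns as soon as no flag outside that mask remains set; a handler may leave its own flag set in the returned value to signal work that can continue immediately. A stripped-down runnable model of the same control flow (flag names and handlers are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define WORK_A (1u << 0)          /* "frequent" work */
#define WORK_B (1u << 1)          /* "rare" work     */

static uint32_t handle_a(uint32_t aux) { puts("A"); return aux & ~WORK_A; }
static uint32_t handle_b(uint32_t aux) { puts("B"); return aux & ~WORK_B; }

static uint32_t dispatch(uint32_t aux_work)
{
    uint32_t ignore = 0;

#define HANDLE(FLG, HNDLR)                                   \
    do {                                                     \
        ignore |= (FLG);                                     \
        if (aux_work & (FLG)) {                              \
            aux_work = HNDLR(aux_work);                      \
            if (!(aux_work & ~ignore))                       \
                return aux_work;    /* nothing new left */   \
        }                                                    \
    } while (0)

    HANDLE(WORK_A, handle_a);         /* most frequent first */
    HANDLE(WORK_B, handle_b);
#undef HANDLE
    return aux_work;
}

int main(void)
{
    dispatch(WORK_A | WORK_B);        /* prints A then B */
    dispatch(WORK_B);                 /* prints only B   */
    return 0;
}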
+
+typedef struct {
+ union {
+ ErlTimer data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))];
+ } timer;
+
+ int initialized;
+ erts_atomic32_t refc;
+ erts_atomic32_t type[1];
+} ErtsAuxWorkTmo;
+
+static ErtsAuxWorkTmo *aux_work_tmo;
+
+static void
+aux_work_timeout_early_init(int no_schedulers)
+{
+ int i;
+ UWord p;
+
+ /*
+ * This is done really early. Our own allocators have
+ * not been started yet.
+ */
+
+ p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ + sizeof(erts_atomic32_t)*(no_schedulers+1))
+ + ERTS_CACHE_LINE_SIZE-1);
+ if (p & ERTS_CACHE_LINE_MASK)
+ p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
+
+ aux_work_tmo = (ErtsAuxWorkTmo *) p;
+ aux_work_tmo->initialized = 0;
+ erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
+ for (i = 0; i <= no_schedulers; i++)
+ erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
+}
+
+void
+aux_work_timeout_late_init(void)
+{
+ aux_work_tmo->initialized = 1;
+ if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
+ aux_work_tmo->timer.data.active = 0;
+ erts_set_timer(&aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ NULL,
+ 1000);
+ }
+}
static void
-prepare_for_block(void *vrq)
+aux_work_timeout(void *unused)
{
- erts_smp_runq_unlock((ErtsRunQueue *) vrq);
+ erts_aint32_t refc;
+ int i;
+#ifdef ERTS_SMP
+ i = 0;
+#else
+ i = 1;
+#endif
+
+ for (; i <= erts_no_schedulers; i++) {
+ erts_aint32_t type;
+ type = erts_atomic32_read_acqb(&aux_work_tmo->type[i]);
+ if (type)
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i-1),
+ type);
+ }
+
+ refc = erts_atomic32_read_nob(&aux_work_tmo->refc);
+ ASSERT(refc >= 1);
+ if (refc != 1
+ || 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
+ /* Setup next timeout... */
+ aux_work_tmo->timer.data.active = 0;
+ erts_set_timer(&aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ NULL,
+ 1000);
+ }
}
static void
-resume_after_block(void *vrq)
+setup_aux_work_timer(void)
{
- erts_smp_runq_lock((ErtsRunQueue *) vrq);
+#ifndef ERTS_SMP
+ if (!erts_get_scheduler_data())
+ set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0),
+ ERTS_SSI_AUX_WORK_SET_TMO);
+ else
+#endif
+ {
+ aux_work_tmo->timer.data.active = 0;
+ erts_set_timer(&aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ NULL,
+ 1000);
+ }
}
+erts_aint32_t
+erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
+{
+ erts_aint32_t old, refc;
+
+#ifndef ERTS_SMP
+ ix = 1;
#endif
+ ERTS_DBG_CHK_AUX_WORK_VAL(type);
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
+// erts_fprintf(stderr, "t(%d, 0x%x, %d)\n", ix, type, enable);
+
+ if (!enable) {
+ old = erts_atomic32_read_band_mb(&aux_work_tmo->type[ix], ~type);
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
+ if (old != 0 && (old & ~type) == 0)
+ erts_atomic32_dec_relb(&aux_work_tmo->refc);
+ return old;
+ }
+
+ old = erts_atomic32_read_bor_mb(&aux_work_tmo->type[ix], type);
+ ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
+ if (old == 0 && type != 0) {
+ refc = erts_atomic32_inc_read_acqb(&aux_work_tmo->refc);
+ if (refc == 1) {
+ erts_atomic32_inc_acqb(&aux_work_tmo->refc);
+ if (aux_work_tmo->initialized)
+ setup_aux_work_timer();
+ }
+ }
+ return old;
+}
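
erts_set_aux_work_timeout() and aux_work_timeout() together implement a self-retiring periodic timer: the timer owns one reference, a scheduler takes a reference when it enables its first timeout-driven aux-work type and drops it when it disables the last one, and the callback re-arms itself only while someone other than the timer still holds a reference. A rough C11 sketch of that reference discipline; arm_timer() is a hypothetical stand-in for erts_set_timer(), and the real code additionally guards against being called before the timer service is initialized.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refc;

static void timeout_cb(void);

static void arm_timer(void (*cb)(void), int ms)   /* timer plumbing omitted */
{
    printf("timer armed for %d ms\n", ms);
    (void) cb;
}

void enable_periodic_work(void)       /* a scheduler enables its first type */
{
    if (atomic_fetch_add(&refc, 1) == 0) {
        atomic_fetch_add(&refc, 1);   /* extra reference owned by the timer */
        arm_timer(timeout_cb, 1000);
    }
}

void disable_periodic_work(void)      /* a scheduler disables its last type */
{
    atomic_fetch_sub(&refc, 1);
}

static void timeout_cb(void)
{
    int expected = 1;
    /* ...set the timed-out aux-work flags and wake the schedulers... */
    if (atomic_load(&refc) != 1
        || !atomic_compare_exchange_strong(&refc, &expected, 0))
        arm_timer(timeout_cb, 1000);  /* still wanted: set up the next tick */
    /* otherwise only the timer's own reference was left; it retires itself */
}

int main(void)
{
    enable_periodic_work();           /* refc 0 -> 2, timer armed        */
    disable_periodic_work();          /* refc 2 -> 1                     */
    timeout_cb();                     /* refc 1 -> 0, timer not re-armed */
    return 0;
}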
+
+
+
static ERTS_INLINE void
sched_waiting_sys(Uint no, ErtsRunQueue *rq)
{
@@ -800,8 +1696,6 @@ sched_active_sys(Uint no, ErtsRunQueue *rq)
Uint
erts_active_schedulers(void)
{
- /* RRRRRRRRR */
-
Uint as = erts_no_schedulers;
ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
@@ -815,7 +1709,7 @@ erts_active_schedulers(void)
static ERTS_INLINE void
clear_sys_scheduling(void)
{
- erts_smp_atomic32_set_relb(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set_mb(&doing_sys_schedule, 0);
}
static ERTS_INLINE int
@@ -882,42 +1776,43 @@ sched_active(Uint no, ErtsRunQueue *rq)
static int ERTS_INLINE
ongoing_multi_scheduling_block(void)
{
- return erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing) != 0;
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx));
+ return schdlr_sspnd.msb.ongoing;
}
static ERTS_INLINE void
empty_runq(ErtsRunQueue *rq)
{
- erts_aint32_t oifls = erts_smp_atomic32_band(&rq->info_flags,
- ~ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_read_band_nob(&rq->info_flags,
+ ~ERTS_RUNQ_IFLG_NONEMPTY);
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
- erts_smp_atomic32_inc(&no_empty_run_queues);
+ erts_smp_atomic32_inc_relb(&no_empty_run_queues);
}
}
static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
- erts_aint32_t oifls = erts_smp_atomic32_bor(&rq->info_flags,
- ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_read_bor_nob(&rq->info_flags,
+ ERTS_RUNQ_IFLG_NONEMPTY);
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
- erts_smp_atomic32_dec(&no_empty_run_queues);
+ erts_smp_atomic32_dec_relb(&no_empty_run_queues);
}
}
@@ -930,7 +1825,7 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t xflgs = 0;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -947,7 +1842,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -987,9 +1882,13 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
erts_tse_reset(ssi->event);
+ else {
+ ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
+ erts_sys_schedule_interrupt(0);
+ }
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
@@ -1005,34 +1904,137 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
(((FLGS) & (ERTS_SSI_FLG_WAITING|ERTS_SSI_FLG_SUSPENDED)) \
!= ERTS_SSI_FLG_WAITING)
+
+static void
+thr_prgr_wakeup(void *vssi)
+{
+ erts_sched_poke((ErtsSchedulerSleepInfo *) vssi);
+}
+
+static void
+thr_prgr_prep_wait(void *vssi)
+{
+ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
+ erts_smp_atomic32_read_bor_acqb(&ssi->flags,
+ ERTS_SSI_FLG_SLEEPING);
+}
+
+static void
+thr_prgr_wait(void *vssi)
+{
+ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
+ erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING;
+
+ erts_tse_reset(ssi->event);
+
+ while (1) {
+ erts_aint32_t aflgs, nflgs;
+ nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING;
+ aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
+ if (aflgs == xflgs) {
+ erts_tse_wait(ssi->event);
+ break;
+ }
+ if ((aflgs & ERTS_SSI_FLG_SLEEPING) == 0)
+ break;
+ xflgs = aflgs;
+ }
+}
+
+static void
+thr_prgr_fin_wait(void *vssi)
+{
+ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi;
+ erts_smp_atomic32_read_band_nob(&ssi->flags,
+ ~(ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING));
+}
+
+static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp);
+
+static void *
+aux_thread(void *unused)
+{
+ ErtsAuxWorkData *awdp = aux_thread_aux_work_data;
+ ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
+ erts_aint32_t aux_work;
+ ErtsThrPrgrCallbacks callbacks;
+ int thr_prgr_active = 1;
+
+ ssi->event = erts_tse_fetch();
+
+ callbacks.arg = (void *) ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
+
+ erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ init_aux_work_data(awdp, NULL);
+ awdp->ssi = ssi;
+
+ sched_prep_spin_wait(ssi);
+
+ while (1) {
+ erts_aint32_t flgs;
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ aux_work = handle_aux_work(awdp, aux_work);
+ if (aux_work && erts_thr_progress_update(NULL))
+ erts_thr_progress_leader_update(NULL);
+ }
+
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_prepare_wait(NULL);
+
+ flgs = sched_spin_wait(ssi, 0);
+
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
+ }
+ erts_thr_progress_finalize_wait(NULL);
+ }
+
+ flgs = sched_prep_spin_wait(ssi);
+ }
+ return NULL;
+}
+
+#endif /* ERTS_SMP */
+
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
+ int working = 1;
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
int spincount;
+ erts_aint32_t aux_work = 0;
+#ifdef ERTS_SMP
+ int thr_prgr_active = 1;
erts_aint32_t flgs;
-#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
- || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
- erts_aint32_t aux_work;
-#endif
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- erts_smp_spin_lock(&rq->sleepers.lock);
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
- erts_smp_spin_unlock(&rq->sleepers.lock);
return;
}
- ssi->prev = NULL;
- ssi->next = rq->sleepers.list;
- if (rq->sleepers.list)
- rq->sleepers.list->prev = ssi;
- rq->sleepers.list = ssi;
- erts_smp_spin_unlock(&rq->sleepers.lock);
-
/*
* If all schedulers are waiting, one of them *should*
* be waiting in erl_sys_schedule()
@@ -1048,34 +2050,45 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
- tse_blockable_aux_work:
- aux_work = blockable_aux_work(esdp, ssi, aux_work);
-#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ if (thr_prgr_active != working)
+ sched_wall_time_change(esdp, thr_prgr_active);
while (1) {
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
-#endif
- nonblockable_aux_work(esdp, ssi, aux_work);
-#endif
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ }
- flgs = sched_spin_wait(ssi, spincount);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (aux_work)
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ else {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+
+ flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
}
+ erts_thr_progress_finalize_wait(esdp);
}
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
@@ -1091,26 +2104,23 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
break;
}
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
- if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- goto tse_blockable_aux_work;
- }
-#endif
-
}
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
-
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
}
- else {
+ else
+#endif
+ {
erts_aint_t dt;
erts_smp_atomic32_set_relb(&function_calls, 0);
@@ -1118,14 +2128,21 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_waiting_sys(esdp->no, rq);
+
erts_smp_runq_unlock(rq);
+ ASSERT(working);
+ sched_wall_time_change(esdp, working = 0);
+
spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT;
while (spincount-- > 0) {
sys_poll_aux_work:
+ if (working)
+ sched_wall_time_change(esdp, working = 0);
+
ASSERT(!erts_port_task_have_outstanding_io_tasks());
erl_sys_schedule(1); /* Might give us something to do */
@@ -1134,30 +2151,34 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (dt) erts_bump_timer(dt);
sys_aux_work:
-
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
- aux_work = blockable_aux_work(esdp, ssi, aux_work);
+#ifndef ERTS_SMP
+ erts_sys_schedule_interrupt(0);
#endif
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!working)
+ sched_wall_time_change(esdp, working = 1);
+#ifdef ERTS_SMP
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
#endif
- nonblockable_aux_work(esdp, ssi, aux_work);
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+#ifdef ERTS_SMP
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
#endif
+ }
+#ifndef ERTS_SMP
+ if (rq->len != 0 || rq->misc.start)
+ goto sys_woken;
+#else
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
goto sys_woken;
}
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- }
/*
* If we got new I/O tasks we aren't allowed to
@@ -1174,10 +2195,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
goto tse_wait;
}
}
+#endif
}
erts_smp_runq_lock(rq);
+#ifdef ERTS_SMP
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
@@ -1189,64 +2212,93 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (prepare_for_sys_schedule())
- goto do_sys_schedule;
-
- /*
- * Not allowed to wait in erl_sys_schedule;
- * do tse wait instead...
- */
- sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ if (!prepare_for_sys_schedule()) {
+ /*
+ * Not allowed to wait in erl_sys_schedule;
+ * do tse wait instead...
+ */
+ sched_change_waiting_sys_to_waiting(esdp->no, rq);
+ erts_smp_runq_unlock(rq);
+ spincount = 0;
+ goto tse_wait;
+ }
+ }
+#endif
+ if (aux_work) {
erts_smp_runq_unlock(rq);
- spincount = 0;
- goto tse_wait;
+ goto sys_poll_aux_work;
}
- else {
- do_sys_schedule:
- erts_sys_schedule_interrupt(0);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
- if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
- if (!(flgs & ERTS_SSI_FLG_WAITING))
- goto sys_locked_woken;
- erts_smp_runq_unlock(rq);
- flgs = sched_prep_cont_spin_wait(ssi);
- if (!(flgs & ERTS_SSI_FLG_WAITING)) {
- ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
- goto sys_woken;
- }
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
- goto sys_poll_aux_work;
+#ifdef ERTS_SMP
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_locked_woken;
}
+ erts_smp_runq_unlock(rq);
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (!(flgs & ERTS_SSI_FLG_WAITING)) {
+ ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
+ goto sys_woken;
+ }
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ goto sys_poll_aux_work;
+ }
- ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+#endif
- erts_smp_runq_unlock(rq);
+ erts_smp_runq_unlock(rq);
- ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ if (working)
+ sched_wall_time_change(esdp, working = 0);
- erl_sys_schedule(0);
+#ifdef ERTS_SMP
+ if (thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+#endif
- dt = erts_do_time_read_and_reset();
- if (dt) erts_bump_timer(dt);
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
- flgs = sched_prep_cont_spin_wait(ssi);
- if (flgs & ERTS_SSI_FLG_WAITING)
- goto sys_aux_work;
+ erl_sys_schedule(0);
+
+ dt = erts_do_time_read_and_reset();
+ if (dt) erts_bump_timer(dt);
+
+#ifndef ERTS_SMP
+ if (rq->len == 0 && !rq->misc.start)
+ goto sys_aux_work;
+ sys_woken:
+#else
+ flgs = sched_prep_cont_spin_wait(ssi);
+ if (flgs & ERTS_SSI_FLG_WAITING)
+ goto sys_aux_work;
- sys_woken:
+ sys_woken:
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_smp_runq_lock(rq);
+ sys_locked_woken:
+ if (!thr_prgr_active) {
+ erts_smp_runq_unlock(rq);
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
erts_smp_runq_lock(rq);
- sys_locked_woken:
- clear_sys_scheduling();
- if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
- sched_active_sys(esdp->no, rq);
}
+ clear_sys_scheduling();
+ if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
+ erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+#endif
+ if (!working)
+ sched_wall_time_change(esdp, working = 1);
+ sched_active_sys(esdp->no, rq);
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
}
+#ifdef ERTS_SMP
+
static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
{
@@ -1255,7 +2307,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t nflgs = 0;
erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
@@ -1264,10 +2316,10 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq, int one)
+wake_scheduler(ErtsRunQueue *rq, int incq)
{
ErtsSchedulerSleepInfo *ssi;
- ErtsSchedulerSleepList *sl;
+ erts_aint32_t flgs;
/*
* The unlocked run queue is not strictly necessary
@@ -1279,57 +2331,13 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one)
*/
ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
- sl = &rq->sleepers;
-
- erts_smp_spin_lock(&sl->lock);
- ssi = sl->list;
- if (!ssi)
- erts_smp_spin_unlock(&sl->lock);
- else if (one) {
- erts_aint32_t flgs;
- if (ssi->prev)
- ssi->prev->next = ssi->next;
- else {
- ASSERT(sl->list == ssi);
- sl->list = ssi->next;
- }
- if (ssi->next)
- ssi->next->prev = ssi->prev;
-
- erts_smp_spin_unlock(&sl->lock);
+ ssi = rq->scheduler->ssi;
- ERTS_THR_MEMORY_BARRIER;
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
-
- if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
- non_empty_runq(rq);
- }
- else {
- sl->list = NULL;
- erts_smp_spin_unlock(&sl->lock);
-
- ERTS_THR_MEMORY_BARRIER;
- do {
- ErtsSchedulerSleepInfo *wake_ssi = ssi;
- ssi = ssi->next;
- erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
- } while (ssi);
- }
-}
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
-static void
-wake_all_schedulers(void)
-{
- if (erts_common_run_queue)
- wake_scheduler(erts_common_run_queue, 0, 0);
- else {
- int ix;
- for (ix = 0; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0, 1);
- }
- }
+ if (incq && (flgs & ERTS_SSI_FLG_WAITING))
+ non_empty_runq(rq);
}
#define ERTS_NO_USED_RUNQS_SHIFT 16
@@ -1344,13 +2352,13 @@ init_no_runqs(int active, int used)
{
erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK);
no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT);
- erts_smp_atomic32_init(&balance_info.no_runqs, no_runqs);
+ erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs);
}
static ERTS_INLINE void
get_no_runqs(int *active, int *used)
{
- erts_aint32_t no_runqs = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
if (active)
*active = (int) (no_runqs & ERTS_NO_RUNQS_MASK);
if (used)
@@ -1360,11 +2368,12 @@ get_no_runqs(int *active, int *used)
static ERTS_INLINE void
set_no_used_runqs(int used)
{
- erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
- new = (used << ERTS_NO_USED_RUNQS_SHIFT) | (exp & ERTS_NO_RUNQS_MASK);
- act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+ new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT;
+ new |= exp & ERTS_NO_RUNQS_MASK;
+ act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -1374,11 +2383,12 @@ set_no_used_runqs(int used)
static ERTS_INLINE void
set_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
- new = (exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT)) | active;
- act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+ new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
+ new |= active & ERTS_NO_RUNQS_MASK;
+ act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -1388,13 +2398,14 @@ set_no_active_runqs(int active)
static ERTS_INLINE int
try_inc_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active)
return 0;
if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) {
erts_aint32_t new, act;
- new = (exp & ~ERTS_NO_RUNQS_MASK) | active;
- act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+ new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
+ new |= active & ERTS_NO_RUNQS_MASK;
+ act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
return 1;
}
@@ -1410,7 +2421,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
if (crq->ix == ix)
return 0;
wrq = ERTS_RUNQ_IX(ix);
- iflgs = erts_smp_atomic32_read(&wrq->info_flags);
+ iflgs = erts_smp_atomic32_read_nob(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
if (activate) {
if (try_inc_no_active_runqs(ix+1)) {
@@ -1419,7 +2430,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0, 1);
+ wake_scheduler(wrq, 0);
return 1;
}
return 0;
@@ -1467,7 +2478,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
if (runq)
- wake_scheduler(runq, 1, 1);
+ wake_scheduler(runq, 1);
#endif
}
@@ -1482,19 +2493,12 @@ erts_sched_notify_check_cpu_bind(void)
{
#ifdef ERTS_SMP
int ix;
- if (erts_common_run_queue) {
- for (ix = 0; ix < erts_no_schedulers; ix++)
- erts_smp_atomic32_set_relb(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
- wake_all_schedulers();
- }
- else {
- for (ix = 0; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_smp_runq_unlock(rq);
- wake_scheduler(rq, 0, 1);
- };
+ for (ix = 0; ix < erts_no_run_queues; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0);
}
#else
erts_sched_check_cpu_bind(erts_get_scheduler_data());
@@ -1652,15 +2656,15 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
erts_smp_runq_lock(evac_rq);
- erts_smp_atomic32_bor(&evac_rq->scheduler->ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&evac_rq->scheduler->ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
- erts_smp_atomic32_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1763,7 +2767,7 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (notify_to_rq)
smp_notify_inc_runq(rq);
- wake_scheduler(evac_rq, 0, 1);
+ wake_scheduler(evac_rq, 0);
}
static int
@@ -1781,6 +2785,9 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq)
ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+ if (rq->halt_in_progress)
+ goto try_steal_port;
+
/*
* Check for a runnable process to steal...
*/
@@ -1867,6 +2874,8 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq)
vrq_locked = 1;
}
+ try_steal_port:
+
ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
@@ -1909,7 +2918,7 @@ static ERTS_INLINE int
check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
- erts_aint32_t iflgs = erts_smp_atomic32_read(&vrq->info_flags);
+ erts_aint32_t iflgs = erts_smp_atomic32_read_nob(&vrq->info_flags);
if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY)
return try_steal_task_from_victim(rq, rq_lockedp, vrq);
else
@@ -1921,9 +2930,6 @@ static int
try_steal_task(ErtsRunQueue *rq)
{
int res, rq_locked, vix, active_rqs, blnc_rqs;
-
- if (erts_common_run_queue)
- return 0;
/*
* We are not allowed to steal jobs to this run queue
@@ -1985,7 +2991,8 @@ try_steal_task(ErtsRunQueue *rq)
erts_smp_runq_lock(rq);
if (!res)
- res = !ERTS_EMPTY_RUNQ(rq);
+ res = rq->halt_in_progress ?
+ !ERTS_EMPTY_RUNQ_PORTS(rq) : !ERTS_EMPTY_RUNQ(rq);
return res;
}
@@ -2061,7 +3068,7 @@ check_balance(ErtsRunQueue *c_rq)
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
- if (erts_smp_atomic32_xchg(&balance_info.checking_balance, 1)) {
+ if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
return;
}
@@ -2069,7 +3076,7 @@ check_balance(ErtsRunQueue *c_rq)
get_no_runqs(NULL, &blnc_no_rqs);
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
@@ -2077,7 +3084,7 @@ check_balance(ErtsRunQueue *c_rq)
if (balance_info.halftime) {
balance_info.halftime = 0;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
@@ -2111,7 +3118,7 @@ check_balance(ErtsRunQueue *c_rq)
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_runq_lock(c_rq);
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
@@ -2215,6 +3222,9 @@ check_balance(ErtsRunQueue *c_rq)
mmax_len = run_queue_info[qix].max_len;
}
+ if (!erts_sched_compact_load)
+ goto all_active;
+
if (!forced && half_full_scheds != blnc_no_rqs) {
int min = 1;
if (min < half_full_scheds)
@@ -2456,7 +3466,7 @@ erts_fprintf(stderr, "--------------------------------\n");
set_no_active_runqs(active);
balance_info.halftime = 1;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
/* Write migration paths and reset balance statistics in all queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -2551,8 +3561,9 @@ erts_debug_nbalance(void)
}
void
-erts_early_init_scheduling(void)
+erts_early_init_scheduling(int no_schedulers)
{
+ aux_work_timeout_early_init(no_schedulers);
wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM;
}
@@ -2573,16 +3584,32 @@ erts_sched_set_wakeup_limit(char *str)
return EINVAL;
return 0;
}
-
-void
-erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
+static void
+init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp)
{
- int ix, n;
-
-#ifndef ERTS_SMP
- mrq = 0;
+ awdp->sched_id = esdp ? (int) esdp->no : 0;
+ awdp->esdp = esdp;
+ awdp->ssi = esdp ? esdp->ssi : NULL;
+#ifdef ERTS_SMP
+ awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
+ awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
+ awdp->dd.completed_callback = NULL;
+ awdp->dd.completed_arg = NULL;
+#endif
+#ifdef ERTS_USE_ASYNC_READY_Q
+#ifdef ERTS_SMP
+ awdp->async_ready.need_thr_prgr = 0;
+ awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
#endif
+ awdp->async_ready.queue = NULL;
+#endif
+}
+
+void
+erts_init_scheduling(int no_schedulers, int no_schedulers_online)
+{
+ int ix, n, no_ssi;
init_misc_op_list_alloc();
@@ -2592,13 +3619,13 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
/* Create and initialize run queues */
- n = (int) (mrq ? no_schedulers : 1);
+ n = no_schedulers;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS,
sizeof(ErtsAlignedRunQueue) * n);
#ifdef ERTS_SMP
- erts_smp_atomic32_init(&no_empty_run_queues, 0);
+ erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
#endif
erts_no_run_queues = n;
@@ -2608,7 +3635,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
rq->ix = ix;
- erts_smp_atomic32_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_smp_atomic32_init_nob(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
/* make sure that the "extra" id corresponds to the scheduler's
* id if the esdp->no <-> ix+1 mapping changes.
@@ -2617,14 +3644,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
-#ifdef ERTS_SMP
- erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
- rq->sleepers.list = NULL;
-#endif
-
rq->waiting = 0;
rq->woken = 0;
- rq->flags = !mrq ? ERTS_RUNQ_FLG_SHARED_RUNQ : 0;
+ rq->flags = 0;
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
rq->full_reds_history_sum = 0;
for (rix = 0; rix < ERTS_FULL_REDS_HISTORY_SIZE; rix++) {
@@ -2636,6 +3658,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
rq->len = 0;
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
+ rq->halt_in_progress = 0;
rq->procs.len = 0;
rq->procs.pending_exiters = NULL;
@@ -2670,8 +3693,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
rq->ports.end = NULL;
}
- erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
-
#ifdef ERTS_SMP
if (erts_no_run_queues != 1) {
@@ -2688,23 +3709,31 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
n = (int) no_schedulers;
erts_no_schedulers = n;
-#ifdef ERTS_SMP
/* Create and initialize scheduler sleep info */
-
+#ifdef ERTS_SMP
+ no_ssi = n+1;
+#else
+ no_ssi = 1;
+#endif
aligned_sched_sleep_info =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_SLP_INFO,
- n * sizeof(ErtsAlignedSchedulerSleepInfo));
-
- for (ix = 0; ix < n; ix++) {
- ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_SLP_INFO,
+ no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo));
+ for (ix = 0; ix < no_ssi; ix++) {
+ ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi;
+#ifdef ERTS_SMP
#if 0 /* no need to initialize these... */
ssi->next = NULL;
ssi->prev = NULL;
#endif
- erts_smp_atomic32_init(&ssi->flags, 0);
+ erts_smp_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
- erts_smp_atomic32_init(&ssi->aux_work, 0);
+#endif
+ erts_atomic32_init_nob(&ssi->aux_work, 0);
}
+
+#ifdef ERTS_SMP
+ aligned_sched_sleep_info++;
#endif
/* Create and initialize scheduler specific data */
@@ -2718,17 +3747,20 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
- esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->free_process = NULL;
-#if HALFWORD_HEAP
- /* Registers need to be heap allocated (correct memory range) for tracing to work */
- esdp->save_reg = erts_alloc(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * sizeof(Eterm));
-#endif
#endif
+ esdp->x_reg_array =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
+ ERTS_X_REGS_ALLOCATED *
+ sizeof(Eterm));
+ esdp->f_reg_array =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
+ MAX_REG * sizeof(FloatDef));
#if !HEAP_ON_C_STACK
esdp->num_tmp_heap_used = 0;
#endif
esdp->no = (Uint) ix+1;
+ esdp->ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
esdp->current_process = NULL;
esdp->current_port = NULL;
@@ -2737,54 +3769,52 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_init_atom_cache_map(&esdp->atom_cache_map);
- if (erts_common_run_queue) {
- esdp->run_queue = erts_common_run_queue;
- esdp->run_queue->scheduler = NULL;
- }
- else {
- esdp->run_queue = ERTS_RUNQ_IX(ix);
- esdp->run_queue->scheduler = esdp;
- }
+ esdp->run_queue = ERTS_RUNQ_IX(ix);
+ esdp->run_queue->scheduler = esdp;
-#ifdef ERTS_SMP
- erts_smp_atomic32_init(&esdp->chk_cpu_bind, 0);
-#endif
+ init_aux_work_data(&esdp->aux_work_data, esdp);
+ init_sched_wall_time(&esdp->sched_wall_time);
}
+ init_misc_aux_work();
+#if !HALFWORD_HEAP
+ init_swtreq_alloc();
+#endif
+
+
#ifdef ERTS_SMP
+
+ erts_atomic32_init_nob(&completed_dealloc_count, 0); /* debug only */
+
+ aux_thread_aux_work_data =
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+ sizeof(ErtsAuxWorkData));
+
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- erts_smp_atomic32_init(&schdlr_sspnd.changing, 0);
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
- erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0);
- erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers);
+ schdlr_sspnd.msb.ongoing = 0;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
schdlr_sspnd.msb.procs = NULL;
- init_no_runqs(no_schedulers,
- erts_common_run_queue ? 1 : no_schedulers_online);
+ init_no_runqs(no_schedulers, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
- erts_smp_atomic32_init(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0);
balance_info.prev_rise.active_runqs = 0;
balance_info.prev_rise.max_len = 0;
balance_info.prev_rise.reds = 0;
balance_info.n = 0;
if (no_schedulers_online < no_schedulers) {
- if (erts_common_run_queue) {
- for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- else {
- for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
- evacuate_run_queue(ERTS_RUNQ_IX(ix),
- ERTS_RUNQ_IX(ix % no_schedulers_online));
- }
+ for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix),
+ ERTS_RUNQ_IX(ix % no_schedulers_online));
}
schdlr_sspnd.wait_curr_online = no_schedulers_online;
@@ -2792,7 +3822,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- erts_smp_atomic32_init(&doing_sys_schedule, 0);
+ erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
init_misc_aux_work();
@@ -2808,11 +3838,13 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_no_schedulers = 1;
#endif
- erts_smp_atomic32_init(&function_calls, 0);
+ erts_smp_atomic32_init_nob(&function_calls, 0);
/* init port tasks */
erts_port_task_init();
+ aux_work_timeout_late_init();
+
#ifndef ERTS_SMP
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_scheduler_data->verify_unused_temp_alloc
@@ -2821,14 +3853,15 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
#endif
+
+ erts_smp_atomic32_init_relb(&erts_halt_progress, -1);
+ erts_halt_code = 0;
}
ErtsRunQueue *
erts_schedid2runq(Uint id)
{
int ix;
- if (erts_common_run_queue)
- return erts_common_run_queue;
ix = (int) id - 1;
ASSERT(0 <= ix && ix < erts_no_run_queues);
return ERTS_RUNQ_IX(ix);
@@ -2935,10 +3968,10 @@ int
erts_get_max_no_executing_schedulers(void)
{
#ifdef ERTS_SMP
- if (erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ if (erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
return (int) erts_no_schedulers;
ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic32_read(&schdlr_sspnd.active);
+ return (int) erts_smp_atomic32_read_nob(&schdlr_sspnd.active);
#else
return 1;
#endif
@@ -2947,18 +3980,6 @@ erts_get_max_no_executing_schedulers(void)
#ifdef ERTS_SMP
static void
-susp_sched_prep_block(void *unused)
-{
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-}
-
-static void
-susp_sched_resume_block(void *unused)
-{
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-}
-
-static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
@@ -2968,7 +3989,7 @@ scheduler_ix_resume_wake(Uint ix)
| ERTS_SSI_FLG_SUSPENDED);
erts_aint32_t oflgs;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, 0, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs);
if (oflgs == xflgs) {
erts_sched_finish_poke(ssi, oflgs);
break;
@@ -2987,7 +4008,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
erts_aint32_t xflgs = xpct;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -3037,7 +4058,7 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING
@@ -3062,10 +4083,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
long active_schedulers;
int curr_online = 1;
int wake = 0;
-#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
- || defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
erts_aint32_t aux_work;
-#endif
+ int thr_prgr_active = 1;
/*
* Schedulers may be suspended in two different ways:
@@ -3087,20 +4106,22 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_inactive);
+ sched_wall_time_change(esdp, 0);
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- active_schedulers = erts_smp_atomic32_dectest(&schdlr_sspnd.active);
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
ASSERT(active_schedulers >= 1);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
if (active_schedulers == schdlr_sspnd.msb.wait_active)
wake = 1;
if (active_schedulers == 1) {
- changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
}
}
@@ -3122,8 +4143,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
&& schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
wake = 1;
if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
}
}
@@ -3133,80 +4154,76 @@ suspend_scheduler(ErtsSchedulerData *esdp)
wake = 0;
}
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
- break;
+ if (curr_online && !ongoing_multi_scheduling_block()) {
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ }
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
- blockable_aux_work:
- blockable_aux_work(esdp, ssi, aux_work);
-#endif
-
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
while (1) {
erts_aint32_t flgs;
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
-#endif
- nonblockable_aux_work(esdp, ssi, aux_work);
-#endif
- flgs = sched_spin_suspended(ssi,
- ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
- if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED)) {
- flgs = sched_set_suspended_sleeptype(ssi);
+ aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
+ if (aux_work) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ aux_work = handle_aux_work(&esdp->aux_work_data, aux_work);
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
+ }
+
+ if (!aux_work) {
+ if (thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ erts_thr_progress_prepare_wait(esdp);
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
if (flgs == (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED)) {
- int res;
- do {
- res = erts_tse_wait(ssi->event);
- } while (res == EINTR);
+ flgs = sched_set_suspended_sleeptype(ssi);
+ if (flgs == (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED)) {
+ int res;
+
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
+ }
}
+ erts_thr_progress_finalize_wait(esdp);
}
flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED));
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
break;
-
-
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
- if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- goto blockable_aux_work;
- }
-#endif
-
}
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
-
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
}
- active_schedulers = erts_smp_atomic32_inctest(&schdlr_sspnd.active);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ active_schedulers = erts_smp_atomic32_inc_read_nob(&schdlr_sspnd.active);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
&& schdlr_sspnd.online == active_schedulers) {
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
ASSERT(no <= schdlr_sspnd.online);
- ASSERT(!erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
+ ASSERT(!ongoing_multi_scheduling_block());
}
@@ -3217,6 +4234,11 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (erts_system_profile_flags.scheduler)
profile_scheduler(make_small(esdp->no), am_active);
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
@@ -3235,7 +4257,7 @@ do { \
(RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \
(RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \
- erts_smp_atomic32_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
+ erts_smp_atomic32_read_band_nob(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \
(RQ)->procs.prio_info[pix__].max_len = 0; \
(RQ)->procs.prio_info[pix__].reds = 0; \
@@ -3279,7 +4301,7 @@ erts_schedulers_state(Uint *total,
int res;
erts_aint32_t changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
@@ -3299,18 +4321,22 @@ erts_set_schedulers_online(Process *p,
Sint new_no,
Sint *old_no)
{
- int ix, res, no, have_unlocked_plocks;
+ ErtsSchedulerData *esdp;
+ int ix, res, no, have_unlocked_plocks, end_wait;
erts_aint32_t changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+ end_wait = 0;
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
have_unlocked_plocks = 0;
no = (int) new_no;
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
@@ -3330,10 +4356,6 @@ erts_set_schedulers_online(Process *p,
for (ix = online; ix < no; ix++)
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else if (erts_common_run_queue) {
- for (ix = online; ix < no; ix++)
- scheduler_ix_resume_wake(ix);
- }
else {
if (plocks) {
have_unlocked_plocks = 1;
@@ -3381,15 +4403,6 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++)
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else if (erts_common_run_queue) {
- for (ix = no; ix < online; ix++) {
- ErtsSchedulerSleepInfo *ssi;
- ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_bor(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_all_schedulers();
- }
else {
if (plocks) {
have_unlocked_plocks = 1;
@@ -3416,32 +4429,42 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
for (ix = no; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0, 1);
+ wake_scheduler(rq, 0);
}
}
}
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
+ if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ end_wait = 1;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+
while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
+
ASSERT(res != ERTS_SCHDLR_SSPND_DONE
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+
}
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
if (have_unlocked_plocks)
erts_smp_proc_lock(p, plocks);
@@ -3456,7 +4479,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -3466,7 +4489,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
plp->next = schdlr_sspnd.msb.procs;
schdlr_sspnd.msb.procs = plp;
p->flags |= F_HAVE_BLCKD_MSCHED;
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
@@ -3477,11 +4500,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- ASSERT(0 == erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
- erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 1);
+ ASSERT(!ongoing_multi_scheduling_block());
+ schdlr_sspnd.msb.ongoing = 1;
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -3499,51 +4522,65 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 2;
}
- if (erts_common_run_queue) {
- for (ix = 1; ix < online; ix++)
- erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
- wake_all_schedulers();
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ set_no_used_runqs(1);
+ for (ix = 0; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+ ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
+ erts_smp_runq_unlock(rq);
}
- else {
+ /*
+ * Evacuate all activities in all other run queues
+	     * into the first run queue. Note that order is important;
+	     * online run queues have to be evacuated last.
+ */
+ for (ix = erts_no_run_queues-1; ix >= 1; ix--)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
+ != schdlr_sspnd.msb.wait_active) {
+ ErtsSchedulerData *esdp;
+
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_smp_mtx_lock(&balance_info.update_mtx);
- set_no_used_runqs(1);
- for (ix = 0; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
- ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
- erts_smp_runq_unlock(rq);
+
+ if (plocks && !have_unlocked_plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
}
- /*
- * Evacuate all activities in all other run queues
- * into the first run queue. Note order is important,
- * online run queues has to be evacuated last.
- */
- for (ix = erts_no_run_queues-1; ix >= 1; ix--)
- evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
- erts_smp_mtx_unlock(&balance_info.update_mtx);
+
+ esdp = ERTS_PROC_GET_SCHDATA(p);
+
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
+ != schdlr_sspnd.msb.wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd,
+ &schdlr_sspnd.mtx);
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ erts_thr_progress_active(esdp, 1);
+ erts_thr_progress_finalize_wait(esdp);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
}
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (erts_smp_atomic32_read(&schdlr_sspnd.active)
- != schdlr_sspnd.msb.wait_active)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -3610,18 +4647,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
});
#endif
p->flags &= ~F_HAVE_BLCKD_MSCHED;
- erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 0);
+ schdlr_sspnd.msb.ongoing = 0;
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
- else if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic32_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
- wake_all_schedulers();
- }
else {
int online = schdlr_sspnd.online;
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -3669,7 +4700,7 @@ void
erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
{
if (return_value == am_blocked) {
- erts_aint32_t active = erts_smp_atomic32_read(&schdlr_sspnd.active);
+ erts_aint32_t active = erts_smp_atomic32_read_nob(&schdlr_sspnd.active);
ASSERT(1 <= active && active <= 2);
ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1);
}
@@ -3724,8 +4755,19 @@ erts_multi_scheduling_blockers(Process *p)
static void *
sched_thread_func(void *vesdp)
{
+ ErtsThrPrgrCallbacks callbacks;
+ ErtsSchedulerData *esdp = vesdp;
+ Uint no = esdp->no;
#ifdef ERTS_SMP
- Uint no = ((ErtsSchedulerData *) vesdp)->no;
+ ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
+ callbacks.arg = (void *) esdp->ssi;
+ callbacks.wakeup = thr_prgr_wakeup;
+ callbacks.prepare_wait = thr_prgr_prep_wait;
+ callbacks.wait = thr_prgr_wait;
+ callbacks.finalize_wait = thr_prgr_fin_wait;
+
+ erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
+ erts_alloc_register_scheduler(vesdp);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -3734,65 +4776,71 @@ sched_thread_func(void *vesdp)
erts_lc_set_thread_name(&buf[0]);
}
#endif
- erts_alloc_reg_scheduler_id(no);
erts_tsd_set(sched_data_key, vesdp);
#ifdef ERTS_SMP
+#if HAVE_ERTS_MSEG
+ erts_mseg_late_init();
+#endif
+#if ERTS_USE_ASYNC_READY_Q
+ esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no);
+#endif
- erts_sched_init_check_cpu_bind((ErtsSchedulerData *) vesdp);
+ erts_sched_init_check_cpu_bind(esdp);
erts_proc_lock_prepare_proc_lock_waiter();
- ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
-
-
#endif
- erts_register_blockable_thread();
+
#ifdef HIPE
hipe_thread_signal_init();
#endif
erts_thread_init_float();
+
+ if (no == 1) {
+ erts_thr_progress_active(esdp, 0);
+ erts_thr_progress_prepare_wait(esdp);
+ }
+
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.changing)
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)
& ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
- if (((ErtsSchedulerData *) vesdp)->no != 1)
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ if (no != 1)
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
}
- if (((ErtsSchedulerData *) vesdp)->no == 1) {
- if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
- erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
- susp_sched_prep_block,
- susp_sched_resume_block,
- NULL);
- }
+ if (no == 1) {
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (no == 1) {
+ erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_active(esdp, 1);
+ }
+
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
- ((ErtsSchedulerData *) vesdp)->verify_unused_temp_alloc
+ esdp->verify_unused_temp_alloc
= erts_alloc_get_verify_unused_temp_alloc(
- &((ErtsSchedulerData *) vesdp)->verify_unused_temp_alloc_data);
+ &esdp->verify_unused_temp_alloc_data);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
process_main();
/* No schedulers should *ever* terminate */
- erl_exit(ERTS_ABORT_EXIT, "Scheduler thread number %beu terminated\n",
- ((ErtsSchedulerData *) vesdp)->no);
+ erl_exit(ERTS_ABORT_EXIT,
+ "Scheduler thread number %beu terminated\n",
+ no);
return NULL;
}
+static ethr_tid aux_tid;
+
void
erts_start_schedulers(void)
{
@@ -3812,8 +4860,6 @@ erts_start_schedulers(void)
res = ENOTSUP;
}
- erts_block_system(0);
-
while (actual < wanted) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
actual++;
@@ -3826,7 +4872,12 @@ erts_start_schedulers(void)
}
erts_no_schedulers = actual;
- erts_release_system();
+
+ ERTS_THR_MEMORY_BARRIER;
+
+ res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
+ if (res != 0)
+ erl_exit(1, "Failed to create aux thread\n");
if (actual < 1)
erl_exit(1,
@@ -4280,7 +5331,7 @@ suspend_process_2(BIF_ALIST_2)
/* This is really a piece of cake without SMP support... */
if (!smon->active) {
- suspend_process(erts_common_run_queue, suspendee);
+ suspend_process(ERTS_RUNQ_IX(0), suspendee);
smon->active++;
res = am_true;
}
@@ -4850,8 +5901,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
|| from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- ASSERT(!erts_common_run_queue);
/*
* If we have the lock on the run queue to migrate to,
@@ -5002,25 +6051,17 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
int i;
ErtsSchedulerData *esdp;
- if (erts_common_run_queue)
- erts_smp_runq_lock(erts_common_run_queue);
-
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
- if (!erts_common_run_queue)
- erts_smp_runq_lock(esdp->run_queue);
+ erts_smp_runq_lock(esdp->run_queue);
if (esdp->free_process && esdp->free_process->id == rpid) {
res = am_free;
- if (!erts_common_run_queue)
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_runq_unlock(esdp->run_queue);
break;
}
- if (!erts_common_run_queue)
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_runq_unlock(esdp->run_queue);
}
- if (erts_common_run_queue)
- erts_smp_runq_unlock(erts_common_run_queue);
#endif
}
@@ -5178,6 +6219,15 @@ Process *schedule(Process *p, int calls)
int actual_reds;
int reds;
+#ifdef USE_VM_PROBES
+ if (p != NULL && DTRACE_ENABLED(process_unscheduled)) {
+ DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, process_buf);
+ DTRACE1(process_unscheduled, process_buf);
+ }
+#endif
+
if (ERTS_USE_MODIFIED_TIMING()) {
context_reds = ERTS_MODIFIED_TIMING_CONTEXT_REDS;
input_reductions = ERTS_MODIFIED_TIMING_INPUT_REDS;
@@ -5187,7 +6237,7 @@ Process *schedule(Process *p, int calls)
input_reductions = INPUT_REDUCTIONS;
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
/*
* Clean up after the process being scheduled out.
@@ -5214,7 +6264,7 @@ Process *schedule(Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = (int) erts_smp_atomic32_addtest(&function_calls, reds);
+ fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -5325,17 +6375,16 @@ Process *schedule(Process *p, int calls)
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+
check_activities_to_run: {
#ifdef ERTS_SMP
- if (!(rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && rq->check_balance_reds <= 0) {
+ if (rq->check_balance_reds <= 0)
check_balance(rq);
- }
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
if (rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
@@ -5343,58 +6392,51 @@ Process *schedule(Process *p, int calls)
continue_check_activities_to_run:
- if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_CHK_CPU_BIND
+ if (rq->flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
- if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
- if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
- || erts_smp_atomic32_read_acqb(&esdp->chk_cpu_bind)) {
+ if (rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
erts_sched_check_cpu_bind(esdp);
- }
}
-#if defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
- || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
{
- ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- erts_aint32_t aux_work = erts_smp_atomic32_read(&ssi->aux_work);
- if (aux_work) {
+ erts_aint32_t aux_work;
+ int leader_update = erts_thr_progress_update(esdp);
+ aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
+ if (aux_work | leader_update) {
erts_smp_runq_unlock(rq);
-#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = blockable_aux_work(esdp, ssi, aux_work);
-#endif
-#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
- nonblockable_aux_work(esdp, ssi, aux_work);
-#endif
+ if (leader_update)
+ erts_thr_progress_leader_update(esdp);
+ if (aux_work)
+ handle_aux_work(&esdp->aux_work_data, aux_work);
erts_smp_runq_lock(rq);
}
}
-#endif
- erts_smp_chk_system_block(prepare_for_block,
- resume_after_block,
- (void *) rq);
-
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#endif
+#else /* ERTS_SMP */
+ {
+ erts_aint32_t aux_work;
+ aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
+ if (aux_work)
+ handle_aux_work(&esdp->aux_work_data, aux_work);
+ }
+#endif /* ERTS_SMP */
ASSERT(rq->len == rq->procs.len + rq->ports.info.len);
-#ifndef ERTS_SMP
+ if ((rq->len == 0 && !rq->misc.start)
+ || (rq->halt_in_progress
+ && rq->ports.info.len == 0 && !rq->misc.start)) {
- if (rq->len == 0 && !rq->misc.start)
- goto do_sys_schedule;
+#ifdef ERTS_SMP
-#else /* ERTS_SMP */
- if (rq->len == 0 && !rq->misc.start) {
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
rq->wakeup_other = 0;
@@ -5402,16 +6444,11 @@ Process *schedule(Process *p, int calls)
empty_runq(rq);
- if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)) {
- if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED);
- non_empty_runq(rq);
- goto continue_check_activities_to_run;
- }
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
+ non_empty_runq(rq);
+ goto continue_check_activities_to_run;
}
else if (!(rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
/*
@@ -5425,26 +6462,17 @@ Process *schedule(Process *p, int calls)
}
}
+#endif
+
scheduler_wait(&fcalls, esdp, rq);
+#ifdef ERTS_SMP
non_empty_runq(rq);
+#endif
goto check_activities_to_run;
}
- else
-#endif /* ERTS_SMP */
- if (fcalls > input_reductions && prepare_for_sys_schedule()) {
- int runnable;
-
-#ifdef ERTS_SMP
- runnable = 1;
-#else
- do_sys_schedule:
- runnable = rq->len != 0;
- if (!runnable)
- sched_waiting_sys(esdp->no, rq);
-#endif
-
+ else if (fcalls > input_reductions && prepare_for_sys_schedule()) {
/*
* Schedule system-level activities.
*/
@@ -5454,11 +6482,11 @@ Process *schedule(Process *p, int calls)
ASSERT(!erts_port_task_have_outstanding_io_tasks());
-#ifdef ERTS_SMP
- /* erts_sys_schedule_interrupt(0); */
+#if 0 /* Not needed since we won't wait in sys schedule */
+ erts_sys_schedule_interrupt(0);
#endif
erts_smp_runq_unlock(rq);
- erl_sys_schedule(runnable);
+ erl_sys_schedule(1);
dt = erts_do_time_read_and_reset();
if (dt) erts_bump_timer(dt);
#ifdef ERTS_SMP
@@ -5466,8 +6494,6 @@ Process *schedule(Process *p, int calls)
clear_sys_scheduling();
goto continue_check_activities_to_run;
#else
- if (!runnable)
- sched_active_sys(esdp->no, rq);
goto check_activities_to_run;
#endif
}
@@ -5487,11 +6513,7 @@ Process *schedule(Process *p, int calls)
else if (rq->wakeup_other < wakeup_other_limit)
rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC;
else {
- if (erts_common_run_queue) {
- if (erts_common_run_queue->waiting)
- wake_scheduler(erts_common_run_queue, 0, 1);
- }
- else if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
+ if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
@@ -5509,7 +6531,8 @@ Process *schedule(Process *p, int calls)
if (rq->ports.info.len) {
int have_outstanding_io;
have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
- if (have_outstanding_io && fcalls > 2*input_reductions) {
+ if ((have_outstanding_io && fcalls > 2*input_reductions)
+ || rq->halt_in_progress) {
/*
* If we have performed more than 2*INPUT_REDUCTIONS since
* last call to erl_sys_schedule() and we still haven't
@@ -5715,14 +6738,14 @@ erts_sched_stat_modify(int what)
int ix;
switch (what) {
case ERTS_SCHED_STAT_MODIFY_ENABLE:
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_sched_stat.enabled = 1;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_DISABLE:
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
erts_sched_stat.enabled = 1;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case ERTS_SCHED_STAT_MODIFY_CLEAR:
erts_smp_spin_lock(&erts_sched_stat.lock);
@@ -5782,18 +6805,10 @@ erts_sched_stat_term(Process *p, int total)
void
erts_schedule_misc_op(void (*func)(void *), void *arg)
{
- ErtsRunQueue *rq = erts_get_runq_current(NULL);
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsRunQueue *rq = esdp ? esdp->run_queue : ERTS_RUNQ_IX(0);
ErtsMiscOpList *molp = misc_op_list_alloc();
- if (!rq) {
- /*
- * This can only happen when the sys msg dispatcher
- * thread schedules misc ops (this happens *very*
- * seldom; only when trace drivers are unloaded).
- */
- rq = ERTS_RUNQ_IX(0);
- }
-
erts_smp_runq_lock(rq);
while (rq->misc.evac_runq) {
@@ -5885,7 +6900,7 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
* Wait for other schedulers to schedule out their processes
* and update 'reductions'.
*/
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++)
reds += ERTS_RUNQ_IX(ix)->procs.reductions;
if (redsp)
@@ -5893,7 +6908,7 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
if (diffp)
*diffp = reds - last_exact_reductions;
last_exact_reductions = reds;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
@@ -5948,7 +6963,7 @@ erts_test_next_pid(int set, Uint next)
Uint erts_process_count(void)
{
- erts_aint32_t res = erts_smp_atomic32_read(&process_count);
+ erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count);
ASSERT(res >= 0);
return (Uint) res;
}
@@ -5997,7 +7012,7 @@ alloc_process(void)
ASSERT(!process_tab[p_next]);
process_tab[p_next] = p;
- erts_smp_atomic32_inc(&process_count);
+ erts_smp_atomic32_inc_nob(&process_count);
p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
if (p->id == ERTS_INVALID_PID) {
/* Do not use the invalid pid; change serial */
@@ -6098,7 +7113,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
+#ifdef BM_COUNTERS
processes_busy++;
+#endif
BM_COUNT(processes_spawned);
#ifndef HYBRID
@@ -6123,7 +7140,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
p->prio = PRIORITY_NORMAL;
- p->max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
}
p->skipped = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
@@ -6249,6 +7266,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->seq_trace_lastcnt = 0;
p->seq_trace_clock = 0;
SEQ_TRACE_TOKEN(p) = NIL;
+#ifdef USE_VM_PROBES
+ DT_UTAG(p) = NIL;
+ DT_UTAG_FLAGS(p) = 0;
+#endif
p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id;
#ifdef HYBRID
@@ -6346,7 +7367,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->pending_exit.bp = NULL;
#endif
-#if !defined(NO_FPE_SIGNALS)
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
#endif
@@ -6381,6 +7402,16 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->id));
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_spawn)) {
+ DTRACE_CHARBUF(process_name, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(mfa, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_fun_decode(p, mod, func, arity, process_name, mfa);
+ DTRACE2(process_spawn, process_name, mfa);
+ }
+#endif
+
error:
erts_smp_proc_unlock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -6520,7 +7551,7 @@ void erts_init_empty_process(Process *p)
p->run_queue = ERTS_RUNQ_IX(0);
#endif
-#if !defined(NO_FPE_SIGNALS)
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
#endif
@@ -6832,7 +7863,11 @@ static ERTS_INLINE void
send_exit_message(Process *to, ErtsProcLocks *to_locksp,
Eterm exit_term, Uint term_size, Eterm token)
{
- if (token == NIL) {
+ if (token == NIL
+#ifdef USE_VM_PROBES
+ || token == am_have_dt_utag
+#endif
+ ) {
Eterm* hp;
Eterm mess;
ErlHeapFragment* bp;
@@ -6840,7 +7875,11 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
hp = erts_alloc_message_heap(term_size, &bp, &ohp, to, to_locksp);
mess = copy_struct(exit_term, term_size, &hp, ohp);
- erts_queue_message(to, to_locksp, bp, mess, NIL);
+ erts_queue_message(to, to_locksp, bp, mess, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
} else {
ErlHeapFragment* bp;
Eterm* hp;
@@ -6856,7 +7895,11 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
/* the trace token must in this case be updated by the caller */
seq_trace_output(token, mess, SEQ_TRACE_SEND, to->id, NULL);
temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
- erts_queue_message(to, to_locksp, bp, mess, temp_token);
+ erts_queue_message(to, to_locksp, bp, mess, temp_token
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
}
@@ -6949,9 +7992,26 @@ send_exit_signal(Process *c_p, /* current process if and only
ASSERT(reason != THE_NON_VALUE);
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(process_exit_signal) && is_pid(from)) {
+ DTRACE_CHARBUF(sender_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(receiver_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(reason_buf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(from, sender_str);
+ dtrace_proc_str(rp, receiver_str);
+ erts_snprintf(reason_buf, sizeof(reason_buf) - 1, "%T", reason);
+ DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
+ }
+#endif
+
if (ERTS_PROC_IS_TRAPPING_EXITS(rp)
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
- if (is_not_nil(token) && token_update)
+ if (is_not_nil(token)
+#ifdef USE_VM_PROBES
+ && token != am_have_dt_utag
+#endif
+ && token_update)
seq_trace_update_send(token_update);
if (is_value(exit_tuple))
send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
@@ -7334,15 +8394,6 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if (rlnk)
erts_destroy_link(rlnk);
erts_deref_dist_entry(dep);
- } else {
-#ifndef ERTS_SMP
- /* XXX Is this possible? Shouldn't this link
- previously have been removed if the node
- had previously been disconnected. */
- ASSERT(0);
-#endif
- /* This is possible when smp support has been enabled,
- and dist port and process exits simultaneously. */
}
break;
@@ -7384,7 +8435,18 @@ erts_do_exit_process(Process* p, Eterm reason)
p->arity = 0; /* No live registers */
p->fvalue = reason;
-
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_exit)) {
+ DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(reason_buf, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_proc_str(p, process_buf);
+ erts_snprintf(reason_buf, DTRACE_TERM_BUF_SIZE - 1, "%T", reason);
+ DTRACE2(process_exit, process_buf, reason_buf);
+ }
+#endif
+
#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* By locking all locks (main lock is already locked) when going
@@ -7580,8 +8642,8 @@ continue_exit_process(Process *p
p->status_flags = 0;
#endif
process_tab[pix] = NULL; /* Time of death! */
- ASSERT(erts_smp_atomic32_read(&process_count) > 0);
- erts_smp_atomic32_dec(&process_count);
+ ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0);
+ erts_smp_atomic32_dec_nob(&process_count);
#ifdef ERTS_SMP
erts_pix_unlock(pix_lock);
@@ -7621,7 +8683,9 @@ continue_exit_process(Process *p
pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+#ifdef BM_COUNTERS
processes_busy--;
+#endif
if (dep) {
erts_do_net_exits(dep, reason);
@@ -8698,6 +9762,22 @@ init_processes_bif(void)
* Debug stuff
*/
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int
+erts_dbg_check_halloc_lock(Process *p)
+{
+ if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
+ return 1;
+ if (p->id == ERTS_INVALID_PID)
+ return 1;
+ if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ return 1;
+ if (erts_thr_progress_is_blocking())
+ return 1;
+ return 0;
+}
+#endif
+
Eterm
erts_debug_processes(Process *c_p)
{
@@ -8932,3 +10012,30 @@ debug_processes_assert_error(char* expr, char* file, int line)
/* *\
* End of the processes/0 BIF implementation. *
\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A nice system halt closing all open ports goes as follows:
+ * 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
+ * on all schedulers, then schedules itself out.
+ * 2) All schedulers detect this and set the flag halt_in_progress
+ *    on their run queue. The last scheduler sets the flag
+ *    ERTS_PORT_SFLG_HALT on all non-closed ports. The global atomic
+ *    erts_halt_progress is used as a refcount to determine which is last.
+ * 3) While a run queue has the flag halt_in_progress set, no processes
+ *    will be scheduled, only ports.
+ * 4) When the last port closes, that scheduler calls erlang:halt/1.
+ *    The same global atomic is used as a refcount.
+ *
+ * A BIF that calls this should make sure to schedule out so that it never comes back:
+ * erl_halt((int)(- code));
+ * ERTS_BIF_YIELD1(bif_export[BIF_erlang_halt_1], BIF_P, NIL);
+ */
+void erl_halt(int code)
+{
+ if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress,
+ erts_no_schedulers,
+ -1)) {
+ erts_halt_code = code;
+ notify_reap_ports_relb();
+ }
+}
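
For illustration only (not part of the patch): a hedged sketch of how a halting BIF, in the spirit of the comment above, might invoke erl_halt() and then yield so that it never returns. The argument handling and the C-level name halt_1 are assumptions made for the example; the erl_halt()/ERTS_BIF_YIELD1() pair is taken from the comment in the patch.

/* Sketch of a halting BIF (assumed name halt_1). The argument check is
 * illustrative; the erl_halt()/yield pattern follows the comment above. */
BIF_RETTYPE halt_1(BIF_ALIST_1)
{
    Sint code;

    if (is_not_small(BIF_ARG_1) || (code = signed_val(BIF_ARG_1)) < 0)
	BIF_ERROR(BIF_P, BADARG);

    /* Stores the (negated) exit code and schedules the
     * ERTS_SSI_AUX_WORK_REAP_PORTS aux work on all schedulers. */
    erl_halt((int) (- code));

    /* Schedule out and never come back; the scheduler that closes the
     * last port performs the actual halt. */
    ERTS_BIF_YIELD1(bif_export[BIF_erlang_halt_1], BIF_P, NIL);
}
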
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 296acc7367..cff0783bc4 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -53,11 +53,18 @@ typedef struct process Process;
#include "erl_time.h"
#include "erl_atom_table.h"
#include "external.h"
+#include "erl_mseg.h"
+#include "erl_async.h"
#ifdef HIPE
#include "hipe_process.h"
#endif
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+
struct ErtsNodesMonitor_;
struct port;
@@ -88,6 +95,7 @@ struct saved_calls {
};
extern Export exp_send, exp_receive, exp_timeout;
+extern int erts_sched_compact_load;
extern Uint erts_no_schedulers;
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
@@ -136,12 +144,10 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 1))
#define ERTS_RUNQ_FLG_SUSPENDED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 2))
-#define ERTS_RUNQ_FLG_SHARED_RUNQ \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_CHK_CPU_BIND \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_INACTIVE \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -242,30 +248,34 @@ typedef enum {
| ERTS_SSI_FLG_WAITING \
| ERTS_SSI_FLG_SUSPENDED)
-#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
-
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 0)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 1)
+/*
+ * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
+ * to each other. Most frequent - lowest bit number.
+ */
-#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
- (ERTS_SSI_AUX_WORK_CHECK_CHILDREN \
- | ERTS_SSI_AUX_WORK_MISC)
-#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
- (0)
+#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 0)
+#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 1)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 2)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 3)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 4)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 5)
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 6)
+#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 7)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 8)
+#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 9)
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 10)
+#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 11)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
-typedef struct {
- erts_smp_spinlock_t lock;
- ErtsSchedulerSleepInfo *list;
-} ErtsSchedulerSleepList;
-
struct ErtsSchedulerSleepInfo_ {
+#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
erts_smp_atomic32_t flags;
erts_tse_t *event;
- erts_smp_atomic32_t aux_work;
+#endif
+ erts_atomic32_t aux_work;
};
/* times to reschedule low prio process before running */
@@ -320,10 +330,6 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
-#ifdef ERTS_SMP
- ErtsSchedulerSleepList sleepers;
-#endif
-
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
@@ -336,6 +342,7 @@ struct ErtsRunQueue_ {
int len;
int wakeup_other;
int wakeup_other_reds;
+ int halt_in_progress;
struct {
int len;
@@ -369,7 +376,6 @@ typedef union {
} ErtsAlignedRunQueue;
extern ErtsAlignedRunQueue *erts_aligned_run_queues;
-extern ErtsRunQueue *erts_common_run_queue;
#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
do { \
@@ -386,25 +392,62 @@ do { \
(RQ)->wakeup_other_reds += (REDS); \
} while (0)
-struct ErtsSchedulerData_ {
+typedef struct {
+ int enabled;
+ Uint64 start;
+ struct {
+ Uint64 total;
+ Uint64 start;
+ int currently;
+ } working;
+} ErtsSchedWallTime;
+typedef struct {
+ int sched_id;
+ ErtsSchedulerData *esdp;
+ ErtsSchedulerSleepInfo *ssi;
+#ifdef ERTS_SMP
+ ErtsThrPrgrVal current_thr_prgr;
+#endif
+ struct {
+ int ix;
#ifdef ERTS_SMP
+ ErtsThrPrgrVal thr_prgr;
+#endif
+ } misc;
+#ifdef ERTS_SMP
+ struct {
+ ErtsThrPrgrVal thr_prgr;
+ void (*completed_callback)(void *);
+ void (*completed_arg)(void *);
+ } dd;
+#endif
+#ifdef ERTS_USE_ASYNC_READY_Q
+ struct {
+#ifdef ERTS_SMP
+ int need_thr_prgr;
+ ErtsThrPrgrVal thr_prgr;
+#endif
+ void *queue;
+ } async_ready;
+#endif
+} ErtsAuxWorkData;
+
+struct ErtsSchedulerData_ {
/*
* Keep X registers first (so we get as many low
* numbered registers as possible in the same cache
* line).
*/
-#if !HALFWORD_HEAP
- Eterm save_reg[ERTS_X_REGS_ALLOCATED]; /* X registers */
-#else
- Eterm *save_reg;
-#endif
- FloatDef freg[MAX_REG]; /* Floating point registers. */
+ Eterm* x_reg_array; /* X registers */
+ FloatDef* f_reg_array; /* Floating point registers. */
+
+#ifdef ERTS_SMP
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
- ErtsSchedulerSleepInfo *ssi;
Process *free_process;
+ ErtsThrPrgrData thr_progress_data;
#endif
#if !HEAP_ON_C_STACK
Eterm tmp_heap[TMP_HEAP_SIZE];
@@ -413,20 +456,20 @@ struct ErtsSchedulerData_ {
Eterm cmp_tmp_heap[CMP_TMP_HEAP_SIZE];
Eterm erl_arith_tmp_heap[ERL_ARITH_TMP_HEAP_SIZE];
#endif
-
+ ErtsSchedulerSleepInfo *ssi;
Process *current_process;
Uint no; /* Scheduler number */
struct port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
int cpu_id; /* >= 0 when bound */
+ ErtsAuxWorkData aux_work_data;
ErtsAtomCacheMap atom_cache_map;
-#ifdef ERTS_SMP
- /* NOTE: These fields are modified under held mutexes by other threads */
- erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
-#endif
+ ErtsSchedAllocData alloc_data;
+
+ ErtsSchedWallTime sched_wall_time;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
@@ -572,7 +615,7 @@ struct process {
Uint min_heap_size; /* Minimum size of heap (in words). */
Uint min_vheap_size; /* Minimum size of virtual heap (in words). */
-#if !defined(NO_FPE_SIGNALS)
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
volatile unsigned long fp_exception;
#endif
@@ -640,6 +683,10 @@ struct process {
Uint seq_trace_lastcnt;
Eterm seq_trace_token; /* Sequential trace token (tuple size 5 see below) */
+#ifdef USE_VM_PROBES
+    Eterm dt_utag;              /* Place to store the dynamic trace user tag */
+ Uint dt_utag_flags; /* flag field for the dt_utag */
+#endif
BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
of pointer to funcinfo instruction, hence the BeamInstr datatype */
BeamInstr* current; /* Current Erlang function, part of the funcinfo:
@@ -955,6 +1002,14 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define SEQ_TRACE_PRINT (1 << 2)
#define SEQ_TRACE_TIMESTAMP (1 << 3)
+#ifdef USE_VM_PROBES
+#define DT_UTAG_PERMANENT (1 << 0)
+#define DT_UTAG_SPREADING (1 << 1)
+#define DT_UTAG(P) ((P)->dt_utag)
+#define DT_UTAG_FLAGS(P) ((P)->dt_utag_flags)
+#endif
+
+
#ifdef ERTS_SMP
/* Status flags ... */
#define ERTS_PROC_SFLG_PENDADD2SCHEDQ (((Uint32) 1) << 0) /* Pending
@@ -1032,8 +1087,10 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
void erts_pre_init_process(void);
void erts_late_init_process(void);
-void erts_early_init_scheduling(void);
-void erts_init_scheduling(int, int, int);
+void erts_early_init_scheduling(int);
+void erts_init_scheduling(int, int);
+
+Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1041,6 +1098,9 @@ int erts_proclist_same(ErtsProcList *, Process *);
int erts_sched_set_wakeup_limit(char *str);
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_dbg_check_halloc_lock(Process *p);
+#endif
#ifdef DEBUG
void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
#endif
@@ -1058,13 +1118,20 @@ erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
int erts_is_multi_scheduling_blocked(void);
Eterm erts_multi_scheduling_blockers(Process *);
void erts_start_schedulers(void);
+void erts_alloc_notify_delayed_dealloc(int);
void erts_smp_notify_check_children_needed(void);
-void
-erts_smp_schedule_misc_aux_work(int ignore_self,
- int max_sched,
- void (*func)(void *),
- void *arg);
#endif
+#if ERTS_USE_ASYNC_READY_Q
+void erts_notify_check_async_ready_queue(void *);
+#endif
+void erts_schedule_misc_aux_work(int sched_id,
+ void (*func)(void *),
+ void *arg);
+void erts_schedule_multi_misc_aux_work(int ignore_self,
+ int max_sched,
+ void (*func)(void *),
+ void *arg);
+erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int);
@@ -1148,6 +1215,7 @@ Sint erts_test_next_pid(int, Uint);
Eterm erts_debug_processes(Process *c_p);
Eterm erts_debug_processes_bif_info(Process *c_p);
Uint erts_debug_nbalance(void);
+int erts_debug_wait_deallocations(Process *c_p);
#ifdef ERTS_SMP
# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
@@ -1218,16 +1286,11 @@ erts_psd_get(Process *p, int ix)
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks)
- ERTS_SMP_LC_ASSERT(locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].get_locks;
ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ || erts_thr_progress_is_blocking());
}
#endif
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
@@ -1244,16 +1307,11 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
- ERTS_SMP_LC_ASSERT(locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
else {
locks &= erts_psd_required_locks[ix].set_locks;
ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
- || erts_is_system_blocked(0)
- || (ERTS_IS_CRASH_DUMPING
- && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ || erts_thr_progress_is_blocking());
}
#endif
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
@@ -1417,8 +1475,7 @@ erts_get_runq_proc(Process *p)
ASSERT(p->run_queue);
return p->run_queue;
#else
- ASSERT(erts_common_run_queue);
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
@@ -1431,8 +1488,7 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
esdp = erts_get_scheduler_data();
return esdp->run_queue;
#else
- ASSERT(erts_common_run_queue);
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
@@ -1599,11 +1655,9 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
erts_aint32_t flags;
ERTS_THR_MEMORY_BARRIER;
- flags = erts_smp_atomic32_read(&ssi->flags);
- ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
- || (flags & ERTS_SSI_FLG_WAITING));
+ flags = erts_smp_atomic32_read_nob(&ssi->flags);
if (flags & ERTS_SSI_FLG_SLEEPING) {
- flags = erts_smp_atomic32_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
erts_sched_finish_poke(ssi, flags);
}
}
@@ -1619,4 +1673,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
#endif
-
+void erl_halt(int code);
+extern erts_smp_atomic32_t erts_halt_progress;
+extern int erts_halt_code;
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 5410bcd495..3550f1396c 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -350,7 +350,7 @@ heap_dump(int to, void *to_arg, Eterm x)
ProcBin* pb = (ProcBin *) binary_val(x);
Binary* val = pb->val;
- if (erts_smp_atomic_xchg(&val->refc, 0) != 0) {
+ if (erts_smp_atomic_xchg_nob(&val->refc, 0) != 0) {
val->flags = (UWord) all_binaries;
all_binaries = val;
}
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 72560aa124..a5a753b798 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -123,10 +123,10 @@ erts_init_proc_lock(int cpus)
erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
- "pix_lock", make_small(i));
+ erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
+ "pix_lock", make_small(i));
#else
- erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
}
queue_free_list = NULL;
@@ -316,7 +316,7 @@ try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr)
break;
}
wflg = lock << ERTS_PROC_LOCK_WAITER_SHIFT;
- old_lflgs = ERTS_PROC_LOCK_FLGS_BOR_(lck, wflg | lock);
+ old_lflgs = ERTS_PROC_LOCK_FLGS_BOR_ACQB_(lck, wflg | lock);
if (old_lflgs & lock) {
/* Didn't get the lock */
goto enqueue;
@@ -413,7 +413,7 @@ transfer_locks(Process *p,
do {
erts_tse_t *tmp = wake;
wake = wake->next;
- erts_atomic32_set(&tmp->uaflgs, 0);
+ erts_atomic32_set_nob(&tmp->uaflgs, 0);
erts_tse_set(tmp);
} while (wake);
@@ -509,14 +509,14 @@ wait_for_locks(Process *p,
ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
- erts_atomic32_set(&wtr->uaflgs, 1);
+ erts_atomic32_set_nob(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
while (1) {
int res;
erts_tse_reset(wtr);
- if (erts_atomic32_read(&wtr->uaflgs) == 0)
+ if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
break;
/*
@@ -669,7 +669,9 @@ proc_safelock(Process *a_proc,
ErtsProcLocks b_need_locks)
{
Process *p1, *p2;
+#ifdef ERTS_ENABLE_LOCK_CHECK
Eterm pid1, pid2;
+#endif
erts_pix_lock_t *pix_lck1, *pix_lck2;
ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
ErtsProcLocks unlock_mask;
@@ -684,24 +686,32 @@ proc_safelock(Process *a_proc,
if (a_proc) {
if (a_proc->id < b_proc->id) {
p1 = a_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = a_proc->id;
+#endif
pix_lck1 = a_pix_lck;
need_locks1 = a_need_locks;
have_locks1 = a_have_locks;
p2 = b_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = b_proc->id;
+#endif
pix_lck2 = b_pix_lck;
need_locks2 = b_need_locks;
have_locks2 = b_have_locks;
}
else if (a_proc->id > b_proc->id) {
p1 = b_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = b_proc->id;
+#endif
pix_lck1 = b_pix_lck;
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = a_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = a_proc->id;
+#endif
pix_lck2 = a_pix_lck;
need_locks2 = a_need_locks;
have_locks2 = a_have_locks;
@@ -710,12 +720,16 @@ proc_safelock(Process *a_proc,
ERTS_LC_ASSERT(a_proc == b_proc);
ERTS_LC_ASSERT(a_proc->id == b_proc->id);
p1 = a_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = a_proc->id;
+#endif
pix_lck1 = a_pix_lck;
need_locks1 = a_need_locks | b_need_locks;
have_locks1 = a_have_locks | b_have_locks;
p2 = NULL;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = 0;
+#endif
pix_lck2 = NULL;
need_locks2 = 0;
have_locks2 = 0;
@@ -723,12 +737,16 @@ proc_safelock(Process *a_proc,
}
else {
p1 = b_proc;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid1 = b_proc->id;
+#endif
pix_lck1 = b_pix_lck;
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = NULL;
+#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = 0;
+#endif
pix_lck2 = NULL;
need_locks2 = 0;
have_locks2 = 0;
@@ -955,7 +973,8 @@ erts_proc_lock_init(Process *p)
{
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic32_init(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
+ erts_smp_atomic32_init_nob(&p->lock.flags,
+ (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
@@ -974,7 +993,7 @@ erts_proc_lock_init(Process *p)
{
int i;
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- erts_smp_atomic32_init(&p->lock.locked[i], (erts_aint32_t) 1);
+ erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
}
#endif
}
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 355179f084..8dbdaccc68 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -41,10 +41,10 @@
#define ERTS_PROC_LOCK_SPINLOCK_IMPL 0
#define ERTS_PROC_LOCK_MUTEX_IMPL 0
-#if defined(ETHR_HAVE_OPTIMIZED_ATOMIC_OPS)
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
# undef ERTS_PROC_LOCK_ATOMIC_IMPL
# define ERTS_PROC_LOCK_ATOMIC_IMPL 1
-#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCK)
+#elif defined(ETHR_HAVE_NATIVE_SPINLOCKS)
# undef ERTS_PROC_LOCK_SPINLOCK_IMPL
# define ERTS_PROC_LOCK_SPINLOCK_IMPL 1
#else
@@ -255,8 +255,8 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
- erts_smp_spinlock_t spnlck;
- char buf[64]; /* Try to get locks in different cache lines */
+ erts_mtx_t mtx;
+ char buf[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_mtx_t))];
} u;
} erts_pix_lock_t;
@@ -270,9 +270,11 @@ typedef struct {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic32_band(&(L)->flags, (erts_aint32_t) (MSK)))
-#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic32_bor(&(L)->flags, (erts_aint32_t) (MSK)))
+ ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \
+ (erts_aint32_t) (MSK)))
+#define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \
+ ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \
+ (erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \
(erts_aint32_t) (NEW), \
@@ -282,7 +284,7 @@ typedef struct {
(erts_aint32_t) (NEW), \
(erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
- ((ErtsProcLocks) erts_smp_atomic32_read(&(L)->flags))
+ ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags))
#else /* no opt atomic ops */
@@ -325,7 +327,7 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#endif
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
-#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
+#define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
@@ -378,18 +380,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
- erts_smp_spin_lock(&pixlck->u.spnlck);
+ erts_mtx_lock(&pixlck->u.mtx);
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
- erts_smp_spin_unlock(&pixlck->u.spnlck);
+ erts_mtx_unlock(&pixlck->u.mtx);
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
- return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
+ return erts_lc_mtx_is_locked(&pixlck->u.mtx);
}
/*
@@ -623,11 +625,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
if (locks & lock) {
erts_aint32_t lock_count;
if (locked) {
- lock_count = erts_smp_atomic32_inctest(&p->lock.locked[i]);
+ lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 1);
}
else {
- lock_count = erts_smp_atomic32_dectest(&p->lock.locked[i]);
+ lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 0);
}
}
@@ -649,7 +651,7 @@ ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *);
ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *);
-
+ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -735,6 +737,21 @@ ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
#endif
}
+ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 refc)
+{
+#ifdef ERTS_SMP
+ Process *fp;
+ erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock(pixlck);
+ ERTS_LC_ASSERT(p->lock.refc > 0);
+ p->lock.refc += refc;
+ fp = p->lock.refc == 0 ? p : NULL;
+ erts_pix_unlock(pixlck);
+ if (fp)
+ erts_free_proc(fp);
+#endif
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#ifdef ERTS_SMP
@@ -941,8 +958,6 @@ erts_pid2proc_opt(Process *c_p,
if (flags & ERTS_P2P_FLG_TRY_LOCK)
proc = ERTS_PROC_LOCK_BUSY;
else {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- proc->lock.refc++;
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif
@@ -952,6 +967,8 @@ erts_pid2proc_opt(Process *c_p,
pid_need_locks,
pix_lock,
flags);
+ if (proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC))
+ proc->lock.refc++;
}
}
}
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
new file mode 100644
index 0000000000..bff9d246a3
--- /dev/null
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -0,0 +1,304 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Scheduler specific pre-allocators. Each scheduler
+ * thread allocates memory in its own private chunk of
+ * memory. Memory blocks deallocated by remote
+ * schedulers (or other threads) are passed back to
+ * the chunk owner via a lock-free data structure.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef ERTS_SMP
+
+#include "erl_process.h"
+#include "erl_thr_progress.h"
+
+erts_sspa_data_t *
+erts_sspa_create(size_t blk_sz, int pa_size)
+{
+ erts_sspa_data_t *data;
+ size_t tot_size;
+ size_t chunk_mem_size;
+ char *p;
+ char *chunk_start;
+ int cix;
+ int no_blocks = pa_size;
+ int no_blocks_per_chunk;
+
+ if (erts_no_schedulers == 1)
+ no_blocks_per_chunk = no_blocks;
+ else {
+ int extra = (no_blocks - 1)/4 + 1;
+ if (extra == 0)
+ extra = 1;
+ no_blocks_per_chunk = no_blocks;
+ no_blocks_per_chunk += extra*erts_no_schedulers;
+ no_blocks_per_chunk /= erts_no_schedulers;
+ }
+ no_blocks = no_blocks_per_chunk * erts_no_schedulers;
+ chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
+ chunk_mem_size += blk_sz * no_blocks_per_chunk;
+ chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
+ tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
+ tot_size += chunk_mem_size*erts_no_schedulers;
+
+ p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
+ data = (erts_sspa_data_t *) p;
+ p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
+ chunk_start = p;
+
+ data->chunks_mem_size = chunk_mem_size;
+ data->start = chunk_start;
+ data->end = chunk_start + chunk_mem_size*erts_no_schedulers;
+
+ /* Initialize all chunks */
+ for (cix = 0; cix < erts_no_schedulers; cix++) {
+ erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
+ erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
+ erts_sspa_blk_t *blk;
+ int i;
+
+ erts_atomic_init_nob(&chdr->tail.data.last, (erts_aint_t) &chdr->tail.data.marker);
+ erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
+ erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
+ erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);
+
+ chdr->head.no_thr_progress_check = 0;
+ chdr->head.used_marker = 1;
+ chdr->head.first = &chdr->tail.data.marker;
+ chdr->head.unref_end = &chdr->tail.data.marker;
+ chdr->head.next.thr_progress = erts_thr_progress_current();
+ chdr->head.next.thr_progress_reached = 1;
+ chdr->head.next.um_refc_ix = 1;
+ chdr->head.next.unref_end = &chdr->tail.data.marker;
+
+ p = &chnk->data[0];
+ chdr->local.first = (erts_sspa_blk_t *) p;
+ blk = (erts_sspa_blk_t *) p;
+ for (i = 0; i < no_blocks_per_chunk; i++) {
+ blk = (erts_sspa_blk_t *) p;
+ p += blk_sz;
+ blk->next_ptr = (erts_sspa_blk_t *) p;
+ }
+
+ blk->next_ptr = NULL;
+ chdr->local.last = blk;
+ chdr->local.cnt = no_blocks_per_chunk;
+ chdr->local.lim = no_blocks_per_chunk / 3;
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+
+ return data;
+}
+
+static ERTS_INLINE erts_aint_t
+enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *this,
+ int want_last)
+{
+ erts_aint_t ilast, itmp;
+
+ erts_atomic_init_nob(&this->next_atmc, ERTS_AINT_NULL);
+
+ /* Enqueue at end of list... */
+
+ ilast = erts_atomic_read_nob(&chdr->tail.data.last);
+ while (1) {
+ erts_sspa_blk_t *last = (erts_sspa_blk_t *) ilast;
+ itmp = erts_atomic_cmpxchg_mb(&last->next_atmc,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL)
+ break;
+ ilast = itmp;
+ }
+
+ /* Move last pointer forward... */
+ while (1) {
+ erts_aint_t itmp;
+ if (want_last) {
+ if (erts_atomic_read_rb(&this->next_atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return erts_atomic_read_nob(&chdr->tail.data.last);
+ }
+ }
+ else {
+ if (erts_atomic_read_nob(&this->next_atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return ERTS_AINT_NULL;
+ }
+ }
+ itmp = erts_atomic_cmpxchg_mb(&chdr->tail.data.last,
+ (erts_aint_t) this,
+ ilast);
+ if (ilast == itmp)
+ return want_last ? (erts_aint_t) this : ERTS_AINT_NULL;
+ ilast = itmp;
+ }
+}
+
+void
+erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr, erts_sspa_blk_t *blk)
+{
+ int um_refc_ix = 0;
+ int managed_thread = erts_thr_progress_is_managed_thread();
+ if (!managed_thread) {
+ um_refc_ix = erts_atomic32_read_acqb(&chdr->tail.data.um_refc_ix);
+ while (1) {
+ int tmp_um_refc_ix;
+ erts_atomic_inc_acqb(&chdr->tail.data.um_refc[um_refc_ix]);
+ tmp_um_refc_ix = erts_atomic32_read_acqb(&chdr->tail.data.um_refc_ix);
+ if (tmp_um_refc_ix == um_refc_ix)
+ break;
+ erts_atomic_dec_relb(&chdr->tail.data.um_refc[um_refc_ix]);
+ um_refc_ix = tmp_um_refc_ix;
+ }
+ }
+
+ (void) enqueue_remote_managed_thread(chdr, blk, 0);
+
+ if (!managed_thread)
+ erts_atomic_dec_relb(&chdr->tail.data.um_refc[um_refc_ix]);
+}
+
+static ERTS_INLINE void
+fetch_remote(erts_sspa_chunk_header_t *chdr, int max)
+{
+ int new_local = 0;
+
+ if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
+ chdr->head.no_thr_progress_check++;
+ else {
+ erts_aint_t ilast;
+
+ chdr->head.no_thr_progress_check = 0;
+
+ ilast = erts_atomic_read_nob(&chdr->tail.data.last);
+ if (((erts_sspa_blk_t *) ilast) == &chdr->tail.data.marker
+ && chdr->head.first == &chdr->tail.data.marker)
+ return;
+
+ if (chdr->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(chdr->head.next.thr_progress)) {
+ int um_refc_ix;
+ chdr->head.next.thr_progress_reached = 1;
+ um_refc_ix = chdr->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&chdr->tail.data.um_refc[um_refc_ix]) == 0) {
+
+ /* Move unreferenced end pointer forward... */
+
+ chdr->head.unref_end = chdr->head.next.unref_end;
+
+ if (!chdr->head.used_marker
+ && chdr->head.unref_end == (erts_sspa_blk_t *) ilast) {
+ /* Need to enqueue marker */
+ chdr->head.used_marker = 1;
+ ilast = enqueue_remote_managed_thread(chdr,
+ &chdr->tail.data.marker,
+ 1);
+ }
+
+ if (chdr->head.unref_end == (erts_sspa_blk_t *) ilast)
+ ERTS_THR_MEMORY_BARRIER;
+ else {
+ chdr->head.next.unref_end = (erts_sspa_blk_t *) ilast;
+ chdr->head.next.thr_progress = erts_thr_progress_later();
+ erts_atomic32_set_relb(&chdr->tail.data.um_refc_ix,
+ um_refc_ix);
+ chdr->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
+ chdr->head.next.thr_progress_reached = 0;
+ }
+ }
+ }
+ }
+
+ if (new_local < max && chdr->head.first != chdr->head.unref_end) {
+ erts_sspa_blk_t *first, *this, *next, *last;
+ first = chdr->head.first;
+ if (first == &chdr->tail.data.marker) {
+ chdr->head.used_marker = 0;
+ first = ((erts_sspa_blk_t *)
+ erts_atomic_read_nob(&first->next_atmc));
+ chdr->head.first = first;
+ }
+ if (first != chdr->head.unref_end) {
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+
+ this = last = first;
+ do {
+ next = (erts_sspa_blk_t *) erts_atomic_read_nob(&this->next_atmc);
+ if (this == &chdr->tail.data.marker)
+ chdr->head.used_marker = 0;
+ else {
+ last->next_ptr = this;
+ last = this;
+ new_local++;
+ }
+ this = next;
+ } while (new_local < max && this != chdr->head.unref_end);
+ chdr->head.first = this;
+ if (!chdr->local.last)
+ chdr->local.first = first;
+ else
+ chdr->local.last->next_ptr = first;
+ chdr->local.last = last;
+ last->next_ptr = NULL;
+ chdr->local.cnt += new_local;
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+ }
+
+}
+
+erts_sspa_blk_t *
+erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *old_res)
+{
+ erts_sspa_blk_t *res = old_res;
+
+ fetch_remote(chdr, ERTS_SSPA_MAX_GET_NEW_LOCAL);
+
+ if (!res && chdr->local.first) {
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+
+ res = chdr->local.first;
+ chdr->local.first = res->next_ptr;
+ chdr->local.cnt--;
+ if (!chdr->local.first)
+ chdr->local.last = NULL;
+
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+
+ return res;
+}
+
+#endif /* ERTS_SMP */
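The pre-allocator introduced above lets remote threads hand freed blocks back to the owning scheduler without taking a lock: remote threads enqueue onto the chunk's tail with compare-and-swap, and only the owner consumes from the head. The actual code uses a FIFO with a marker block and thread-progress tracking; the stand-alone sketch below only illustrates the basic remote-return idea with a simpler lock-free LIFO. All names (free_blk_t, chunk_t, remote_free, fetch_remote_frees) are hypothetical and are not part of the ERTS API.

    /* Minimal sketch (illustrative names, not ERTS API): remote threads push
     * freed blocks onto a lock-free LIFO; the owning scheduler drains it. */
    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct free_blk { struct free_blk *next; } free_blk_t;

    typedef struct {
        _Atomic(free_blk_t *) remote_head;  /* modified by remote threads */
        free_blk_t *local_head;             /* touched only by the owner  */
    } chunk_t;

    /* Called by any thread that frees a block belonging to another chunk. */
    static void remote_free(chunk_t *c, free_blk_t *blk)
    {
        free_blk_t *old = atomic_load_explicit(&c->remote_head,
                                               memory_order_relaxed);
        do {
            blk->next = old;
        } while (!atomic_compare_exchange_weak_explicit(&c->remote_head,
                                                        &old, blk,
                                                        memory_order_release,
                                                        memory_order_relaxed));
    }

    /* Called only by the owning scheduler: move remotely freed blocks
     * onto its private free list (no locking needed on this side either). */
    static void fetch_remote_frees(chunk_t *c)
    {
        free_blk_t *blk = atomic_exchange_explicit(&c->remote_head, NULL,
                                                   memory_order_acquire);
        while (blk) {
            free_blk_t *next = blk->next;
            blk->next = c->local_head;
            c->local_head = blk;
            blk = next;
        }
    }

The release/acquire pairing mirrors what the real implementation gets from its _relb/_acqb atomic variants: the consumer must observe the block contents written before the producer published the pointer.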
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
new file mode 100644
index 0000000000..d36066c399
--- /dev/null
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -0,0 +1,239 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Scheduler specific pre-allocators. Each scheduler
+ * thread allocates memory in its own private chunk of
+ * memory. Memory blocks deallocated by remote
+ * schedulers (or other threads) are passed back to
+ * the chunk owner via a lock-free data structure.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__
+#define ERTS_SCHED_SPEC_PRE_ALLOC_H__
+
+#ifdef ERTS_SMP
+
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+
+#ifdef DEBUG
+#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P) \
+do { \
+ ASSERT((void *) (C) < (void *) (P)); \
+ ASSERT((void *) (P) \
+ < (void *) (((char *) (C)) + (A)->chunks_mem_size)); \
+} while (0)
+#else
+#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P)
+#endif
+
+#ifdef DEBUG
+extern Uint erts_no_schedulers;
+#endif
+
+#define ERTS_SSPA_FORCE_THR_CHECK_PROGRESS 10
+#define ERTS_SSPA_MAX_GET_NEW_LOCAL 5
+
+typedef struct {
+ char *start;
+ char *end;
+ int chunks_mem_size;
+} erts_sspa_data_t;
+
+typedef union erts_sspa_blk_t_ erts_sspa_blk_t;
+union erts_sspa_blk_t_ {
+ erts_atomic_t next_atmc;
+ erts_sspa_blk_t *next_ptr;
+};
+
+typedef struct {
+ erts_sspa_blk_t *first;
+ erts_sspa_blk_t *last;
+ int cnt;
+ int lim;
+} erts_sspa_local_freelist_t;
+
+typedef struct {
+ erts_sspa_blk_t marker;
+ erts_atomic_t last;
+ erts_atomic_t um_refc[2];
+ erts_atomic32_t um_refc_ix;
+} erts_sspa_tail_t;
+
+typedef struct {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /* Modified by threads returning memory to this chunk */
+ erts_sspa_tail_t data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_tail_t))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread owning this chunk.
+ */
+ struct {
+ int no_thr_progress_check;
+ int used_marker;
+ erts_sspa_blk_t *first;
+ erts_sspa_blk_t *unref_end;
+ struct {
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+ int um_refc_ix;
+ erts_sspa_blk_t *unref_end;
+ } next;
+ } head;
+ erts_sspa_local_freelist_t local;
+} erts_sspa_chunk_header_t;
+
+typedef struct {
+ union {
+ erts_sspa_chunk_header_t header;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(erts_sspa_chunk_header_t))];
+ } aligned;
+ char data[1];
+} erts_sspa_chunk_t;
+
+#ifdef DEBUG
+ERTS_GLB_INLINE void
+check_local_list(erts_sspa_chunk_header_t *chdr);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void
+check_local_list(erts_sspa_chunk_header_t *chdr)
+{
+ erts_sspa_blk_t *blk;
+ int n = 0;
+ for (blk = chdr->local.first; blk; blk = blk->next_ptr)
+ n++;
+ ASSERT(n == chdr->local.cnt);
+}
+#endif
+#define ERTS_SSPA_DBG_CHK_LCL(CHDR) check_local_list((CHDR))
+#else
+#define ERTS_SSPA_DBG_CHK_LCL(CHDR)
+#endif
+
+erts_sspa_data_t *erts_sspa_create(size_t blk_sz,
+ int pa_size);
+void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *blk);
+erts_sspa_blk_t *erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
+ erts_sspa_blk_t *old_res);
+
+ERTS_GLB_INLINE erts_sspa_chunk_t *erts_sspa_cix2chunk(erts_sspa_data_t *data,
+ int cix);
+ERTS_GLB_INLINE int erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr);
+ERTS_GLB_INLINE char *erts_sspa_alloc(erts_sspa_data_t *data, int cix);
+ERTS_GLB_INLINE int erts_sspa_free(erts_sspa_data_t *data, int cix, char *blk);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE erts_sspa_chunk_t *
+erts_sspa_cix2chunk(erts_sspa_data_t *data, int cix)
+{
+ ASSERT(0 <= cix && cix < erts_no_schedulers);
+ return (erts_sspa_chunk_t *) (data->start + cix*data->chunks_mem_size);
+}
+
+ERTS_GLB_INLINE int
+erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr)
+{
+ int cix;
+ size_t diff;
+ if ((char *) ptr < data->start || data->end <= (char *) ptr)
+ return -1;
+ diff = ((char *) ptr) - data->start;
+ cix = (int) diff / data->chunks_mem_size;
+ ASSERT(0 <= cix && cix < erts_no_schedulers);
+ return cix;
+}
+
+ERTS_GLB_INLINE char *
+erts_sspa_alloc(erts_sspa_data_t *data, int cix)
+{
+ erts_sspa_chunk_t *chnk;
+ erts_sspa_chunk_header_t *chdr;
+ erts_sspa_blk_t *res;
+
+ chnk = erts_sspa_cix2chunk(data, cix);
+ chdr = &chnk->aligned.header;
+ res = chdr->local.first;
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ if (res) {
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ chdr->local.first = res->next_ptr;
+ chdr->local.cnt--;
+ if (!chdr->local.first)
+ chdr->local.last = NULL;
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+ if (chdr->local.cnt <= chdr->local.lim)
+ return (char *) erts_sspa_process_remote_frees(chdr, res);
+ else if (chdr->head.no_thr_progress_check < ERTS_SSPA_FORCE_THR_CHECK_PROGRESS)
+ chdr->head.no_thr_progress_check++;
+ ASSERT(res);
+ return (char *) res;
+}
+
+ERTS_GLB_INLINE int
+erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk)
+{
+ erts_sspa_chunk_t *chnk;
+ erts_sspa_chunk_header_t *chdr;
+ erts_sspa_blk_t *blk = (erts_sspa_blk_t *) cblk;
+ int chnk_cix = erts_sspa_ptr2cix(data, blk);
+
+ if (chnk_cix < 0)
+ return 0;
+
+ chnk = erts_sspa_cix2chunk(data, chnk_cix);
+ chdr = &chnk->aligned.header;
+ if (chnk_cix != cix) {
+ /* Remote chunk */
+ erts_sspa_remote_free(chdr, blk);
+ }
+ else {
+ /* Local chunk */
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ blk->next_ptr = chdr->local.first;
+ chdr->local.first = blk;
+ if (!chdr->local.last)
+ chdr->local.last = blk;
+ chdr->local.cnt++;
+ ERTS_SSPA_DBG_CHK_LCL(chdr);
+ }
+
+ return 1;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_SMP */
+
+#endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */
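The tail/head layout of erts_sspa_chunk_header_t keeps the fields written by remote threads and the fields touched only by the owning scheduler on separate cache lines, so remote frees do not cause false sharing on the owner's hot data. The padding idiom is a union whose size is rounded up to a cache-line multiple. A rough stand-alone illustration follows, assuming 64-byte cache lines; the names are hypothetical, and ERTS gets the base alignment from its cache-aligned permanent allocator rather than from _Alignas.

    /* Sketch of the cache-line padding idiom (illustrative, not ERTS types). */
    #include <stdatomic.h>
    #include <stddef.h>

    #define CACHE_LINE_SIZE 64
    #define CACHE_LINE_ALIGN(SZ) \
        (((SZ) + CACHE_LINE_SIZE - 1) & ~((size_t) CACHE_LINE_SIZE - 1))

    typedef struct {
        _Alignas(CACHE_LINE_SIZE) union {
            _Atomic long value;                         /* written by remote threads */
            char pad[CACHE_LINE_ALIGN(sizeof(_Atomic long))];
        } shared;                                       /* occupies its own line(s)  */
        long owner_only;                                /* never written remotely    */
    } counter_t;

Because sizeof(shared) is a whole number of cache lines and the struct itself is cache-line aligned, owner_only starts on a fresh line, which is the same effect the ERTS_ALC_CACHE_LINE_ALIGN_SIZE-sized align__ member achieves in the chunk header.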
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 287327bfe1..a32e9d9d7c 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -54,12 +54,18 @@ typedef erts_cnd_t erts_smp_cnd_t;
typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef erts_atomic_t erts_smp_atomic_t;
-typedef erts_atomic32_t erts_smp_atomic32_t;
+#define erts_smp_dw_atomic_t erts_dw_atomic_t
+#define erts_smp_atomic_t erts_atomic_t
+#define erts_smp_atomic32_t erts_atomic32_t
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
+#define ERTS_SMP_MEMORY_BARRIER ERTS_THR_MEMORY_BARRIER
+#define ERTS_SMP_WRITE_MEMORY_BARRIER ERTS_THR_WRITE_MEMORY_BARRIER
+#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER
+#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
+
#else /* #ifdef ERTS_SMP */
#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
@@ -83,8 +89,9 @@ typedef struct {
} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef SWord erts_smp_atomic_t;
-typedef Uint32 erts_smp_atomic32_t;
+#define erts_smp_dw_atomic_t erts_no_dw_atomic_t
+#define erts_smp_atomic_t erts_no_atomic_t
+#define erts_smp_atomic32_t erts_no_atomic32_t
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
typedef struct { } erts_smp_rwlock_t;
@@ -93,6 +100,11 @@ typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
#endif
+#define ERTS_SMP_MEMORY_BARRIER
+#define ERTS_SMP_WRITE_MEMORY_BARRIER
+#define ERTS_SMP_READ_MEMORY_BARRIER
+#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER
+
#endif /* #ifdef ERTS_SMP */
ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
@@ -160,82 +172,6 @@ ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var,
- erts_aint_t i);
-ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read(erts_smp_atomic_t *var);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_inctest(erts_smp_atomic_t *incp);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE void erts_smp_atomic_inc(erts_smp_atomic_t *incp);
-ERTS_GLB_INLINE void erts_smp_atomic_dec(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_addtest(erts_smp_atomic_t *addp,
- erts_aint_t i);
-ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp,
- erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp,
- erts_aint_t new);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_bor(erts_smp_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_band(erts_smp_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
-ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var,
- erts_aint_t i);
-ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp);
-ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_read(erts_smp_atomic32_t *var);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_inc(erts_smp_atomic32_t *incp);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_dec(erts_smp_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i);
-ERTS_GLB_INLINE void
-erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp);
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -279,6 +215,490 @@ ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how,
ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
+/*
+ * See "Documentation of atomics and memory barriers" at the top
+ * of erl_threads.h for info on atomics.
+ */
+
+#ifdef ERTS_SMP
+
+/* Double word size atomics */
+
+#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob
+#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob
+#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob
+#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob
+
+#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb
+#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb
+#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb
+#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb
+
+#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb
+#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb
+#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb
+#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb
+
+#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb
+#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb
+#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb
+#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb
+
+#define erts_smp_dw_atomic_init_ddrb erts_dw_atomic_init_ddrb
+#define erts_smp_dw_atomic_set_ddrb erts_dw_atomic_set_ddrb
+#define erts_smp_dw_atomic_read_ddrb erts_dw_atomic_read_ddrb
+#define erts_smp_dw_atomic_cmpxchg_ddrb erts_dw_atomic_cmpxchg_ddrb
+
+#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb
+#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb
+#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb
+#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb
+
+#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb
+#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb
+#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb
+#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb
+
+/* Word size atomics */
+
+#define erts_smp_atomic_init_nob erts_atomic_init_nob
+#define erts_smp_atomic_set_nob erts_atomic_set_nob
+#define erts_smp_atomic_read_nob erts_atomic_read_nob
+#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob
+#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob
+#define erts_smp_atomic_inc_nob erts_atomic_inc_nob
+#define erts_smp_atomic_dec_nob erts_atomic_dec_nob
+#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob
+#define erts_smp_atomic_add_nob erts_atomic_add_nob
+#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob
+#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob
+#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob
+#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob
+
+#define erts_smp_atomic_init_mb erts_atomic_init_mb
+#define erts_smp_atomic_set_mb erts_atomic_set_mb
+#define erts_smp_atomic_read_mb erts_atomic_read_mb
+#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb
+#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb
+#define erts_smp_atomic_inc_mb erts_atomic_inc_mb
+#define erts_smp_atomic_dec_mb erts_atomic_dec_mb
+#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb
+#define erts_smp_atomic_add_mb erts_atomic_add_mb
+#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb
+#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb
+#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb
+#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb
+
+#define erts_smp_atomic_init_acqb erts_atomic_init_acqb
+#define erts_smp_atomic_set_acqb erts_atomic_set_acqb
+#define erts_smp_atomic_read_acqb erts_atomic_read_acqb
+#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb
+#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb
+#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb
+#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb
+#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb
+#define erts_smp_atomic_add_acqb erts_atomic_add_acqb
+#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb
+#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb
+#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb
+#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb
+
+#define erts_smp_atomic_init_relb erts_atomic_init_relb
+#define erts_smp_atomic_set_relb erts_atomic_set_relb
+#define erts_smp_atomic_read_relb erts_atomic_read_relb
+#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb
+#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb
+#define erts_smp_atomic_inc_relb erts_atomic_inc_relb
+#define erts_smp_atomic_dec_relb erts_atomic_dec_relb
+#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb
+#define erts_smp_atomic_add_relb erts_atomic_add_relb
+#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb
+#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb
+#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb
+#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb
+
+#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb
+#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb
+#define erts_smp_atomic_read_ddrb erts_atomic_read_ddrb
+#define erts_smp_atomic_inc_read_ddrb erts_atomic_inc_read_ddrb
+#define erts_smp_atomic_dec_read_ddrb erts_atomic_dec_read_ddrb
+#define erts_smp_atomic_inc_ddrb erts_atomic_inc_ddrb
+#define erts_smp_atomic_dec_ddrb erts_atomic_dec_ddrb
+#define erts_smp_atomic_add_read_ddrb erts_atomic_add_read_ddrb
+#define erts_smp_atomic_add_ddrb erts_atomic_add_ddrb
+#define erts_smp_atomic_read_bor_ddrb erts_atomic_read_bor_ddrb
+#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb
+#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb
+#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb
+
+#define erts_smp_atomic_init_rb erts_atomic_init_rb
+#define erts_smp_atomic_set_rb erts_atomic_set_rb
+#define erts_smp_atomic_read_rb erts_atomic_read_rb
+#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb
+#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb
+#define erts_smp_atomic_inc_rb erts_atomic_inc_rb
+#define erts_smp_atomic_dec_rb erts_atomic_dec_rb
+#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb
+#define erts_smp_atomic_add_rb erts_atomic_add_rb
+#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb
+#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb
+#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb
+#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb
+
+#define erts_smp_atomic_init_wb erts_atomic_init_wb
+#define erts_smp_atomic_set_wb erts_atomic_set_wb
+#define erts_smp_atomic_read_wb erts_atomic_read_wb
+#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb
+#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb
+#define erts_smp_atomic_inc_wb erts_atomic_inc_wb
+#define erts_smp_atomic_dec_wb erts_atomic_dec_wb
+#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb
+#define erts_smp_atomic_add_wb erts_atomic_add_wb
+#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb
+#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb
+#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb
+#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb
+
+/* 32-bit atomics */
+
+#define erts_smp_atomic32_init_nob erts_atomic32_init_nob
+#define erts_smp_atomic32_set_nob erts_atomic32_set_nob
+#define erts_smp_atomic32_read_nob erts_atomic32_read_nob
+#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob
+#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob
+#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob
+#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob
+#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob
+#define erts_smp_atomic32_add_nob erts_atomic32_add_nob
+#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob
+#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob
+#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob
+#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob
+
+#define erts_smp_atomic32_init_mb erts_atomic32_init_mb
+#define erts_smp_atomic32_set_mb erts_atomic32_set_mb
+#define erts_smp_atomic32_read_mb erts_atomic32_read_mb
+#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb
+#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb
+#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb
+#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb
+#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb
+#define erts_smp_atomic32_add_mb erts_atomic32_add_mb
+#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb
+#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb
+#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb
+#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb
+
+#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb
+#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb
+#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb
+#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb
+#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb
+#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb
+#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb
+#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb
+#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb
+#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb
+#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb
+#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb
+#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb
+
+#define erts_smp_atomic32_init_relb erts_atomic32_init_relb
+#define erts_smp_atomic32_set_relb erts_atomic32_set_relb
+#define erts_smp_atomic32_read_relb erts_atomic32_read_relb
+#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb
+#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb
+#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb
+#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb
+#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb
+#define erts_smp_atomic32_add_relb erts_atomic32_add_relb
+#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb
+#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb
+#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb
+#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb
+
+#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb
+#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb
+#define erts_smp_atomic32_read_ddrb erts_atomic32_read_ddrb
+#define erts_smp_atomic32_inc_read_ddrb erts_atomic32_inc_read_ddrb
+#define erts_smp_atomic32_dec_read_ddrb erts_atomic32_dec_read_ddrb
+#define erts_smp_atomic32_inc_ddrb erts_atomic32_inc_ddrb
+#define erts_smp_atomic32_dec_ddrb erts_atomic32_dec_ddrb
+#define erts_smp_atomic32_add_read_ddrb erts_atomic32_add_read_ddrb
+#define erts_smp_atomic32_add_ddrb erts_atomic32_add_ddrb
+#define erts_smp_atomic32_read_bor_ddrb erts_atomic32_read_bor_ddrb
+#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb
+#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb
+#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb
+
+#define erts_smp_atomic32_init_rb erts_atomic32_init_rb
+#define erts_smp_atomic32_set_rb erts_atomic32_set_rb
+#define erts_smp_atomic32_read_rb erts_atomic32_read_rb
+#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb
+#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb
+#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb
+#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb
+#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb
+#define erts_smp_atomic32_add_rb erts_atomic32_add_rb
+#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb
+#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb
+#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb
+#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb
+
+#define erts_smp_atomic32_init_wb erts_atomic32_init_wb
+#define erts_smp_atomic32_set_wb erts_atomic32_set_wb
+#define erts_smp_atomic32_read_wb erts_atomic32_read_wb
+#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb
+#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb
+#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb
+#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb
+#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb
+#define erts_smp_atomic32_add_wb erts_atomic32_add_wb
+#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb
+#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb
+#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb
+#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb
+
+#else /* !ERTS_SMP */
+
+/* Double word size atomics */
+
+#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
+
+#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init
+#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
+
+#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init
+#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
+
+#define erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init
+#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
+
+#define erts_smp_dw_atomic_init_ddrb erts_no_dw_atomic_init
+#define erts_smp_dw_atomic_set_ddrb erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_ddrb erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
+
+#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init
+#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
+
+#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init
+#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set
+#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read
+#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
+
+/* Word size atomics */
+
+#define erts_smp_atomic_init_nob erts_no_atomic_set
+#define erts_smp_atomic_set_nob erts_no_atomic_set
+#define erts_smp_atomic_read_nob erts_no_atomic_read
+#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_nob erts_no_atomic_inc
+#define erts_smp_atomic_dec_nob erts_no_atomic_dec
+#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read
+#define erts_smp_atomic_add_nob erts_no_atomic_add
+#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
+
+#define erts_smp_atomic_init_mb erts_no_atomic_set
+#define erts_smp_atomic_set_mb erts_no_atomic_set
+#define erts_smp_atomic_read_mb erts_no_atomic_read
+#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_mb erts_no_atomic_inc
+#define erts_smp_atomic_dec_mb erts_no_atomic_dec
+#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read
+#define erts_smp_atomic_add_mb erts_no_atomic_add
+#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
+
+#define erts_smp_atomic_init_acqb erts_no_atomic_set
+#define erts_smp_atomic_set_acqb erts_no_atomic_set
+#define erts_smp_atomic_read_acqb erts_no_atomic_read
+#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_acqb erts_no_atomic_inc
+#define erts_smp_atomic_dec_acqb erts_no_atomic_dec
+#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read
+#define erts_smp_atomic_add_acqb erts_no_atomic_add
+#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
+
+#define erts_smp_atomic_init_relb erts_no_atomic_set
+#define erts_smp_atomic_set_relb erts_no_atomic_set
+#define erts_smp_atomic_read_relb erts_no_atomic_read
+#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_relb erts_no_atomic_inc
+#define erts_smp_atomic_dec_relb erts_no_atomic_dec
+#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read
+#define erts_smp_atomic_add_relb erts_no_atomic_add
+#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
+
+#define erts_smp_atomic_init_ddrb erts_no_atomic_set
+#define erts_smp_atomic_set_ddrb erts_no_atomic_set
+#define erts_smp_atomic_read_ddrb erts_no_atomic_read
+#define erts_smp_atomic_inc_read_ddrb erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_ddrb erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_ddrb erts_no_atomic_inc
+#define erts_smp_atomic_dec_ddrb erts_no_atomic_dec
+#define erts_smp_atomic_add_read_ddrb erts_no_atomic_add_read
+#define erts_smp_atomic_add_ddrb erts_no_atomic_add
+#define erts_smp_atomic_read_bor_ddrb erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
+
+#define erts_smp_atomic_init_rb erts_no_atomic_set
+#define erts_smp_atomic_set_rb erts_no_atomic_set
+#define erts_smp_atomic_read_rb erts_no_atomic_read
+#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_rb erts_no_atomic_inc
+#define erts_smp_atomic_dec_rb erts_no_atomic_dec
+#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read
+#define erts_smp_atomic_add_rb erts_no_atomic_add
+#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
+
+#define erts_smp_atomic_init_wb erts_no_atomic_set
+#define erts_smp_atomic_set_wb erts_no_atomic_set
+#define erts_smp_atomic_read_wb erts_no_atomic_read
+#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read
+#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read
+#define erts_smp_atomic_inc_wb erts_no_atomic_inc
+#define erts_smp_atomic_dec_wb erts_no_atomic_dec
+#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read
+#define erts_smp_atomic_add_wb erts_no_atomic_add
+#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor
+#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band
+#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg
+#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
+
+/* 32-bit atomics */
+
+#define erts_smp_atomic32_init_nob erts_no_atomic32_set
+#define erts_smp_atomic32_set_nob erts_no_atomic32_set
+#define erts_smp_atomic32_read_nob erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_nob erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
+
+#define erts_smp_atomic32_init_mb erts_no_atomic32_set
+#define erts_smp_atomic32_set_mb erts_no_atomic32_set
+#define erts_smp_atomic32_read_mb erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_mb erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
+
+#define erts_smp_atomic32_init_acqb erts_no_atomic32_set
+#define erts_smp_atomic32_set_acqb erts_no_atomic32_set
+#define erts_smp_atomic32_read_acqb erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_acqb erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
+
+#define erts_smp_atomic32_init_relb erts_no_atomic32_set
+#define erts_smp_atomic32_set_relb erts_no_atomic32_set
+#define erts_smp_atomic32_read_relb erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_relb erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
+
+#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set
+#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set
+#define erts_smp_atomic32_read_ddrb erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_ddrb erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_ddrb erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_ddrb erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_ddrb erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
+
+#define erts_smp_atomic32_init_rb erts_no_atomic32_set
+#define erts_smp_atomic32_set_rb erts_no_atomic32_set
+#define erts_smp_atomic32_read_rb erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_rb erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
+
+#define erts_smp_atomic32_init_wb erts_no_atomic32_set
+#define erts_smp_atomic32_set_wb erts_no_atomic32_set
+#define erts_smp_atomic32_read_wb erts_no_atomic32_read
+#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read
+#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read
+#define erts_smp_atomic32_inc_wb erts_no_atomic32_inc
+#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec
+#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read
+#define erts_smp_atomic32_add_wb erts_no_atomic32_add
+#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor
+#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band
+#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg
+#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
+
+#endif /* !ERTS_SMP */
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -473,6 +893,16 @@ erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
#endif
}
+/*
+ * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast()
+ *
+ * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
+ * even though the associated mutex/mutexes isn't/aren't locked by the
+ * caller. Our implementation does not allow that in order to avoid a
+ * performance penalty. That is, all associated mutexes *need* to be
+ * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()!
+ */
+
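+/*
+ * A minimal sketch of the required calling pattern (illustrative only;
+ * 'my_mtx', 'my_cnd' and 'ready' are assumed names, not emulator code):
+ *
+ *     erts_smp_mtx_lock(&my_mtx);
+ *     ready = 1;
+ *     erts_smp_cnd_signal(&my_cnd);    (the mutex is still held here)
+ *     erts_smp_mtx_unlock(&my_mtx);
+ */
+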
ERTS_GLB_INLINE void
erts_smp_cnd_signal(erts_smp_cnd_t *cnd)
{
@@ -655,434 +1085,6 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_init(erts_smp_atomic_t *var, erts_aint_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic_init(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic_set(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_read(erts_smp_atomic_t *var)
-{
-#ifdef ERTS_SMP
- return erts_atomic_read(var);
-#else
- return *var;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_inctest(erts_smp_atomic_t *incp)
-{
-#ifdef ERTS_SMP
- return erts_atomic_inctest(incp);
-#else
- return ++(*incp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_dectest(erts_smp_atomic_t *decp)
-{
-#ifdef ERTS_SMP
- return erts_atomic_dectest(decp);
-#else
- return --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic_inc(erts_smp_atomic_t *incp)
-{
-#ifdef ERTS_SMP
- erts_atomic_inc(incp);
-#else
- ++(*incp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic_dec(erts_smp_atomic_t *decp)
-{
-#ifdef ERTS_SMP
- erts_atomic_dec(decp);
-#else
- --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_addtest(erts_smp_atomic_t *addp, erts_aint_t i)
-{
-#ifdef ERTS_SMP
- return erts_atomic_addtest(addp, i);
-#else
- return *addp += i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic_add(erts_smp_atomic_t *addp, erts_aint_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic_add(addp, i);
-#else
- *addp += i;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, erts_aint_t new)
-{
-#ifdef ERTS_SMP
- return erts_atomic_xchg(xchgp, new);
-#else
- erts_aint_t old;
- old = *xchgp;
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected)
-{
-#ifdef ERTS_SMP
- return erts_atomic_cmpxchg(xchgp, new, expected);
-#else
- erts_aint_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_bor(erts_smp_atomic_t *var, erts_aint_t mask)
-{
-#ifdef ERTS_SMP
- return erts_atomic_bor(var, mask);
-#else
- erts_aint_t old;
- old = *var;
- *var |= mask;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_band(erts_smp_atomic_t *var, erts_aint_t mask)
-{
-#ifdef ERTS_SMP
- return erts_atomic_band(var, mask);
-#else
- erts_aint_t old;
- old = *var;
- *var &= mask;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
-{
-#ifdef ERTS_SMP
- return erts_atomic_read_acqb(var);
-#else
- return *var;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic_set_relb(erts_smp_atomic_t *var, erts_aint_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic_set_relb(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
-{
-#ifdef ERTS_SMP
- erts_atomic_dec_relb(decp);
-#else
- --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
-{
-#ifdef ERTS_SMP
- return erts_atomic_dectest_relb(decp);
-#else
- return --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp)
-{
-#ifdef ERTS_SMP
- return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
-#else
- erts_aint_t old = *xchgp;
- if (old == exp)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp)
-{
-#ifdef ERTS_SMP
- return erts_atomic_cmpxchg_relb(xchgp, new, exp);
-#else
- erts_aint_t old = *xchgp;
- if (old == exp)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic32_init(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic32_set(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_read(erts_smp_atomic32_t *var)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_read(var);
-#else
- return *var;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_inctest(incp);
-#else
- return ++(*incp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_dectest(decp);
-#else
- return --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_inc(erts_smp_atomic32_t *incp)
-{
-#ifdef ERTS_SMP
- erts_atomic32_inc(incp);
-#else
- ++(*incp);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_dec(erts_smp_atomic32_t *decp)
-{
-#ifdef ERTS_SMP
- erts_atomic32_dec(decp);
-#else
- --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_addtest(addp, i);
-#else
- return *addp += i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic32_add(addp, i);
-#else
- *addp += i;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_xchg(xchgp, new);
-#else
- erts_aint32_t old;
- old = *xchgp;
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_cmpxchg(xchgp, new, expected);
-#else
- erts_aint32_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_bor(var, mask);
-#else
- erts_aint32_t old;
- old = *var;
- *var |= mask;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_band(var, mask);
-#else
- erts_aint32_t old;
- old = *var;
- *var &= mask;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_read_acqb(var);
-#else
- return *var;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i)
-{
-#ifdef ERTS_SMP
- erts_atomic32_set_relb(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp)
-{
-#ifdef ERTS_SMP
- erts_atomic32_dec_relb(decp);
-#else
- --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_dectest_relb(decp);
-#else
- return --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_cmpxchg_acqb(xchgp, new, exp);
-#else
- erts_aint32_t old = *xchgp;
- if (old == exp)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp)
-{
-#ifdef ERTS_SMP
- return erts_atomic32_cmpxchg_relb(xchgp, new, exp);
-#else
- erts_aint32_t old = *xchgp;
- if (old == exp)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE void
erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
{
#ifdef ERTS_SMP
@@ -1308,3 +1310,37 @@ erts_smp_thr_sigwait(const sigset_t *set, int *sig)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERL_SMP_H */
+
+#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS
+
+/* Deprecated functions to replace */
+
+#undef erts_smp_atomic_init
+#undef erts_smp_atomic_set
+#undef erts_smp_atomic_read
+#undef erts_smp_atomic_inctest
+#undef erts_smp_atomic_dectest
+#undef erts_smp_atomic_inc
+#undef erts_smp_atomic_dec
+#undef erts_smp_atomic_addtest
+#undef erts_smp_atomic_add
+#undef erts_smp_atomic_xchg
+#undef erts_smp_atomic_cmpxchg
+#undef erts_smp_atomic_bor
+#undef erts_smp_atomic_band
+
+#undef erts_smp_atomic32_init
+#undef erts_smp_atomic32_set
+#undef erts_smp_atomic32_read
+#undef erts_smp_atomic32_inctest
+#undef erts_smp_atomic32_dectest
+#undef erts_smp_atomic32_inc
+#undef erts_smp_atomic32_dec
+#undef erts_smp_atomic32_addtest
+#undef erts_smp_atomic32_add
+#undef erts_smp_atomic32_xchg
+#undef erts_smp_atomic32_cmpxchg
+#undef erts_smp_atomic32_bor
+#undef erts_smp_atomic32_band
+
+#endif
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 1d75fa313c..c270d13365 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -253,15 +253,15 @@ _ET_DECLARE_CHECKED(Eterm*,list_val,Wterm)
#define SMALL_BITS (28)
#define SMALL_DIGITS (8)
#endif
-#define MAX_SMALL ((1L << (SMALL_BITS-1))-1)
-#define MIN_SMALL (-(1L << (SMALL_BITS-1)))
+#define MAX_SMALL ((SWORD_CONSTANT(1) << (SMALL_BITS-1))-1)
+#define MIN_SMALL (-(SWORD_CONSTANT(1) << (SMALL_BITS-1)))
#define make_small(x) (((Uint)(x) << _TAG_IMMED1_SIZE) + _TAG_IMMED1_SMALL)
#define is_small(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_SMALL)
#define is_not_small(x) (!is_small((x)))
#define is_byte(x) (((x) & ((~(Uint)0 << (_TAG_IMMED1_SIZE+8)) + _TAG_IMMED1_MASK)) == _TAG_IMMED1_SMALL)
#define is_valid_bit_size(x) (((Sint)(x)) >= 0 && ((x) & 0x7F) == _TAG_IMMED1_SMALL)
#define is_not_valid_bit_size(x) (!is_valid_bit_size((x)))
-#define MY_IS_SSMALL(x) (((Uint) (((x) >> (SMALL_BITS-1)) + 1)) < 2)
+#define MY_IS_SSMALL(x) (((Uint) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2)
#define _unchecked_unsigned_val(x) ((x) >> _TAG_IMMED1_SIZE)
_ET_DECLARE_CHECKED(Uint,unsigned_val,Eterm)
#define unsigned_val(x) _ET_APPLY(unsigned_val,(x))
@@ -331,7 +331,13 @@ _ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm)
* we now use a non-zero bit-pattern in debug mode.
*/
#if ET_DEBUG
-#define THE_NON_VALUE _make_header(0,_TAG_HEADER_FLOAT)
+# ifdef HIPE
+ /* A very large (or negative) value as a work-around for ugly hipe-bifs
+ that return untagged integers (e.g. hipe_bs_put_utf8) */
+# define THE_NON_VALUE _make_header((Uint)~0,_TAG_HEADER_FLOAT)
+# else
+# define THE_NON_VALUE _make_header(0,_TAG_HEADER_FLOAT)
+# endif
#else
#define THE_NON_VALUE (0)
#endif
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
new file mode 100644
index 0000000000..9ef83746c5
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -0,0 +1,1377 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Thread progress information. Used by lock free algorithms
+ * to determine when all involved threads are guaranteed to
+ * have passed a specific point of execution.
+ *
+ * Usage instructions below.
+ *
+ * Author: Rickard Green
+ */
+
+/*
+ * ------ Usage instructions -----------------------------------------------
+ *
+ * This module keeps track of the progress of a set of managed threads. Only
+ * threads that behave well can be allowed to be managed. A managed thread
+ * should update its thread progress frequently. Currently only scheduler
+ * threads, the system-message-dispatcher threads, and the aux-thread are
+ * managed threads. We typically do not want any async threads as managed
+ * threads since they cannot guarantee frequent updates of thread progress:
+ * they execute user-implemented driver code that is assumed to be
+ * time consuming.
+ *
+ * erts_thr_progress_current() returns the global current thread progress
+ * value of managed threads. I.e., the latest progress value that all
+ * managed threads have reached. Thread progress values are opaque.
+ *
+ * erts_thr_progress_has_reached(VAL) returns a value != 0 if current
+ * global thread progress has reached or passed VAL.
+ *
+ * erts_thr_progress_later() returns a thread progress value in the future
+ * which no managed thread has yet reached.
+ *
+ * All threads issue a full memory barrier when reaching a new thread
+ * progress value. They only reach new thread progress values in specific
+ * controlled states when calling erts_thr_progress_update(). Schedulers
+ * call erts_thr_progress_update() in between execution of processes,
+ * when going to sleep and when waking up.
+ *
+ * Sleeping managed threads are considered to have reached the next thread
+ * progress value immediately. They are not woken, and therefore do not
+ * issue any memory barriers when reaching a new thread progress value.
+ * A sleeping thread does, however, issue a memory barrier immediately
+ * upon wakeup.
+ *
+ * Both managed and registered unmanaged threads may request wakeup when
+ * the global thread progress reaches a certain value using
+ * erts_thr_progress_wakeup().
+ *
+ * Note that thread progress values are opaque, and that you are only
+ * allowed to use thread progress values retrieved from this API!
+ *
+ * -------------------------------------------------------------------------
+ */
+
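+/*
+ * A rough usage sketch (illustrative only; 'blk', enqueue_for_later() and
+ * free_block() are assumed names, not part of the emulator). A managed
+ * thread that wants to reclaim a block once all managed threads are
+ * guaranteed not to reference it any longer could do something like:
+ *
+ *     ErtsThrPrgrVal later = erts_thr_progress_later();
+ *     enqueue_for_later(blk, later);
+ *     erts_thr_progress_wakeup(esdp, later);
+ *
+ * and, when woken up (or at a later thread progress update):
+ *
+ *     if (erts_thr_progress_has_reached(later))
+ *         free_block(blk);
+ */
+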
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof() */
+#include "erl_thr_progress.h"
+#include "global.h"
+
+#ifdef ERTS_SMP
+
+#define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 0
+
+#ifdef DEBUG
+#undef ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE
+#define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 1
+#endif
+
+#define ERTS_THR_PRGR_PRINT_LEADER 0
+#define ERTS_THR_PRGR_PRINT_VAL 0
+#define ERTS_THR_PRGR_PRINT_BLOCKERS 0
+
+#define ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL 100
+
+#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
+#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
+ | ERTS_THR_PRGR_LFLG_BLOCK))
+
+#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \
+ ((LFLGS) & ERTS_THR_PRGR_LFLG_ACTIVE_MASK)
+
+#define ERTS_THR_PRGR_LFLGS_ALL_WAITING(LFLGS) \
+ (((LFLGS) & (ERTS_THR_PRGR_LFLG_NO_LEADER \
+ |ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) \
+ == ERTS_THR_PRGR_LFLG_NO_LEADER)
+
+/*
+ * We use a 64-bit value for thread progress. Because of this, wrapping of
+ * the thread progress value will more or less never occur.
+ *
+ * On 32-bit systems we therefore need a double word atomic.
+ */
+#undef read_acqb
+#define read_acqb erts_thr_prgr_read_acqb__
+#undef read_nob
+#define read_nob erts_thr_prgr_read_nob__
+
+#ifdef ARCH_64
+
+static ERTS_INLINE void
+set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_atomic_set_mb(atmc, val);
+}
+
+static ERTS_INLINE void
+set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_atomic_set_nob(atmc, val);
+}
+
+static ERTS_INLINE void
+init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_atomic_init_nob(atmc, val);
+}
+
+#else
+
+#undef dw_aint_to_val
+#define dw_aint_to_val erts_thr_prgr_dw_aint_to_val__
+
+static void
+val_to_dw_aint(erts_dw_aint_t *dw_aint, ErtsThrPrgrVal val)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw_aint->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw_aint->sint[ERTS_DW_AINT_LOW_WORD]
+ = (erts_aint_t) (val & 0xffffffff);
+ dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]
+ = (erts_aint_t) ((val >> 32) & 0xffffffff);
+#endif
+}
+
+static ERTS_INLINE void
+set_mb(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_dw_aint_t dw_aint;
+ val_to_dw_aint(&dw_aint, val);
+ erts_dw_atomic_set_mb(atmc, &dw_aint);
+}
+
+static ERTS_INLINE void
+set_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_dw_aint_t dw_aint;
+ val_to_dw_aint(&dw_aint, val);
+ erts_dw_atomic_set_nob(atmc, &dw_aint);
+}
+
+static ERTS_INLINE void
+init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
+{
+ erts_dw_aint_t dw_aint;
+ val_to_dw_aint(&dw_aint, val);
+ erts_dw_atomic_init_nob(atmc, &dw_aint);
+}
+
+#endif
+
+/* #define ERTS_THR_PROGRESS_STATE_DEBUG */
+
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+
+#ifdef __GNUC__
+#warning "Thread progress state debug is on"
+#endif
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER (((erts_aint32_t) 1) << 0)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE (((erts_aint32_t) 1) << 1)
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID) \
+ erts_atomic32_init_nob(&intrnl->thr[(ID)].data.state_debug, \
+ ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE)
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(ID, ON) \
+do { \
+ erts_aint32_t state_debug__; \
+ state_debug__ = erts_atomic32_read_nob(&intrnl->thr[(ID)].data.state_debug); \
+ if ((ON)) \
+ state_debug__ |= ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE; \
+ else \
+ state_debug__ &= ~ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE; \
+ erts_atomic32_set_nob(&intrnl->thr[(ID)].data.state_debug, state_debug__); \
+} while (0)
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(ID, ON) \
+do { \
+ erts_aint32_t state_debug__; \
+ state_debug__ = erts_atomic32_read_nob(&intrnl->thr[(ID)].data.state_debug); \
+ if ((ON)) \
+ state_debug__ |= ERTS_THR_PROGRESS_STATE_DEBUG_LEADER; \
+ else \
+ state_debug__ &= ~ERTS_THR_PROGRESS_STATE_DEBUG_LEADER; \
+ erts_atomic32_set_nob(&intrnl->thr[(ID)].data.state_debug, state_debug__); \
+} while (0)
+
+#else
+
+#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(ID, ON)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(ID, ON)
+
+#endif /* ERTS_THR_PROGRESS_STATE_DEBUG */
+
+#define ERTS_THR_PRGR_BLCKR_INVALID (~((erts_aint32_t) 0))
+#define ERTS_THR_PRGR_BLCKR_UNMANAGED (((erts_aint32_t) 1) << 31)
+
+#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING (((erts_aint32_t) 1) << 31)
+
+#define ERTS_THR_PRGR_BM_BITS 32
+#define ERTS_THR_PRGR_BM_SHIFT 5
+#define ERTS_THR_PRGR_BM_MASK 0x1f
+
+#define ERTS_THR_PRGR_WAKEUP_DATA_MASK (ERTS_THR_PRGR_WAKEUP_DATA_SIZE - 1)
+
+#define ERTS_THR_PRGR_WAKEUP_IX(V) \
+ ((int) ((V) & ERTS_THR_PRGR_WAKEUP_DATA_MASK))
+
+typedef struct {
+ erts_atomic32_t len;
+ int id[1];
+} ErtsThrPrgrManagedWakeupData;
+
+typedef struct {
+ erts_atomic32_t len;
+ int high_sz;
+ int low_sz;
+ erts_atomic32_t *high;
+ erts_atomic32_t *low;
+} ErtsThrPrgrUnmanagedWakeupData;
+
+typedef struct {
+ erts_atomic32_t lflgs;
+ erts_atomic32_t block_count;
+ erts_atomic_t blocker_event;
+ erts_atomic32_t pref_wakeup_used;
+ erts_atomic32_t managed_count;
+ erts_atomic32_t managed_id;
+ erts_atomic32_t unmanaged_id;
+} ErtsThrPrgrMiscData;
+
+typedef struct {
+ ERTS_THR_PRGR_ATOMIC current;
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ erts_atomic32_t state_debug;
+#endif
+} ErtsThrPrgrElement;
+
+typedef union {
+ ErtsThrPrgrElement data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrPrgrElement))];
+} ErtsThrPrgrArray;
+
+typedef struct {
+ union {
+ ErtsThrPrgrMiscData data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(ErtsThrPrgrMiscData))];
+ } misc;
+ ErtsThrPrgrArray *thr;
+ struct {
+ int no;
+ ErtsThrPrgrCallbacks *callbacks;
+ ErtsThrPrgrManagedWakeupData *data[ERTS_THR_PRGR_WAKEUP_DATA_SIZE];
+ } managed;
+ struct {
+ int no;
+ ErtsThrPrgrCallbacks *callbacks;
+ ErtsThrPrgrUnmanagedWakeupData *data[ERTS_THR_PRGR_WAKEUP_DATA_SIZE];
+ } unmanaged;
+} ErtsThrPrgrInternalData;
+
+static ErtsThrPrgrInternalData *intrnl;
+
+ErtsThrPrgr erts_thr_prgr__;
+
+erts_tsd_key_t erts_thr_prgr_data_key__;
+
+static void handle_wakeup_requests(ErtsThrPrgrVal current);
+static int got_sched_wakeups(void);
+static erts_aint32_t block_thread(ErtsThrPrgrData *tpd);
+
+static ERTS_INLINE void
+wakeup_managed(int id)
+{
+ ErtsThrPrgrCallbacks *cbp = &intrnl->managed.callbacks[id];
+ ASSERT(0 <= id && id < intrnl->managed.no);
+ cbp->wakeup(cbp->arg);
+}
+
+
+static ERTS_INLINE void
+wakeup_unmanaged(int id)
+{
+ ErtsThrPrgrCallbacks *cbp = &intrnl->unmanaged.callbacks[id];
+ ASSERT(0 <= id && id < intrnl->unmanaged.no);
+ cbp->wakeup(cbp->arg);
+}
+
+static ERTS_INLINE ErtsThrPrgrData *
+perhaps_thr_prgr_data(ErtsSchedulerData *esdp)
+{
+ if (esdp)
+ return &esdp->thr_progress_data;
+ else
+ return erts_tsd_get(erts_thr_prgr_data_key__);
+}
+
+static ERTS_INLINE ErtsThrPrgrData *
+thr_prgr_data(ErtsSchedulerData *esdp)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
+ ASSERT(tpd);
+ return tpd;
+}
+
+static void
+init_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
+{
+ tpd->id = -1;
+ tpd->is_managed = 0;
+ tpd->is_blocking = 0;
+ tpd->is_temporary = 1;
+
+ erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
+}
+
+static ERTS_INLINE ErtsThrPrgrData *
+tmp_thr_prgr_data(ErtsSchedulerData *esdp)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
+
+ if (!tpd) {
+ /*
+ * We only allocate the part up to the wakeup_request field
+ * which is the first field only used by registered threads
+ */
+ tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
+ offsetof(ErtsThrPrgrData, wakeup_request));
+ init_tmp_thr_prgr_data(tpd);
+ }
+
+ return tpd;
+}
+
+static ERTS_INLINE void
+return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
+{
+ if (tpd->is_temporary) {
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ }
+}
+
+static ERTS_INLINE int
+block_count_dec(void)
+{
+ erts_aint32_t block_count;
+ block_count = erts_atomic32_dec_read_mb(&intrnl->misc.data.block_count);
+ if (block_count == 0) {
+ erts_tse_t *event;
+ event = ((erts_tse_t*)
+ erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
+ if (event)
+ erts_tse_set(event);
+ return 1;
+ }
+
+ return (block_count & ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING) == 0;
+}
+
+static ERTS_INLINE int
+block_count_inc(void)
+{
+ erts_aint32_t block_count;
+ block_count = erts_atomic32_inc_read_mb(&intrnl->misc.data.block_count);
+ return (block_count & ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING) == 0;
+}
+
+
+void
+erts_thr_progress_pre_init(void)
+{
+ intrnl = NULL;
+ erts_tsd_key_create(&erts_thr_prgr_data_key__);
+ init_nob(&erts_thr_prgr__.current, 0);
+}
+
+void
+erts_thr_progress_init(int no_schedulers, int managed, int unmanaged)
+{
+ int i, j, um_low, um_high;
+ char *ptr;
+ size_t cb_sz, intrnl_sz, thr_arr_sz, m_wakeup_size, um_wakeup_size,
+ tot_size;
+
+ intrnl_sz = sizeof(ErtsThrPrgrInternalData);
+ intrnl_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(intrnl_sz);
+
+ cb_sz = sizeof(ErtsThrPrgrCallbacks)*(managed+unmanaged);
+ cb_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(cb_sz);
+
+ thr_arr_sz = sizeof(ErtsThrPrgrArray)*managed;
+ ASSERT(thr_arr_sz == ERTS_ALC_CACHE_LINE_ALIGN_SIZE(thr_arr_sz));
+
+ m_wakeup_size = sizeof(ErtsThrPrgrManagedWakeupData);
+ m_wakeup_size += (managed - 1)*sizeof(int);
+ m_wakeup_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(m_wakeup_size);
+
+ um_low = (unmanaged - 1)/ERTS_THR_PRGR_BM_BITS + 1;
+ um_high = (um_low - 1)/ERTS_THR_PRGR_BM_BITS + 1;
+
+ um_wakeup_size = sizeof(ErtsThrPrgrUnmanagedWakeupData);
+ um_wakeup_size += (um_high + um_low)*sizeof(erts_atomic32_t);
+ um_wakeup_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(um_wakeup_size);
+
+ tot_size = intrnl_sz;
+ tot_size += cb_sz;
+ tot_size += thr_arr_sz;
+ tot_size += m_wakeup_size*ERTS_THR_PRGR_WAKEUP_DATA_SIZE;
+ tot_size += um_wakeup_size*ERTS_THR_PRGR_WAKEUP_DATA_SIZE;
+
+ ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_THR_PRGR_IDATA,
+ tot_size);
+
+ intrnl = (ErtsThrPrgrInternalData *) ptr;
+ ptr += intrnl_sz;
+
+ erts_atomic32_init_nob(&intrnl->misc.data.lflgs,
+ ERTS_THR_PRGR_LFLG_NO_LEADER);
+ erts_atomic32_init_nob(&intrnl->misc.data.block_count,
+ (ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING
+ | (erts_aint32_t) managed));
+ erts_atomic_init_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);
+ erts_atomic32_init_nob(&intrnl->misc.data.pref_wakeup_used, 0);
+ erts_atomic32_init_nob(&intrnl->misc.data.managed_count, 0);
+ erts_atomic32_init_nob(&intrnl->misc.data.managed_id, no_schedulers);
+ erts_atomic32_init_nob(&intrnl->misc.data.unmanaged_id, -1);
+
+ intrnl->thr = (ErtsThrPrgrArray *) ptr;
+ ptr += thr_arr_sz;
+ for (i = 0; i < managed; i++)
+ init_nob(&intrnl->thr[i].data.current, 0);
+
+ intrnl->managed.callbacks = (ErtsThrPrgrCallbacks *) ptr;
+ intrnl->unmanaged.callbacks = &intrnl->managed.callbacks[managed];
+ ptr += cb_sz;
+
+ intrnl->managed.no = managed;
+ for (i = 0; i < managed; i++) {
+ intrnl->managed.callbacks[i].arg = NULL;
+ intrnl->managed.callbacks[i].wakeup = NULL;
+ }
+
+ intrnl->unmanaged.no = unmanaged;
+ for (i = 0; i < unmanaged; i++) {
+ intrnl->unmanaged.callbacks[i].arg = NULL;
+ intrnl->unmanaged.callbacks[i].wakeup = NULL;
+ }
+
+ for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
+ intrnl->managed.data[i] = (ErtsThrPrgrManagedWakeupData *) ptr;
+ erts_atomic32_init_nob(&intrnl->managed.data[i]->len, 0);
+ ptr += m_wakeup_size;
+ }
+
+ for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
+ erts_atomic32_t *bm;
+ intrnl->unmanaged.data[i] = (ErtsThrPrgrUnmanagedWakeupData *) ptr;
+ erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->len, 0);
+ bm = (erts_atomic32_t *) (ptr + sizeof(ErtsThrPrgrUnmanagedWakeupData));
+ intrnl->unmanaged.data[i]->high = bm;
+ intrnl->unmanaged.data[i]->high_sz = um_high;
+ for (j = 0; j < um_high; j++)
+ erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->high[j], 0);
+ intrnl->unmanaged.data[i]->low
+ = &intrnl->unmanaged.data[i]->high[um_high];
+ intrnl->unmanaged.data[i]->low_sz = um_low;
+ for (j = 0; j < um_low; j++)
+ erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->low[j], 0);
+ ptr += um_wakeup_size;
+ }
+ ERTS_THR_MEMORY_BARRIER;
+}
+
+static void
+init_wakeup_request_array(ErtsThrPrgrVal *w)
+{
+ int i;
+ ErtsThrPrgrVal current;
+
+ current = read_acqb(&erts_thr_prgr__.current);
+ for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
+ w[i] = current - ((ErtsThrPrgrVal) (ERTS_THR_PRGR_WAKEUP_DATA_SIZE + i));
+ if (w[i] > current)
+ w[i]--;
+ }
+}
+
+void
+erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ int is_blocking = 0;
+
+ if (tpd) {
+ if (!tpd->is_temporary)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Double register of thread\n",
+ __FILE__, __LINE__, __func__);
+ is_blocking = tpd->is_blocking;
+ return_tmp_thr_prgr_data(tpd);
+ }
+
+ /*
+ * We only allocate the part up to the leader field
+ * which is the first field only used by managed threads
+ */
+ tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA,
+ offsetof(ErtsThrPrgrData, leader));
+ tpd->id = (int) erts_atomic32_inc_read_nob(&intrnl->misc.data.unmanaged_id);
+ tpd->is_managed = 0;
+ tpd->is_blocking = is_blocking;
+ tpd->is_temporary = 0;
+ ASSERT(tpd->id >= 0);
+ if (tpd->id >= intrnl->unmanaged.no)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Too many unmanaged registered threads\n",
+ __FILE__, __LINE__, __func__);
+
+ init_wakeup_request_array(&tpd->wakeup_request[0]);
+ erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
+
+ ASSERT(callbacks->wakeup);
+
+ intrnl->unmanaged.callbacks[tpd->id] = *callbacks;
+}
+
+
+void
+erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
+ ErtsThrPrgrCallbacks *callbacks,
+ int pref_wakeup)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ int is_blocking = 0, managed;
+
+ if (tpd) {
+ if (!tpd->is_temporary)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Double register of thread\n",
+ __FILE__, __LINE__, __func__);
+ is_blocking = tpd->is_blocking;
+ return_tmp_thr_prgr_data(tpd);
+ }
+
+ if (esdp)
+ tpd = &esdp->thr_progress_data;
+ else
+ tpd = erts_alloc(ERTS_ALC_T_THR_PRGR_DATA, sizeof(ErtsThrPrgrData));
+
+ if (pref_wakeup
+ && !erts_atomic32_xchg_nob(&intrnl->misc.data.pref_wakeup_used, 1))
+ tpd->id = 0;
+ else if (esdp)
+ tpd->id = (int) esdp->no;
+ else
+ tpd->id = erts_atomic32_inc_read_nob(&intrnl->misc.data.managed_id);
+ ASSERT(tpd->id >= 0);
+ if (tpd->id >= intrnl->managed.no)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Too many managed registered threads\n",
+ __FILE__, __LINE__, __func__);
+
+ tpd->is_managed = 1;
+ tpd->is_blocking = is_blocking;
+ tpd->is_temporary = 0;
+
+ init_wakeup_request_array(&tpd->wakeup_request[0]);
+
+ ERTS_THR_PROGRESS_STATE_DEBUG_INIT(tpd->id);
+
+ tpd->leader = 0;
+ tpd->active = 1;
+ tpd->previous.local = 0;
+ tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+ erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
+
+ erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
+
+ ASSERT(callbacks->wakeup);
+ ASSERT(callbacks->prepare_wait);
+ ASSERT(callbacks->wait);
+ ASSERT(callbacks->finalize_wait);
+
+ intrnl->managed.callbacks[tpd->id] = *callbacks;
+
+ callbacks->prepare_wait(callbacks->arg);
+ managed = erts_atomic32_inc_read_relb(&intrnl->misc.data.managed_count);
+ if (managed != intrnl->managed.no) {
+ /* Wait until all managed threads have registered... */
+ do {
+ callbacks->wait(callbacks->arg);
+ callbacks->prepare_wait(callbacks->arg);
+ managed = erts_atomic32_read_acqb(&intrnl->misc.data.managed_count);
+ } while (managed != intrnl->managed.no);
+ }
+ else {
+ int id;
+ /* All managed threads have registered; lets go... */
+ for (id = 0; id < managed; id++)
+ if (id != tpd->id)
+ wakeup_managed(id);
+ }
+ callbacks->finalize_wait(callbacks->arg);
+}
+
+static ERTS_INLINE int
+leader_update(ErtsThrPrgrData *tpd)
+{
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+ if (!tpd->leader) {
+ /* Probably need to block... */
+ block_thread(tpd);
+ }
+ else {
+ erts_aint32_t lflgs;
+ ErtsThrPrgrVal next;
+ int ix, sz, make_progress;
+
+ if (tpd->previous.current == ERTS_THR_PRGR_VAL_WAITING) {
+ /* Took over as leader from another thread */
+ tpd->previous.current = read_acqb(&erts_thr_prgr__.current);
+ tpd->previous.next = tpd->previous.current;
+ tpd->previous.next++;
+ if (tpd->previous.next == ERTS_THR_PRGR_VAL_WAITING)
+ tpd->previous.next = 0;
+ }
+
+ if (tpd->previous.local == tpd->previous.current) {
+ ErtsThrPrgrVal val = tpd->previous.current + 1;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->previous.local = val;
+ set_mb(&intrnl->thr[tpd->id].data.current, val);
+ }
+
+ next = tpd->previous.next;
+
+ make_progress = 1;
+ sz = intrnl->managed.no;
+ for (ix = 0; ix < sz; ix++) {
+ ErtsThrPrgrVal tmp;
+ tmp = read_nob(&intrnl->thr[ix].data.current);
+ if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
+ make_progress = 0;
+ ASSERT(erts_thr_progress_has_passed__(next, tmp));
+ break;
+ }
+ }
+
+ if (make_progress) {
+ ErtsThrPrgrVal current = next;
+
+ next++;
+ if (next == ERTS_THR_PRGR_VAL_WAITING)
+ next = 0;
+
+ set_nob(&intrnl->thr[tpd->id].data.current, next);
+ set_mb(&erts_thr_prgr__.current, current);
+ tpd->previous.local = next;
+ tpd->previous.next = next;
+ tpd->previous.current = current;
+
+#if ERTS_THR_PRGR_PRINT_VAL
+ if (current % 1000 == 0)
+ erts_fprintf(stderr, "%b64u\n", current);
+#endif
+ handle_wakeup_requests(current);
+ }
+
+ if (tpd->active) {
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ (void) block_thread(tpd);
+ }
+ else {
+ tpd->leader = 0;
+ tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+#if ERTS_THR_PRGR_PRINT_LEADER
+ erts_fprintf(stderr, "L <- %d\n", tpd->id);
+#endif
+
+ ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);
+
+ lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
+ ERTS_THR_PRGR_LFLG_NO_LEADER);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ lflgs = block_thread(tpd);
+ if (ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0 && got_sched_wakeups())
+ wakeup_managed(0);
+ }
+ }
+
+ return tpd->leader;
+}
+
+static int
+update(ErtsThrPrgrData *tpd)
+{
+ int res;
+ ErtsThrPrgrVal val;
+
+ if (tpd->leader)
+ res = 1;
+ else {
+ erts_aint32_t lflgs;
+ res = 0;
+ val = read_acqb(&erts_thr_prgr__.current);
+ if (tpd->previous.local == val) {
+ val++;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->previous.local = val;
+ set_mb(&intrnl->thr[tpd->id].data.current, val);
+ }
+
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ res = 1; /* Need to block in leader_update() */
+
+ if ((lflgs & ERTS_THR_PRGR_LFLG_NO_LEADER)
+ && (tpd->active || ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0)) {
+ /* Try to take over leadership... */
+ erts_aint32_t olflgs;
+ olflgs = erts_atomic32_read_band_acqb(
+ &intrnl->misc.data.lflgs,
+ ~ERTS_THR_PRGR_LFLG_NO_LEADER);
+ if (olflgs & ERTS_THR_PRGR_LFLG_NO_LEADER) {
+ tpd->leader = 1;
+#if ERTS_THR_PRGR_PRINT_LEADER
+ erts_fprintf(stderr, "L -> %d\n", tpd->id);
+#endif
+ ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 1);
+ }
+ }
+ res |= tpd->leader;
+ }
+ return res;
+}
+
+int
+erts_thr_progress_update(ErtsSchedulerData *esdp)
+{
+ return update(thr_prgr_data(esdp));
+}
+
+
+int
+erts_thr_progress_leader_update(ErtsSchedulerData *esdp)
+{
+ return leader_update(thr_prgr_data(esdp));
+}
+
+void
+erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
+{
+ erts_aint32_t lflgs;
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ block_count_dec();
+
+ tpd->previous.local = ERTS_THR_PRGR_VAL_WAITING;
+ set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING);
+
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (ERTS_THR_PRGR_LFLGS_ALL_WAITING(lflgs) && got_sched_wakeups())
+ wakeup_managed(0); /* Someone needs to make progress */
+}
+
+void
+erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
+{
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+ ErtsThrPrgrVal current, val;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ /*
+ * We aren't allowed to continue until our thread
+ * progress is past global current.
+ */
+ val = current = read_acqb(&erts_thr_prgr__.current);
+ while (1) {
+ val++;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->previous.local = val;
+ set_mb(&intrnl->thr[tpd->id].data.current, val);
+ val = read_acqb(&erts_thr_prgr__.current);
+ if (current == val)
+ break;
+ current = val;
+ }
+ if (block_count_inc())
+ block_thread(tpd);
+ if (update(tpd))
+ leader_update(tpd);
+}
+
+void
+erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
+{
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ ERTS_THR_PROGRESS_STATE_DEBUG_SET_ACTIVE(tpd->id, on);
+
+ if (on) {
+ ASSERT(!tpd->active);
+ tpd->active = 1;
+ erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
+ }
+ else {
+ ASSERT(tpd->active);
+ tpd->active = 0;
+ erts_atomic32_dec_nob(&intrnl->misc.data.lflgs);
+ if (update(tpd))
+ leader_update(tpd);
+ }
+
+#ifdef DEBUG
+ {
+ erts_aint32_t n = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ n &= ERTS_THR_PRGR_LFLG_ACTIVE_MASK;
+ ASSERT(tpd->active <= n && n <= intrnl->managed.no);
+ }
+#endif
+
+}
+
+static ERTS_INLINE int
+has_reached_wakeup(ErtsThrPrgrVal wakeup)
+{
+ /*
+ * Exactly the same as erts_thr_progress_has_reached(), but
+ * also verifies valid wakeup requests in debug mode.
+ */
+ ErtsThrPrgrVal current;
+
+ current = read_acqb(&erts_thr_prgr__.current);
+
+#if ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE
+ {
+ ErtsThrPrgrVal limit;
+ /*
+ * erts_thr_progress_later() returns values which are
+ * equal to 'current + 2'. That is, users should never
+ * get a hold of values larger than that.
+ *
+ * In other words, valid values are values less than 'current + 3'.
+ *
+ * Values larger than this won't work with the wakeup
+ * algorithm.
+ */
+
+ limit = current + 3;
+ if (limit == ERTS_THR_PRGR_VAL_WAITING)
+ limit = 0;
+ else if (limit < current) /* Wrapped */
+ limit += 1;
+
+ if (!erts_thr_progress_has_passed__(limit, wakeup))
+ erl_exit(ERTS_ABORT_EXIT,
+ "Invalid wakeup request value found:"
+ " current=%b64u, wakeup=%b64u, limit=%b64u",
+ current, wakeup, limit);
+ }
+#endif
+
+ if (current == wakeup)
+ return 1;
+ return erts_thr_progress_has_passed__(current, wakeup);
+}
+
+static void
+request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
+{
+ ErtsThrPrgrManagedWakeupData *mwd;
+ int ix, wix;
+
+ /*
+ * Only managed threads that aren't in waiting state
+ * are allowed to call this function.
+ */
+
+ ASSERT(tpd->is_managed);
+ ASSERT(tpd->previous.local != ERTS_THR_PRGR_VAL_WAITING);
+
+ if (has_reached_wakeup(value)) {
+ wakeup_managed(tpd->id);
+ return;
+ }
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(value);
+ if (tpd->wakeup_request[wix] == value)
+ return; /* Already got a request registered */
+
+ ASSERT(erts_thr_progress_has_passed__(value,
+ tpd->wakeup_request[wix]));
+
+
+ if (tpd->previous.local == value) {
+ /*
+ * We have already confirmed this value. We need to request
+ * wakeup for a value later than our latest confirmed value in
+ * order to prevent progress from reaching the requested value
+ * while we are writing the request.
+ *
+ * It is ok to move the wakeup request forward since the only
+ * guarantee we make (and can make) is that the thread will be
+ * woken some time *after* the requested value has been reached.
+ */
+ value++;
+ if (value == ERTS_THR_PRGR_VAL_WAITING)
+ value = 0;
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(value);
+ if (tpd->wakeup_request[wix] == value)
+ return; /* Already got a request registered */
+
+ ASSERT(erts_thr_progress_has_passed__(value,
+ tpd->wakeup_request[wix]));
+ }
+
+ tpd->wakeup_request[wix] = value;
+
+ mwd = intrnl->managed.data[wix];
+
+ ix = erts_atomic32_inc_read_nob(&mwd->len) - 1;
+#if ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE
+ if (ix >= intrnl->managed.no)
+ erl_exit(ERTS_ABORT_EXIT, "Internal error: Too many wakeup requests\n");
+#endif
+ mwd->id[ix] = tpd->id;
+
+ ASSERT(!erts_thr_progress_has_reached(value));
+
+ /*
+ * This thread is guaranteed to issue a full memory barrier:
+ * - after the request has been written, but
+ * - before the global thread progress reach the (possibly
+ * increased) requested wakeup value.
+ */
+}
+
+static void
+request_wakeup_unmanaged(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
+{
+ int wix, ix, id, bit;
+ ErtsThrPrgrUnmanagedWakeupData *umwd;
+
+ ASSERT(!tpd->is_managed);
+
+ /*
+ * Thread progress *can* reach and pass our requested value while
+ * we are writing the request.
+ */
+
+ if (has_reached_wakeup(value)) {
+ wakeup_unmanaged(tpd->id);
+ return;
+ }
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(value);
+
+ if (tpd->wakeup_request[wix] == value)
+ return; /* Already got a request registered */
+
+ ASSERT(erts_thr_progress_has_passed__(value,
+ tpd->wakeup_request[wix]));
+
+ umwd = intrnl->unmanaged.data[wix];
+
+ id = tpd->id;
+
+ bit = id & ERTS_THR_PRGR_BM_MASK;
+ ix = id >> ERTS_THR_PRGR_BM_SHIFT;
+ ASSERT(0 <= ix && ix < umwd->low_sz);
+ erts_atomic32_read_bor_nob(&umwd->low[ix], 1 << bit);
+
+ bit = ix & ERTS_THR_PRGR_BM_MASK;
+ ix >>= ERTS_THR_PRGR_BM_SHIFT;
+ ASSERT(0 <= ix && ix < umwd->high_sz);
+ erts_atomic32_read_bor_nob(&umwd->high[ix], 1 << bit);
+
+ erts_atomic32_inc_mb(&umwd->len);
+
+ if (erts_thr_progress_has_reached(value))
+ wakeup_unmanaged(tpd->id);
+ else
+ tpd->wakeup_request[wix] = value;
+}
+
+void
+erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+ ErtsThrPrgrVal value)
+{
+ ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
+ ASSERT(!tpd->is_temporary);
+ if (tpd->is_managed)
+ request_wakeup_managed(tpd, value);
+ else
+ request_wakeup_unmanaged(tpd, value);
+}
+
+static void
+wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
+{
+ int hix;
+ for (hix = 0; hix < umwd->high_sz; hix++) {
+ erts_aint32_t hmask = erts_atomic32_read_nob(&umwd->high[hix]);
+ if (hmask) {
+ int hbase = hix << ERTS_THR_PRGR_BM_SHIFT;
+ int hbit;
+ for (hbit = 0; hbit < ERTS_THR_PRGR_BM_BITS; hbit++) {
+ if (hmask & (1 << hbit)) {
+ erts_aint_t lmask;
+ int lix = hbase + hbit;
+ ASSERT(0 <= lix && lix < umwd->low_sz);
+ lmask = erts_atomic32_read_nob(&umwd->low[lix]);
+ if (lmask) {
+ int lbase = lix << ERTS_THR_PRGR_BM_SHIFT;
+ int lbit;
+ for (lbit = 0; lbit < ERTS_THR_PRGR_BM_BITS; lbit++) {
+ if (lmask & (1 << lbit)) {
+ int id = lbase + lbit;
+ wakeup_unmanaged(id);
+ }
+ }
+ erts_atomic32_set_nob(&umwd->low[lix], 0);
+ }
+ }
+ }
+ erts_atomic32_set_nob(&umwd->high[hix], 0);
+ }
+ }
+}
+
+
+static void
+handle_wakeup_requests(ErtsThrPrgrVal current)
+{
+ ErtsThrPrgrManagedWakeupData *mwd;
+ ErtsThrPrgrUnmanagedWakeupData *umwd;
+ int wix, len, i;
+
+ wix = ERTS_THR_PRGR_WAKEUP_IX(current);
+
+ mwd = intrnl->managed.data[wix];
+ len = erts_atomic32_read_nob(&mwd->len);
+ ASSERT(len >= 0);
+ if (len) {
+ for (i = 0; i < len; i++)
+ wakeup_managed(mwd->id[i]);
+ erts_atomic32_set_nob(&mwd->len, 0);
+ }
+
+ umwd = intrnl->unmanaged.data[wix];
+ len = erts_atomic32_read_nob(&umwd->len);
+ ASSERT(len >= 0);
+ if (len) {
+ wakeup_unmanaged_threads(umwd);
+ erts_atomic32_set_nob(&umwd->len, 0);
+ }
+
+}
+
+static int
+got_sched_wakeups(void)
+{
+ int wix;
+
+ ERTS_THR_MEMORY_BARRIER;
+
+ for (wix = 0; wix < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; wix++) {
+ ErtsThrPrgrManagedWakeupData **mwd = intrnl->managed.data;
+ if (erts_atomic32_read_nob(&mwd[wix]->len))
+ return 1;
+ }
+ for (wix = 0; wix < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; wix++) {
+ ErtsThrPrgrUnmanagedWakeupData **umwd = intrnl->unmanaged.data;
+ if (erts_atomic32_read_nob(&umwd[wix]->len))
+ return 1;
+ }
+ return 0;
+}
+
+static erts_aint32_t
+block_thread(ErtsThrPrgrData *tpd)
+{
+ erts_aint32_t lflgs;
+ ErtsThrPrgrCallbacks *cbp = &intrnl->managed.callbacks[tpd->id];
+
+ do {
+ block_count_dec();
+
+ while (1) {
+ cbp->prepare_wait(cbp->arg);
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ cbp->wait(cbp->arg);
+ else
+ break;
+ }
+
+ } while (block_count_inc());
+
+ cbp->finalize_wait(cbp->arg);
+
+ return lflgs;
+}
+
+static erts_aint32_t
+thr_progress_block(ErtsThrPrgrData *tpd, int wait)
+{
+ erts_tse_t *event = NULL; /* Remove erroneous warning... sigh... */
+ erts_aint32_t lflgs, bc;
+
+ if (tpd->is_blocking++)
+ return (erts_aint32_t) 0;
+
+ while (1) {
+ lflgs = erts_atomic32_read_bor_nob(&intrnl->misc.data.lflgs,
+ ERTS_THR_PRGR_LFLG_BLOCK);
+ if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
+ block_thread(tpd);
+ else
+ break;
+ }
+
+#if ERTS_THR_PRGR_PRINT_BLOCKERS
+ erts_fprintf(stderr, "block(%d)\n", tpd->id);
+#endif
+
+ ASSERT(ERTS_AINT_NULL
+ == erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
+
+ if (wait) {
+ event = erts_tse_fetch();
+ erts_tse_reset(event);
+ erts_atomic_set_nob(&intrnl->misc.data.blocker_event,
+ (erts_aint_t) event);
+ }
+ if (tpd->is_managed)
+ erts_atomic32_dec_nob(&intrnl->misc.data.block_count);
+ bc = erts_atomic32_read_band_mb(&intrnl->misc.data.block_count,
+ ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
+ bc &= ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING;
+ if (wait) {
+ while (bc != 0) {
+ erts_tse_wait(event);
+ erts_tse_reset(event);
+ bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
+ }
+ }
+ return bc;
+
+}
+
+void
+erts_thr_progress_block(void)
+{
+ thr_progress_block(tmp_thr_prgr_data(NULL), 1);
+}
+
+void
+erts_thr_progress_fatal_error_block(SWord timeout,
+ ErtsThrPrgrData *tmp_tpd_bufp)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ erts_aint32_t bc;
+ SWord time_left = timeout;
+ SysTimeval to;
+
+ /*
+ * Counting poll intervals may give us too long a timeout
+ * if the CPU is busy. If tolerant time of day is available, we use it
+ * to prevent this.
+ */
+ if (!erts_disable_tolerant_timeofday) {
+ erts_get_timeval(&to);
+ to.tv_sec += timeout / 1000;
+ to.tv_usec += (timeout % 1000)*1000; /* timeout is in milliseconds */
+ to.tv_sec += to.tv_usec / 1000000;
+ to.tv_usec %= 1000000;
+ }
+
+ if (!tpd) {
+ /*
+ * We stack allocate since failure to allocate memory may
+ * have caused the problem in the first place. This is ok
+ * since we never complete an unblock after a fatal error
+ * block.
+ */
+ tpd = tmp_tpd_bufp;
+ init_tmp_thr_prgr_data(tpd);
+ }
+
+ bc = thr_progress_block(tpd, 0);
+ if (bc == 0)
+ return; /* Successfully blocked all managed threads */
+
+ while (1) {
+ if (erts_milli_sleep(ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL) == 0)
+ time_left -= ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL;
+ bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
+ if (bc == 0)
+ break; /* Successfully blocked all managed threads */
+ if (time_left <= 0)
+ break; /* Timeout */
+ if (!erts_disable_tolerant_timeofday) {
+ SysTimeval now;
+ erts_get_timeval(&now);
+ if (now.tv_sec > to.tv_sec)
+ break; /* Timeout */
+ if (now.tv_sec == to.tv_sec && now.tv_usec >= to.tv_usec)
+ break; /* Timeout */
+ }
+ }
+}
+
+void
+erts_thr_progress_unblock(void)
+{
+ erts_tse_t *event;
+ int id, break_id, sz, wakeup;
+ ErtsThrPrgrData *tpd = thr_prgr_data(NULL);
+
+ ASSERT(tpd->is_blocking);
+ if (--tpd->is_blocking)
+ return;
+
+ sz = intrnl->managed.no;
+
+ wakeup = 1;
+ if (!tpd->is_managed)
+ id = break_id = tpd->id < 0 ? 0 : tpd->id % sz;
+ else {
+ break_id = tpd->id;
+ id = break_id + 1;
+ if (id >= sz)
+ id = 0;
+ if (id == break_id)
+ wakeup = 0;
+ erts_atomic32_inc_nob(&intrnl->misc.data.block_count);
+ }
+
+ event = ((erts_tse_t *)
+ erts_atomic_read_nob(&intrnl->misc.data.blocker_event));
+ ASSERT(event);
+ erts_atomic_set_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);
+
+ erts_atomic32_read_bor_relb(&intrnl->misc.data.block_count,
+ ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
+#if ERTS_THR_PRGR_PRINT_BLOCKERS
+ erts_fprintf(stderr, "unblock(%d)\n", tpd->id);
+#endif
+ erts_atomic32_read_band_mb(&intrnl->misc.data.lflgs,
+ ~ERTS_THR_PRGR_LFLG_BLOCK);
+
+ if (wakeup) {
+ do {
+ ErtsThrPrgrVal tmp;
+ tmp = read_nob(&intrnl->thr[id].data.current);
+ if (tmp != ERTS_THR_PRGR_VAL_WAITING)
+ wakeup_managed(id);
+ if (++id >= sz)
+ id = 0;
+ } while (id != break_id);
+ }
+
+ return_tmp_thr_prgr_data(tpd);
+ erts_tse_return(event);
+}
+
+int
+erts_thr_progress_is_blocking(void)
+{
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ return tpd && tpd->is_blocking;
+}
+
+void erts_thr_progress_dbg_print_state(void)
+{
+ int id;
+ int sz = intrnl->managed.no;
+
+ erts_fprintf(stderr, "--- thread progress ---\n");
+ erts_fprintf(stderr,"current=%b64u\n", erts_thr_progress_current());
+ for (id = 0; id < sz; id++) {
+ ErtsThrPrgrVal current = read_nob(&intrnl->thr[id].data.current);
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ erts_aint32_t state_debug;
+ char *active, *leader;
+
+ state_debug = erts_atomic32_read_nob(&intrnl->thr[id].data.state_debug);
+ active = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE
+ ? "true"
+ : "false");
+ leader = (state_debug & ERTS_THR_PROGRESS_STATE_DEBUG_LEADER
+ ? "true"
+ : "false");
+#endif
+ if (current == ERTS_THR_PRGR_VAL_WAITING)
+ erts_fprintf(stderr,
+ " id=%d, current=WAITING"
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ ", active=%s, leader=%s"
+#endif
+ "\n", id
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ , active, leader
+#endif
+ );
+ else
+ erts_fprintf(stderr,
+ " id=%d, current=%b64u"
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ ", active=%s, leader=%s"
+#endif
+ "\n", id, current
+#ifdef ERTS_THR_PROGRESS_STATE_DEBUG
+ , active, leader
+#endif
+ );
+ }
+ erts_fprintf(stderr, "-----------------------\n");
+
+
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
new file mode 100644
index 0000000000..a71724b813
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -0,0 +1,303 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Thread progress information. Used by lock free algorithms
+ * to determine when all involved threads are guaranteed to
+ * have passed a specific point of execution.
+ *
+ * Usage instructions can be found in erl_thr_progress.c
+ *
+ * Author: Rickard Green
+ */
+
+#if !defined(ERL_THR_PROGRESS_H__TSD_TYPE__)
+#define ERL_THR_PROGRESS_H__TSD_TYPE__
+
+#include "sys.h"
+
+#ifndef ERTS_SMP
+
+#define erts_smp_thr_progress_block() ((void) 0)
+#define erts_smp_thr_progress_unblock() ((void) 0)
+#define erts_smp_thr_progress_is_blocking() 1
+
+#else /* ERTS_SMP */
+
+#define erts_smp_thr_progress_block erts_thr_progress_block
+#define erts_smp_thr_progress_unblock erts_thr_progress_unblock
+#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking
+
+void erts_thr_progress_block(void);
+void erts_thr_progress_unblock(void);
+int erts_thr_progress_is_blocking(void);
+
+typedef Uint64 ErtsThrPrgrVal;
+
+#define ERTS_THR_PRGR_WAKEUP_DATA_SIZE 4 /* Needs to be an even power of 2. */
+
+typedef struct {
+ int id;
+ int is_managed;
+ int is_blocking;
+ int is_temporary;
+
+ /* --- Part below only for registered threads --- */
+
+ ErtsThrPrgrVal wakeup_request[ERTS_THR_PRGR_WAKEUP_DATA_SIZE];
+
+ /* --- Part below only for managed threads --- */
+
+ int leader; /* Needs to be first in the managed threads part */
+ int active;
+ struct {
+ ErtsThrPrgrVal local;
+ ErtsThrPrgrVal next;
+ ErtsThrPrgrVal current;
+ } previous;
+} ErtsThrPrgrData;
+
+void erts_thr_progress_fatal_error_block(SWord timeout,
+ ErtsThrPrgrData *tmp_tpd_bufp);
+
+#endif /* ERTS_SMP */
+
+#endif
+
+#if !defined(ERL_THR_PROGRESS_H__) && !defined(ERL_THR_PROGRESS_TSD_TYPE_ONLY)
+#define ERL_THR_PROGRESS_H__
+
+#include "erl_threads.h"
+#include "erl_process.h"
+
+#ifdef ERTS_SMP
+
+#define ERTS_THR_PRGR_VAL_WAITING (~((ErtsThrPrgrVal) 0))
+#define ERTS_THR_PRGR_INVALID (~((ErtsThrPrgrVal) 0))
+
+extern erts_tsd_key_t erts_thr_prgr_data_key__;
+
+#ifdef ARCH_64
+# define ERTS_THR_PRGR_ATOMIC erts_atomic_t
+#else /* ARCH_32 */
+# define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
+#endif
+
+typedef struct {
+ void *arg;
+ void (*wakeup)(void *);
+ void (*prepare_wait)(void *);
+ void (*wait)(void *);
+ void (*finalize_wait)(void *);
+} ErtsThrPrgrCallbacks;
+
+typedef struct {
+ ERTS_THR_PRGR_ATOMIC current;
+} ErtsThrPrgr;
+
+extern ErtsThrPrgr erts_thr_prgr__;
+
+void erts_thr_progress_pre_init(void);
+void erts_thr_progress_init(int no_schedulers, int managed, int unmanaged);
+void erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
+ ErtsThrPrgrCallbacks *,
+ int);
+void erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *);
+void erts_thr_progress_active(ErtsSchedulerData *esdp, int on);
+void erts_thr_progress_wakeup(ErtsSchedulerData *esdp,
+ ErtsThrPrgrVal value);
+int erts_thr_progress_update(ErtsSchedulerData *esdp);
+int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
+void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
+void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+
+void erts_thr_progress_dbg_print_state(void);
+
+#ifdef ARCH_32
+#define ERTS_THR_PRGR_ATOMIC erts_dw_atomic_t
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint);
+#endif
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
+
+ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later_than(ErtsThrPrgrVal val);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(void);
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void);
+ERTS_GLB_INLINE int erts_thr_progress_has_passed__(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2);
+ERTS_GLB_INLINE int erts_thr_progress_has_reached_this(ErtsThrPrgrVal this, ErtsThrPrgrVal val);
+ERTS_GLB_INLINE int erts_thr_progress_cmp(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2);
+ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val);
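+
+/*
+ * A minimal sketch of how the values above are typically used (SMP case):
+ * a block is unlinked from a lock free structure, tagged with a progress
+ * value obtained via erts_thr_progress_later(), and deallocated only once
+ * erts_thr_progress_has_reached() reports that all managed threads have
+ * passed that point. The ex_* names and ex_really_free() are hypothetical
+ * stand-ins, not part of ERTS.
+ */
+#if 0
+typedef struct {
+    ErtsThrPrgrVal safe_at; /* progress value after which no thread can
+                               still reference 'blk' */
+    void *blk;              /* block that has been unlinked but not freed */
+} ExDeferredFree;
+
+static void
+ex_retire(ExDeferredFree *df, void *blk)
+{
+    df->blk = blk;
+    df->safe_at = erts_thr_progress_later();
+}
+
+static int
+ex_try_free(ExDeferredFree *df)
+{
+    if (!df->blk)
+        return 1;
+    if (!erts_thr_progress_has_reached(df->safe_at))
+        return 0; /* not yet safe; retry later or request a wakeup */
+    ex_really_free(df->blk); /* hypothetical deallocation routine */
+    df->blk = NULL;
+    return 1;
+}
+
+static void
+ex_update_global_state(void)
+{
+    /* Alternatively, block all managed threads at a safe point, update
+       state that is otherwise read without locks, and let them continue. */
+    erts_thr_progress_block();
+    /* ... modify shared state ... */
+    erts_thr_progress_unblock();
+}
+#endif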
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ARCH_64
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ return (ErtsThrPrgrVal) erts_atomic_read_nob(atmc);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ return (ErtsThrPrgrVal) erts_atomic_read_acqb(atmc);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ return (ErtsThrPrgrVal) erts_atomic_read_mb(atmc);
+}
+
+#else /* ARCH_32 */
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_dw_aint_to_val__(erts_dw_aint_t *dw_aint)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (ErtsThrPrgrVal) dw_aint->dw_sint;
+#else
+ ErtsThrPrgrVal res;
+ res = (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (ErtsThrPrgrVal) ((Uint32) dw_aint->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_nob__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ erts_dw_aint_t dw_aint;
+ erts_dw_atomic_read_nob(atmc, &dw_aint);
+ return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ erts_dw_aint_t dw_aint;
+ erts_dw_atomic_read_acqb(atmc, &dw_aint);
+ return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc)
+{
+ erts_dw_aint_t dw_aint;
+ erts_dw_atomic_read_mb(atmc, &dw_aint);
+ return erts_thr_prgr_dw_aint_to_val__(&dw_aint);
+}
+
+#endif
+
+ERTS_GLB_INLINE int
+erts_thr_progress_is_managed_thread(void)
+{
+ ErtsThrPrgrData *tpd = erts_tsd_get(erts_thr_prgr_data_key__);
+ return tpd && tpd->is_managed;
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_progress_current_to_later__(ErtsThrPrgrVal val)
+{
+ if (val == (ERTS_THR_PRGR_VAL_WAITING-((ErtsThrPrgrVal)2)))
+ return ((ErtsThrPrgrVal) 0);
+ else if (val == (ERTS_THR_PRGR_VAL_WAITING-((ErtsThrPrgrVal)1)))
+ return ((ErtsThrPrgrVal) 1);
+ else
+ return val + ((ErtsThrPrgrVal) 2);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_progress_later_than(ErtsThrPrgrVal val)
+{
+ ERTS_THR_MEMORY_BARRIER;
+ return erts_thr_progress_current_to_later__(val);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_progress_later(void)
+{
+ ErtsThrPrgrVal val = erts_thr_prgr_read_mb__(&erts_thr_prgr__.current);
+ return erts_thr_progress_current_to_later__(val);
+}
+
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_progress_current(void)
+{
+ if (erts_thr_progress_is_managed_thread())
+ return erts_thr_prgr_read_nob__(&erts_thr_prgr__.current);
+ else
+ return erts_thr_prgr_read_acqb__(&erts_thr_prgr__.current);
+}
+
+ERTS_GLB_INLINE int
+erts_thr_progress_has_passed__(ErtsThrPrgrVal val1, ErtsThrPrgrVal val0)
+{
+ if ((((((ErtsThrPrgrVal) 1) << 63) & val1)
+ ^ ((((ErtsThrPrgrVal) 1) << 63) & val0)) != 0) {
+ /* May have wrapped... */
+ if (val1 < (((ErtsThrPrgrVal) 1) << 62)
+ && val0 > (((ErtsThrPrgrVal) 3) << 62)) {
+ /*
+ * 'val1' has wrapped but 'val0' has not yet wrapped. While in
+ * these ranges 'current' is considered later than 'val0'.
+ */
+ return 1;
+ }
+ }
+ return val1 > val0;
+}
+
+ERTS_GLB_INLINE int
+erts_thr_progress_has_reached_this(ErtsThrPrgrVal this, ErtsThrPrgrVal val)
+{
+ if (this == val)
+ return 1;
+ return erts_thr_progress_has_passed__(this, val);
+}
+
+ERTS_GLB_INLINE int
+erts_thr_progress_cmp(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2)
+{
+ if (val1 == val2)
+ return 0;
+ if (erts_thr_progress_has_passed__(val1, val2))
+ return 1;
+ else
+ return -1;
+}
+
+ERTS_GLB_INLINE int
+erts_thr_progress_has_reached(ErtsThrPrgrVal val)
+{
+ ErtsThrPrgrVal current = erts_thr_progress_current();
+ return erts_thr_progress_has_reached_this(current, val);
+}
+
+#endif
+
+#endif /* ERTS_SMP */
+
+#endif
diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c
new file mode 100644
index 0000000000..70949ece76
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_queue.c
@@ -0,0 +1,762 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Lock-free queue for communication between threads.
+ *
+ * Currently only a many-to-one version has been
+ * implemented, i.e., many threads can enqueue but
+ * only one thread can dequeue at a time. It doesn't
+ * have to be the same thread dequeuing every time, but
+ * synchronization so that only one thread dequeues
+ * at a time has to be provided by other means.
+ *
+ * When/If the need for a many-to-many queue arises,
+ * this implementation can relatively easily be extended
+ * to support that too.
+ *
+ * Usage instructions below.
+ *
+ * Author: Rickard Green
+ */
+
+/*
+ * ------ Usage instructions -----------------------------------------------
+ *
+ * Dequeuing generates garbage that needs to be cleaned up.
+ * erts_thr_q_dequeue() automatically cleans, but garbage may have to be
+ * cleaned up even when the queue is empty. This is done by calling
+ * erts_thr_q_clean(). In the SMP case thread progress may have to be made
+ * before cleaning can continue. If so, erts_thr_q_need_thr_progress() in
+ * combination with erts_thr_progress_wakeup() can be used in order to
+ * request a wakeup at an appropriate time.
+ *
+ * Enqueuing implies memory allocation and dequeuing implies memory
+ * deallocation. Memory allocation can be moved to another more suitable
+ * thread using erts_thr_q_prepare_enqueue() together with
+ * erts_thr_q_enqueue_prepared() instead of using erts_thr_q_enqueue().
+ * Memory deallocation can be moved to another more suitable thread by
+ * disabling auto_finalize_dequeue when initializing the queue and then using
+ * erts_thr_q_get_finalize_dequeue_data() together with
+ * erts_thr_q_finalize_dequeue() after dequeuing or cleaning.
+ *
+ * Ending the life of the queue using either erts_thr_q_destroy()
+ * or erts_thr_q_finalize() implies cleaning the queue. Both functions
+ * return the cleaning result and may have to be called multiple times
+ * until the queue is clean. Once one of these functions has been called,
+ * enqueuing is not allowed. This has to be synchronized by the user.
+ * If auto_finalize_dequeue has been disabled, the finalize dequeue
+ * functionality has to be called after ending the life of the queue, just
+ * as when dequeuing or cleaning on a queue that is alive.
+ *
+ * -------------------------------------------------------------------------
+ */
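+
+/*
+ * A minimal consumer side sketch of the instructions above (a single
+ * dequeuing thread assumed). ex_handle() and the scheduler data pointer
+ * are hypothetical stand-ins.
+ */
+#if 0
+static void
+ex_drain(ErtsThrQ_t *q, ErtsSchedulerData *esdp)
+{
+    void *data;
+
+    while ((data = erts_thr_q_dequeue(q)) != NULL)
+        ex_handle(data); /* dequeuing also cleans up a little as it goes */
+
+    switch (erts_thr_q_clean(q)) {
+    case ERTS_THR_Q_CLEAN:
+        break; /* nothing more to do */
+    case ERTS_THR_Q_DIRTY:
+        break; /* call erts_thr_q_clean() again later */
+    case ERTS_THR_Q_NEED_THR_PRGR:
+#ifdef ERTS_SMP
+        /* Cleaning cannot continue until thread progress has been made;
+           request a wakeup when the needed progress value is reached. */
+        erts_thr_progress_wakeup(esdp, erts_thr_q_need_thr_progress(q));
+#endif
+        break;
+    }
+}
+#endif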
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_thr_queue.h"
+
+#if defined(DEBUG)
+#define ERTS_THR_Q_DBG_CHK_DATA 1
+#else
+#define ERTS_THR_Q_DBG_CHK_DATA 0
+#endif
+
+#define ERTS_THR_Q_MAX_CLEAN_REACHED_HEAD_COUNT 100
+#define ERTS_THR_Q_MAX_SCHED_CLEAN_OPS 50
+#define ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS 3
+
+#define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50
+
+#ifdef ERTS_SMP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element,
+ ErtsThrQElement_t,
+ 1000,
+ ERTS_ALC_T_THR_Q_EL_SL)
+#else
+
+static void
+init_sl_element_alloc(void)
+{
+}
+
+static ErtsThrQElement_t *
+sl_element_alloc(void)
+{
+ return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL,
+ sizeof(ErtsThrQElement_t));
+}
+
+static void
+sl_element_free(ErtsThrQElement_t *p)
+{
+ erts_free(ERTS_ALC_T_THR_Q_EL_SL, p);
+}
+
+#endif
+
+typedef union {
+ ErtsThrQ_t q;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQ_t))];
+} ErtsAlignedThrQ_t;
+
+void
+erts_thr_q_init(void)
+{
+ init_sl_element_alloc();
+}
+
+static void noop_callback(void *arg) { }
+
+void
+erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
+{
+#ifndef USE_THREADS
+ q->init = *qi;
+ if (!q->init.notify)
+ q->init.notify = noop_callback;
+ q->first = NULL;
+ q->last = NULL;
+ q->q.blk = NULL;
+#else
+ erts_atomic_init_nob(&q->tail.data.marker.next.atmc, ERTS_AINT_NULL);
+ q->tail.data.marker.data.ptr = NULL;
+ erts_atomic_init_nob(&q->tail.data.last,
+ (erts_aint_t) &q->tail.data.marker);
+ erts_atomic_init_nob(&q->tail.data.um_refc[0], 0);
+ erts_atomic_init_nob(&q->tail.data.um_refc[1], 0);
+ erts_atomic32_init_nob(&q->tail.data.um_refc_ix, 0);
+ q->tail.data.live = qi->live.objects;
+ q->tail.data.arg = qi->arg;
+ q->tail.data.notify = qi->notify;
+ if (!q->tail.data.notify)
+ q->tail.data.notify = noop_callback;
+
+ q->head.head.ptr = &q->tail.data.marker;
+ q->head.live = qi->live.objects;
+ q->head.first = &q->tail.data.marker;
+ q->head.unref_end = &q->tail.data.marker;
+ q->head.clean_reached_head_count = 0;
+ q->head.deq_fini.automatic = qi->auto_finalize_dequeue;
+ q->head.deq_fini.start = NULL;
+ q->head.deq_fini.end = NULL;
+#ifdef ERTS_SMP
+ q->head.next.thr_progress = erts_thr_progress_current();
+ q->head.next.thr_progress_reached = 1;
+#endif
+ q->head.next.um_refc_ix = 1;
+ q->head.next.unref_end = &q->tail.data.marker;
+ q->head.used_marker = 1;
+ q->head.arg = qi->arg;
+ q->head.notify = q->tail.data.notify;
+ q->q.finalizing = 0;
+ q->q.live = qi->live.queue;
+ q->q.blk = NULL;
+#endif
+}
+
+ErtsThrQCleanState_t
+erts_thr_q_finalize(ErtsThrQ_t *q)
+{
+#ifdef USE_THREADS
+ q->q.finalizing = 1;
+#endif
+ while (erts_thr_q_dequeue(q));
+ return erts_thr_q_clean(q);
+}
+
+ErtsThrQ_t *
+erts_thr_q_create(ErtsThrQInit_t *qi)
+{
+ ErtsAlcType_t atype;
+ ErtsThrQ_t *q, *qblk;
+ UWord qw;
+
+ switch (qi->live.queue) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ atype = ERTS_ALC_T_THR_Q_SL;
+ break;
+ case ERTS_THR_Q_LIVE_LONG:
+ atype = ERTS_ALC_T_THR_Q_LL;
+ break;
+ default:
+ atype = ERTS_ALC_T_THR_Q;
+ break;
+ }
+
+ qw = (UWord) erts_alloc(atype,
+ sizeof(ErtsThrQ_t) + (ERTS_CACHE_LINE_SIZE-1));
+ qblk = (ErtsThrQ_t *) qw;
+ if (qw & ERTS_CACHE_LINE_MASK)
+ qw = (qw & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((qw & ERTS_CACHE_LINE_MASK) == 0);
+ q = (ErtsThrQ_t *) qw;
+ erts_thr_q_initialize(q, qi);
+ q->q.blk = qblk;
+ return q;
+}
+
+ErtsThrQCleanState_t
+erts_thr_q_destroy(ErtsThrQ_t *q)
+{
+ if (!q->q.blk)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Trying to destroy not created thread queue\n");
+ return erts_thr_q_finalize(q);
+}
+
+#ifdef USE_THREADS
+
+static void
+destroy(ErtsThrQ_t *q)
+{
+ ErtsAlcType_t atype;
+ switch (q->q.live) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ atype = ERTS_ALC_T_THR_Q_SL;
+ break;
+ case ERTS_THR_Q_LIVE_LONG:
+ atype = ERTS_ALC_T_THR_Q_LL;
+ break;
+ default:
+ atype = ERTS_ALC_T_THR_Q;
+ break;
+ }
+ erts_free(atype, q->q.blk);
+}
+
+#endif
+
+static ERTS_INLINE ErtsThrQElement_t *
+element_live_alloc(ErtsThrQLive_t live)
+{
+ switch (live) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ return sl_element_alloc();
+ default:
+ return (ErtsThrQElement_t *) erts_alloc(ERTS_ALC_T_THR_Q_EL,
+ sizeof(ErtsThrQElement_t));
+ }
+}
+
+static ERTS_INLINE ErtsThrQElement_t *
+element_alloc(ErtsThrQ_t *q)
+{
+ ErtsThrQLive_t live;
+#ifdef USE_THREADS
+ live = q->tail.data.live;
+#else
+ live = q->init.live.objects;
+#endif
+ return element_live_alloc(live);
+}
+
+static ERTS_INLINE void
+element_live_free(ErtsThrQLive_t live, ErtsThrQElement_t *el)
+{
+ switch (live) {
+ case ERTS_THR_Q_LIVE_SHORT:
+ sl_element_free(el);
+ break;
+ default:
+ erts_free(ERTS_ALC_T_THR_Q_EL, el);
+ }
+}
+
+static ERTS_INLINE void
+element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el)
+{
+ ErtsThrQLive_t live;
+#ifdef USE_THREADS
+ live = q->head.live;
+#else
+ live = q->init.live.objects;
+#endif
+ element_live_free(live, el);
+}
+
+#ifdef USE_THREADS
+
+static ERTS_INLINE ErtsThrQElement_t *
+enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this, int want_last)
+{
+ erts_aint_t ilast, itmp;
+
+ erts_atomic_init_nob(&this->next.atmc, ERTS_AINT_NULL);
+ /* Enqueue at end of list... */
+
+ ilast = erts_atomic_read_nob(&q->tail.data.last);
+ while (1) {
+ ErtsThrQElement_t *last = (ErtsThrQElement_t *) ilast;
+ itmp = erts_atomic_cmpxchg_mb(&last->next.atmc,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL)
+ break;
+ ilast = itmp;
+ }
+
+ /* Move last pointer forward... */
+ while (1) {
+ if (want_last) {
+ if (erts_atomic_read_rb(&this->next.atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ ilast = erts_atomic_read_rb(&q->tail.data.last);
+ return (ErtsThrQElement_t *) ilast;
+ }
+ }
+ else {
+ if (erts_atomic_read_nob(&this->next.atmc) != ERTS_AINT_NULL) {
+ /* Someone else will move it forward */
+ return NULL;
+ }
+ }
+ itmp = erts_atomic_cmpxchg_mb(&q->tail.data.last,
+ (erts_aint_t) this,
+ ilast);
+ if (ilast == itmp)
+ return want_last ? this : NULL;
+ ilast = itmp;
+ }
+}
+
+static ErtsThrQCleanState_t
+clean(ErtsThrQ_t *q, int max_ops, int do_notify)
+{
+ erts_aint_t ilast;
+ int um_refc_ix;
+ int ops;
+
+ for (ops = 0; ops < max_ops; ops++) {
+ ErtsThrQElement_t *tmp;
+ restart:
+ ASSERT(q->head.first);
+ if (q->head.first == q->head.head.ptr) {
+ q->head.clean_reached_head_count++;
+ if (q->head.clean_reached_head_count
+ >= ERTS_THR_Q_MAX_CLEAN_REACHED_HEAD_COUNT) {
+ q->head.clean_reached_head_count = 0;
+ break;
+ }
+ goto inspect_head;
+ }
+ if (q->head.first == q->head.unref_end)
+ break;
+ if (q->head.first == &q->tail.data.marker) {
+ q->head.used_marker = 0;
+ q->head.first = q->head.first->next.ptr;
+ goto restart;
+ }
+ tmp = q->head.first;
+ q->head.first = q->head.first->next.ptr;
+ if (q->head.deq_fini.automatic)
+ element_free(q, tmp);
+ else {
+ tmp->data.ptr = (void *) (UWord) q->head.live;
+ if (!q->head.deq_fini.start)
+ q->head.deq_fini.start = tmp;
+ else if (q->head.deq_fini.end->next.ptr == &q->tail.data.marker)
+ q->head.deq_fini.end->next.ptr = tmp;
+ q->head.deq_fini.end = tmp;
+ }
+ }
+
+ ilast = erts_atomic_read_nob(&q->tail.data.last);
+ if (q->head.first == ((ErtsThrQElement_t *) ilast)
+ && ((ErtsThrQElement_t *) ilast) == &q->tail.data.marker
+ && q->head.first == &q->tail.data.marker) {
+ /* Empty and clean queue */
+ if (q->q.finalizing)
+ destroy(q);
+ return ERTS_THR_Q_CLEAN;
+ }
+
+#ifdef ERTS_SMP
+ if (q->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(q->head.next.thr_progress)) {
+ q->head.next.thr_progress_reached = 1;
+#endif
+ um_refc_ix = q->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
+ /* Move unreferenced end pointer forward... */
+ q->head.clean_reached_head_count = 0;
+ q->head.unref_end = q->head.next.unref_end;
+
+ if (!q->head.used_marker
+ && q->head.unref_end == (ErtsThrQElement_t *) ilast) {
+ q->head.used_marker = 1;
+ ilast = (erts_aint_t) enqueue_managed(q,
+ &q->tail.data.marker,
+ 1);
+ if (q->head.head.ptr == q->head.unref_end) {
+ ErtsThrQElement_t *next;
+ next = ((ErtsThrQElement_t *)
+ erts_atomic_read_acqb(&q->head.head.ptr->next.atmc));
+ if (next == &q->tail.data.marker) {
+ q->head.head.ptr->next.ptr = &q->tail.data.marker;
+ q->head.head.ptr = &q->tail.data.marker;
+ }
+ }
+ }
+
+ if (q->head.unref_end == (ErtsThrQElement_t *) ilast)
+ ERTS_SMP_MEMORY_BARRIER;
+ else {
+ q->head.next.unref_end = (ErtsThrQElement_t *) ilast;
+#ifdef ERTS_SMP
+ q->head.next.thr_progress = erts_thr_progress_later();
+#endif
+ erts_atomic32_set_relb(&q->tail.data.um_refc_ix,
+ um_refc_ix);
+ q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
+#ifdef ERTS_SMP
+ q->head.next.thr_progress_reached = 0;
+#endif
+ }
+ }
+#ifdef ERTS_SMP
+ }
+#endif
+
+ if (q->head.first == q->head.head.ptr) {
+ inspect_head:
+ if (!q->head.used_marker) {
+ erts_aint_t inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL) {
+ q->head.used_marker = 1;
+ (void) enqueue_managed(q, &q->tail.data.marker, 0);
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == (erts_aint_t) &q->tail.data.marker) {
+ q->head.head.ptr->next.ptr = &q->tail.data.marker;
+ q->head.head.ptr = &q->tail.data.marker;
+ goto check_thr_progress;
+ }
+ }
+ }
+
+ if (q->q.finalizing) {
+ ilast = erts_atomic_read_nob(&q->tail.data.last);
+ if (q->head.first == ((ErtsThrQElement_t *) ilast)
+ && ((ErtsThrQElement_t *) ilast) == &q->tail.data.marker
+ && q->head.first == &q->tail.data.marker) {
+ destroy(q);
+ }
+ else {
+ goto dirty;
+ }
+ }
+ return ERTS_THR_Q_CLEAN;
+ }
+
+ if (q->head.first != q->head.unref_end)
+ goto dirty;
+
+check_thr_progress:
+
+#ifdef ERTS_SMP
+ if (q->head.next.thr_progress_reached)
+#endif
+ {
+ int um_refc_ix = q->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
+ dirty:
+ if (do_notify)
+ q->head.notify(q->head.arg);
+ return ERTS_THR_Q_DIRTY;
+ }
+ }
+
+ return ERTS_THR_Q_NEED_THR_PRGR;
+}
+
+#endif
+
+ErtsThrQCleanState_t
+erts_thr_q_clean(ErtsThrQ_t *q)
+{
+#ifdef USE_THREADS
+ return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0);
+#else
+ return ERTS_THR_Q_CLEAN;
+#endif
+}
+
+ErtsThrQCleanState_t
+erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
+{
+#ifndef USE_THREADS
+ return ERTS_THR_Q_CLEAN;
+#else
+ if (ensure_empty) {
+ erts_aint_t inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext != ERTS_AINT_NULL) {
+ if (&q->tail.data.marker != (ErtsThrQElement_t *) inext)
+ return ERTS_THR_Q_DIRTY;
+ else {
+ q->head.head.ptr->next.ptr = (ErtsThrQElement_t *) inext;
+ q->head.head.ptr = (ErtsThrQElement_t *) inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext != ERTS_AINT_NULL)
+ return ERTS_THR_Q_DIRTY;
+ }
+ }
+ }
+
+ if (q->head.first == q->head.head.ptr) {
+ if (!q->head.used_marker) {
+ erts_aint_t inext;
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL)
+ return ERTS_THR_Q_DIRTY;
+ }
+ return ERTS_THR_Q_CLEAN;
+ }
+
+ if (q->head.first != q->head.unref_end)
+ return ERTS_THR_Q_DIRTY;
+
+#ifdef ERTS_SMP
+ if (q->head.next.thr_progress_reached)
+#endif
+ {
+ int um_refc_ix = q->head.next.um_refc_ix;
+ if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0)
+ return ERTS_THR_Q_DIRTY;
+ }
+ return ERTS_THR_Q_NEED_THR_PRGR;
+#endif
+}
+
+static void
+enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
+{
+#ifndef USE_THREADS
+ ASSERT(data);
+
+ this->next.ptr = NULL;
+ this->data.ptr = data;
+
+ if (q->last)
+ q->last->next.ptr = this;
+ else {
+ q->first = q->last = this;
+ q->init.notify(q->init.arg);
+ }
+#else
+ int notify;
+ int um_refc_ix = 0;
+#ifdef ERTS_SMP
+ int unmanaged_thread;
+#endif
+
+#if ERTS_THR_Q_DBG_CHK_DATA
+ if (!data)
+ erl_exit(ERTS_ABORT_EXIT, "Missing data in enqueue\n");
+#endif
+
+ ASSERT(!q->q.finalizing);
+
+ this->data.ptr = data;
+
+#ifdef ERTS_SMP
+ unmanaged_thread = !erts_thr_progress_is_managed_thread();
+ if (unmanaged_thread)
+#endif
+ {
+ um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
+ while (1) {
+ int tmp_um_refc_ix;
+ erts_atomic_inc_acqb(&q->tail.data.um_refc[um_refc_ix]);
+ tmp_um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
+ if (tmp_um_refc_ix == um_refc_ix)
+ break;
+ erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
+ um_refc_ix = tmp_um_refc_ix;
+ }
+ }
+
+ notify = this == enqueue_managed(q, this, 1);
+
+
+#ifdef ERTS_SMP
+ if (unmanaged_thread)
+#endif
+ {
+ if (notify)
+ erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
+ else if (erts_atomic_dec_read_relb(&q->tail.data.um_refc[um_refc_ix]) == 0)
+ notify = 1;
+ }
+ if (notify)
+ q->tail.data.notify(q->tail.data.arg);
+#endif
+}
+
+void
+erts_thr_q_enqueue(ErtsThrQ_t *q, void *data)
+{
+ enqueue(q, data, element_alloc(q));
+}
+
+ErtsThrQPrepEnQ_t *
+erts_thr_q_prepare_enqueue(ErtsThrQ_t *q)
+{
+ return (ErtsThrQPrepEnQ_t *) element_alloc(q);
+}
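+
+/*
+ * A small sketch of the prepared enqueue path described in the usage
+ * instructions above: the element is allocated up front by
+ * erts_thr_q_prepare_enqueue() on a thread where allocation is acceptable,
+ * and later enqueued without any allocation using
+ * erts_thr_q_enqueue_prepared(). The ex_* names are hypothetical.
+ */
+#if 0
+static ErtsThrQPrepEnQ_t *
+ex_preallocate_element(ErtsThrQ_t *q)
+{
+    return erts_thr_q_prepare_enqueue(q);
+}
+
+static void
+ex_produce(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep)
+{
+    /* No memory allocation happens on this (producing) thread. */
+    erts_thr_q_enqueue_prepared(q, data, prep);
+}
+#endif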
+
+int
+erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
+{
+#ifndef USE_THREADS
+ return 0;
+#else
+#ifdef DEBUG
+ if (!q->head.deq_fini.start) {
+ ASSERT(!q->head.deq_fini.end);
+ }
+ else {
+ ErtsThrQElement_t *e = q->head.deq_fini.start;
+ ErtsThrQElement_t *end = q->head.deq_fini.end;
+ while (e != end) {
+ ASSERT(q->head.head.ptr != e);
+ ASSERT(q->head.first != e);
+ ASSERT(q->head.unref_end != e);
+ e = e->next.ptr;
+ }
+ }
+#endif
+ fdp->start = q->head.deq_fini.start;
+ fdp->end = q->head.deq_fini.end;
+ if (fdp->end)
+ fdp->end->next.ptr = NULL;
+ q->head.deq_fini.start = NULL;
+ q->head.deq_fini.end = NULL;
+ return fdp->start != NULL;
+#endif
+}
+
+void
+erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
+ ErtsThrQFinDeQ_t *fdp1)
+{
+#ifdef USE_THREADS
+ if (fdp1->start) {
+ if (fdp0->end)
+ fdp0->end->next.ptr = fdp1->start;
+ else
+ fdp0->start = fdp1->start;
+ fdp0->end = fdp1->end;
+ }
+#endif
+}
+
+
+int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
+{
+#ifdef USE_THREADS
+ ErtsThrQElement_t *start = state->start;
+ if (start) {
+ ErtsThrQLive_t live;
+ int i;
+ for (i = 0; i < ERTS_THR_Q_MAX_FINI_DEQ_OPS; i++) {
+ ErtsThrQElement_t *tmp;
+ if (!start)
+ break;
+ tmp = start;
+ start = start->next.ptr;
+ live = (ErtsThrQLive_t) (UWord) tmp->data.ptr;
+ element_live_free(live, tmp);
+ }
+ state->start = start;
+ if (start)
+ return 1; /* More to do */
+ state->end = NULL;
+ }
+#endif
+ return 0;
+}
+
+void
+erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state)
+{
+#ifdef USE_THREADS
+ state->start = NULL;
+ state->end = NULL;
+#endif
+}
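+
+/*
+ * A sketch of moving element deallocation to another thread when
+ * auto_finalize_dequeue has been disabled: the dequeuing thread collects
+ * finalize dequeue data after dequeuing or cleaning, and some other thread
+ * frees it. How the ErtsThrQFinDeQ_t data is handed over between the two
+ * threads is up to the user; the ex_* names are hypothetical.
+ */
+#if 0
+/* On the dequeuing thread, after dequeuing or cleaning: */
+static void
+ex_collect_garbage(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *pending)
+{
+    ErtsThrQFinDeQ_t fin;
+    /* 'pending' must have been initialized once with
+       erts_thr_q_finalize_dequeue_state_init(). */
+    if (erts_thr_q_get_finalize_dequeue_data(q, &fin))
+        erts_thr_q_append_finalize_dequeue_data(pending, &fin);
+}
+
+/* On whichever thread does the deallocation: */
+static void
+ex_free_garbage(ErtsThrQFinDeQ_t *pending)
+{
+    /* Each call frees a bounded number of elements and returns
+       non-zero while more work remains. */
+    while (erts_thr_q_finalize_dequeue(pending))
+        ;
+}
+#endif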
+
+
+void
+erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep)
+{
+ ASSERT(prep);
+ enqueue(q, data, (ErtsThrQElement_t *) prep);
+}
+
+void *
+erts_thr_q_dequeue(ErtsThrQ_t *q)
+{
+#ifndef USE_THREADS
+ void *res;
+ ErtsThrQElement_t *tmp;
+
+ if (!q->first)
+ return NULL;
+ tmp = q->first;
+ res = tmp->data.ptr;
+ q->first = tmp->next.ptr;
+ if (!q->first)
+ q->last = NULL;
+
+ element_free(q, tmp);
+
+ return res;
+#else
+ erts_aint_t inext;
+ void *res;
+
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL)
+ return NULL;
+ q->head.head.ptr->next.ptr = (ErtsThrQElement_t *) inext;
+ q->head.head.ptr = (ErtsThrQElement_t *) inext;
+ if (q->head.head.ptr == &q->tail.data.marker) {
+ inext = erts_atomic_read_acqb(&q->head.head.ptr->next.atmc);
+ if (inext == ERTS_AINT_NULL)
+ return NULL;
+ q->head.head.ptr->next.ptr = (ErtsThrQElement_t *) inext;
+ q->head.head.ptr = (ErtsThrQElement_t *) inext;
+ }
+ res = q->head.head.ptr->data.ptr;
+#if ERTS_THR_Q_DBG_CHK_DATA
+ q->head.head.ptr->data.ptr = NULL;
+ if (!res)
+ erl_exit(ERTS_ABORT_EXIT, "Missing data in dequeue\n");
+#endif
+ clean(q,
+ (q->head.deq_fini.automatic
+ ? ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS
+ : ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1);
+ return res;
+#endif
+}
diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h
new file mode 100644
index 0000000000..edcf2c3823
--- /dev/null
+++ b/erts/emulator/beam/erl_thr_queue.h
@@ -0,0 +1,209 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Lock-free queue for communication between threads.
+ *
+ * Currently only a many-to-one version has been
+ * implemented, i.e., many threads can enqueue but
+ * only one thread can dequeue at a time. It doesn't
+ * have to be the same thread dequeuing every time, but
+ * synchronization so that only one thread dequeues
+ * at a time has to be provided by other means.
+ *
+ * When/If the need for a many-to-many queue arises,
+ * this implementation can relatively easily be extended
+ * to support that too.
+ *
+ * Usage instructions can be found in erl_thr_queue.c
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_THR_QUEUE_H__
+#define ERL_THR_QUEUE_H__
+
+#include "sys.h"
+#include "erl_threads.h"
+#include "erl_alloc.h"
+#include "erl_thr_progress.h"
+
+typedef enum {
+ ERTS_THR_Q_LIVE_UNDEF,
+ ERTS_THR_Q_LIVE_SHORT,
+ ERTS_THR_Q_LIVE_LONG
+} ErtsThrQLive_t;
+
+#define ERTS_THR_Q_INIT_DEFAULT \
+{ \
+ { \
+ ERTS_THR_Q_LIVE_UNDEF, \
+ ERTS_THR_Q_LIVE_SHORT \
+ }, \
+ NULL, \
+ NULL, \
+ 1 \
+}
+
+typedef struct ErtsThrQ_t_ ErtsThrQ_t;
+
+typedef struct {
+ struct {
+ ErtsThrQLive_t queue;
+ ErtsThrQLive_t objects;
+ } live;
+ void *arg;
+ void (*notify)(void *);
+ int auto_finalize_dequeue;
+} ErtsThrQInit_t;
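+
+/*
+ * A minimal creation sketch using the default initializer above;
+ * ex_notify() is a hypothetical callback invoked when the queue needs
+ * attention (new content on an empty queue, or further cleaning).
+ */
+#if 0
+static ErtsThrQ_t *
+ex_make_queue(void *arg)
+{
+    ErtsThrQInit_t qi = ERTS_THR_Q_INIT_DEFAULT;
+    qi.live.queue = ERTS_THR_Q_LIVE_LONG;    /* the queue itself is long lived */
+    qi.live.objects = ERTS_THR_Q_LIVE_SHORT; /* enqueued elements are short lived */
+    qi.arg = arg;
+    qi.notify = ex_notify;
+    qi.auto_finalize_dequeue = 1;            /* free dequeued elements automatically */
+    return erts_thr_q_create(&qi);           /* declared further down in this header */
+}
+#endif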
+
+typedef struct ErtsThrQElement_t_ ErtsThrQElement_t;
+typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t;
+
+typedef union {
+ erts_atomic_t atmc;
+ ErtsThrQElement_t *ptr;
+} ErtsThrQPtr_t;
+
+struct ErtsThrQElement_t_ {
+ ErtsThrQPtr_t next;
+ union {
+ erts_atomic_t atmc;
+ void *ptr;
+ } data;
+};
+
+typedef struct {
+ ErtsThrQElement_t *start;
+ ErtsThrQElement_t *end;
+} ErtsThrQFinDeQ_t;
+
+typedef enum {
+ ERTS_THR_Q_CLEAN,
+ ERTS_THR_Q_NEED_THR_PRGR,
+ ERTS_THR_Q_DIRTY,
+} ErtsThrQCleanState_t;
+
+#ifdef USE_THREADS
+
+typedef struct {
+ ErtsThrQElement_t marker;
+ erts_atomic_t last;
+ erts_atomic_t um_refc[2];
+ erts_atomic32_t um_refc_ix;
+ ErtsThrQLive_t live;
+#ifdef ERTS_SMP
+ erts_atomic32_t thr_prgr_clean_scheduled;
+#endif
+ void *arg;
+ void (*notify)(void *);
+} ErtsThrQTail_t;
+
+struct ErtsThrQ_t_ {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /* Modified by threads enqueuing */
+ ErtsThrQTail_t data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrQTail_t))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread dequeuing.
+ */
+ struct {
+ ErtsThrQPtr_t head;
+ ErtsThrQLive_t live;
+ ErtsThrQElement_t *first;
+ ErtsThrQElement_t *unref_end;
+ int clean_reached_head_count;
+ struct {
+ int automatic;
+ ErtsThrQElement_t *start;
+ ErtsThrQElement_t *end;
+ } deq_fini;
+ struct {
+#ifdef ERTS_SMP
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+#endif
+ int um_refc_ix;
+ ErtsThrQElement_t *unref_end;
+ } next;
+ int used_marker;
+ void *arg;
+ void (*notify)(void *);
+ } head;
+ struct {
+ int finalizing;
+ ErtsThrQLive_t live;
+ void *blk;
+ } q;
+};
+
+#else /* !USE_THREADS */
+
+struct ErtsThrQ_t_ {
+ ErtsThrQInit_t init;
+ ErtsThrQElement_t *first;
+ ErtsThrQElement_t *last;
+ struct {
+ void *blk;
+ } q;
+};
+
+#endif
+
+void erts_thr_q_init(void);
+void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *);
+ErtsThrQCleanState_t erts_thr_q_finalize(ErtsThrQ_t *);
+ErtsThrQ_t *erts_thr_q_create(ErtsThrQInit_t *);
+ErtsThrQCleanState_t erts_thr_q_destroy(ErtsThrQ_t *);
+ErtsThrQCleanState_t erts_thr_q_clean(ErtsThrQ_t *);
+ErtsThrQCleanState_t erts_thr_q_inspect(ErtsThrQ_t *, int);
+ErtsThrQPrepEnQ_t *erts_thr_q_prepare_enqueue(ErtsThrQ_t *);
+void erts_thr_q_enqueue_prepared(ErtsThrQ_t *, void *, ErtsThrQPrepEnQ_t *);
+void erts_thr_q_enqueue(ErtsThrQ_t *, void *);
+void * erts_thr_q_dequeue(ErtsThrQ_t *);
+int erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *,
+ ErtsThrQFinDeQ_t *);
+void erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *,
+ ErtsThrQFinDeQ_t *);
+int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *);
+void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *);
+
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE ErtsThrPrgrVal
+erts_thr_q_need_thr_progress(ErtsThrQ_t *q)
+{
+ return q->head.next.thr_progress;
+}
+#endif
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_THR_QUEUE_H__ */
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 8c9cace0c5..ee47c98009 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,9 +25,243 @@
#ifndef ERL_THREAD_H__
#define ERL_THREAD_H__
+/*
+ * --- Documentation of atomics and memory barriers --------------------------
+ *
+ * The following explicit memory barriers exist:
+ *
+ * - ERTS_THR_MEMORY_BARRIER
+ * Full memory barrier. Orders both loads, and stores. No
+ * load or store is allowed to be reordered over the
+ * barrier.
+ * - ERTS_THR_WRITE_MEMORY_BARRIER
+ * Write barrier. Orders *only* stores. These are not
+ * allowed to be reordered over the barrier.
+ * - ERTS_THR_READ_MEMORY_BARRIER
+ * Read barrier. Orders *only* loads. These are not
+ * allowed to be reordered over the barrier.
+ * - ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
+ * Data dependency read barrier. Orders *only* loads
+ * according to data dependency across the barrier.
+ *
+ * If thread support has been disabled, these barriers will become no-ops.
+ *
+ * If the prefix ERTS_THR_ is replaced with ERTS_SMP_, the barriers will
+ * be enabled only in the SMP enabled runtime system.
+ *
+ * --- Atomic operations ---
+ *
+ * Atomic operations exist for 32-bit, word size, and double word size
+ * integers. Function prototypes are listed below.
+ *
+ * Each function implementing an atomic operation exists with the following
+ * implied memory barrier semantics. Not all combinations are useful, but
+ * all of them exist for simplicity. <B> is the suffix in the function name:
+ *
+ * - <B> - Description
+ *
+ * - mb - Full memory barrier. Orders both loads, and
+ * stores before, and after the atomic operation.
+ * No load or store is allowed to be reordered
+ * over the atomic operation.
+ * - relb - Release barrier. Orders both loads, and
+ * stores appearing *before* the atomic
+ * operation. These are not allowed to be
+ * reordered over the atomic operation.
+ * - acqb - Acquire barrier. Orders both loads, and stores
+ * appearing *after* the atomic operation. These
+ * are not allowed to be reordered over the
+ * atomic operation.
+ * - wb - Write barrier. Orders *only* stores. These are
+ * not allowed to be reordered over the barrier.
+ * Store in atomic operation is ordered *after*
+ * the barrier.
+ * - rb - Read barrier. Orders *only* loads. These are
+ * not allowed to be reordered over the barrier.
+ * Load in atomic operation is ordered *before*
+ * the barrier.
+ * - ddrb - Data dependency read barrier. Orders *only*
+ * loads according to data dependency across the
+ * barrier. Load in atomic operation is ordered
+ * before the barrier.
+ *
+ * If thread support has been disabled, these functions are mapped to
+ * functions that perform the same operation, but aren't atomic
+ * and don't imply any memory barriers.
+ *
+ * If the atomic operations are prefixed with erts_smp_ instead of only
+ * erts_, the atomic operations will only be atomic in the SMP enabled
+ * runtime system, and will be mapped to non-atomic operations without
+ * memory barriers in the runtime system without SMP support. Atomic
+ * operations with erts_smp_ prefix should use the atomic types
+ * erts_smp_atomic32_t, erts_smp_atomic_t, and erts_smp_dw_atomic_t
+ * instead of erts_atomic32_t, erts_atomic_t, and erts_dw_atomic_t. The
+ * integer data types erts_aint32_t, erts_aint_t, and erts_dw_aint_t
+ * are the same.
+ *
+ * --- 32-bit atomic operations ---
+ *
+ * The following 32-bit atomic operations exist. <B> should be
+ * replaced with a supported memory barrier (see above). Note
+ * that sizeof(erts_atomic32_t) might be larger than 4!
+ *
+ *
+ * Initialize (not necessarily the same as the set operation):
+ * void erts_atomic32_init_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Set value:
+ * void erts_atomic32_set_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Read; returns current value:
+ * erts_aint32_t erts_atomic32_read_<B>(erts_atomic32_t *atmc);
+ *
+ * Increment; returns resulting value:
+ * erts_aint32_t erts_atomic32_inc_read_<B>(erts_atomic32_t *atmc);
+ *
+ * Decrement; returns resulting value:
+ * erts_aint32_t erts_atomic32_dec_read_<B>(erts_atomic32_t *atmc);
+ *
+ * Increment:
+ * void erts_atomic32_inc_<B>(erts_atomic32_t *atmc);
+ *
+ * Decrement:
+ * void erts_atomic32_dec_<B>(erts_atomic32_t *atmc);
+ *
+ * Add value; returns resulting value:
+ * erts_aint32_t erts_atomic32_add_read_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Add value:
+ * void erts_atomic32_add_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Bitwise-or; returns previous value:
+ * erts_aint32_t erts_atomic32_read_bor_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Bitwise-and; returns previous value:
+ * erts_aint32_t erts_atomic32_read_band_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Exchange; returns previous value:
+ * erts_aint32_t erts_atomic32_xchg_<B>(erts_atomic32_t *atmc,
+ * erts_aint32_t val);
+ *
+ * Compare and exchange; returns previous or current value. If
+ * returned value equals 'exp' the value was changed to 'new';
+ * otherwise not:
+ * erts_aint32_t erts_atomic32_cmpxchg_<B>(erts_atomic32_t *a,
+ * erts_aint32_t new,
+ * erts_aint32_t exp);
+ *
+ * --- Word size atomic operations ---
+ *
+ * The following word size (same size as sizeof(void *)) atomic
+ * operations exist. <B> should be replaced with a supported
+ * memory barrier (see above). Note that sizeof(erts_atomic_t)
+ * might be larger than sizeof(void *)!
+ *
+ * Initialize (not necessarily the same as the set operation):
+ * void erts_atomic_init_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Set value:
+ * void erts_atomic_set_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Read; returns current value:
+ * erts_aint_t erts_atomic_read_<B>(erts_atomic_t *atmc);
+ *
+ * Increment; returns resulting value:
+ * erts_aint_t erts_atomic_inc_read_<B>(erts_atomic_t *atmc);
+ *
+ * Decrement; returns resulting value:
+ * erts_aint_t erts_atomic_dec_read_<B>(erts_atomic_t *atmc);
+ *
+ * Increment:
+ * void erts_atomic_inc_<B>(erts_atomic_t *atmc);
+ *
+ * Decrement:
+ * void erts_atomic_dec_<B>(erts_atomic_t *atmc);
+ *
+ * Add value; returns resulting value:
+ * erts_aint_t erts_atomic_add_read_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Add value:
+ * void erts_atomic_add_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Bitwise-or; returns previous value:
+ * erts_aint_t erts_atomic_read_bor_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Bitwise-and; returns previous value:
+ * erts_aint_t erts_atomic_read_band_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Exchange; returns previous value:
+ * erts_aint_t erts_atomic_xchg_<B>(erts_atomic_t *atmc,
+ * erts_aint_t val);
+ *
+ * Compare and exchange; returns previous or current value. If
+ * returned value equals 'exp' the value was changed to 'new';
+ * otherwise not:
+ * erts_aint_t erts_atomic_cmpxchg_<B>(erts_atomic_t *a,
+ * erts_aint_t new,
+ * erts_aint_t exp);
+ *
+ * --- Double word size atomic operations ---
+ *
+ * The following double word atomic operations exist. <B> should be
+ * replaced with a supported memory barrier (see above).
+ *
+ * Note that sizeof(erts_dw_atomic_t) usually is larger than
+ * 2*sizeof(void *)!
+ *
+ * The erts_dw_aint_t data type should be accessed as if it was defined
+ * like this:
+ *
+ * typedef struct {
+ * erts_aint_t sint[2];
+ * } erts_dw_aint_t;
+ *
+ * Most significant word is 'sint[ERTS_DW_AINT_HIGH_WORD]' and least
+ * significant word is 'sint[ERTS_DW_AINT_LOW_WORD]'.
+ *
+ *
+ * Initialize (not necessarily the same as the set operation):
+ * void erts_dw_atomic_init_<B>(erts_dw_atomic_t *var,
+ * erts_dw_aint_t *val);
+ *
+ * Set; the value in 'val' is written to memory:
+ * void erts_dw_atomic_set_<B>(erts_dw_atomic_t *var,
+ * erts_dw_aint_t *val);
+ *
+ * Read; value is written into 'val':
+ * void erts_dw_atomic_read_<B>(erts_dw_atomic_t *var,
+ * erts_dw_aint_t *val);
+ *
+ * Compare and exchange; returns a value != 0 if exchange was
+ * made; otherwise 0. 'new' contains the new value to set. If 'exp_act'
+ * contains the same value as in memory when the function is called,
+ * 'new' is written to memory; otherwise, not. If exchange was not
+ * made, 'exp_act' contains the actual value in memory:
+ * int erts_dw_atomic_cmpxchg_<B>(erts_dw_atomic_t *var,
+ * erts_dw_aint_t *new,
+ * erts_dw_aint_t *exp_act);
+ */
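+
+/*
+ * A brief sketch of the operations documented above, assuming a reference
+ * counted object. The ex_* names are hypothetical stand-ins; the full
+ * barrier (_mb) variants are used for simplicity.
+ */
+#if 0
+typedef struct {
+    erts_atomic32_t refc;
+    /* ... payload ... */
+} ExObject;
+
+static void
+ex_init(ExObject *op)
+{
+    erts_atomic32_init_nob(&op->refc, 1); /* creator holds one reference */
+}
+
+static void
+ex_ref(ExObject *op)
+{
+    /* Caller already holds a reference, so no ordering is needed. */
+    erts_atomic32_inc_nob(&op->refc);
+}
+
+static void
+ex_unref(ExObject *op)
+{
+    if (erts_atomic32_dec_read_mb(&op->refc) == 0)
+        ex_destroy(op); /* hypothetical destructor */
+}
+#endif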
+
#define ERTS_SPIN_BODY ETHR_SPIN_BODY
#include "sys.h"
+
+typedef struct { SWord sint[2]; } erts_no_dw_atomic_t;
+typedef SWord erts_no_atomic_t;
+typedef Sint32 erts_no_atomic32_t;
+
#ifdef USE_THREADS
#define ETHR_TRY_INLINE_FUNCS
@@ -47,6 +281,9 @@
#endif
#define ERTS_THR_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#define ERTS_THR_WRITE_MEMORY_BARRIER ETHR_WRITE_MEMORY_BARRIER
+#define ERTS_THR_READ_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER ETHR_READ_DEPEND_MEMORY_BARRIER
#ifdef ERTS_ENABLE_LOCK_COUNT
#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
@@ -87,6 +324,8 @@ typedef struct {
#endif
} erts_rwmtx_t;
+#define ERTS_MTX_OPT_DEFAULT_INITER ETHR_MUTEX_OPT_DEFAULT_INITER
+#define ERTS_CND_OPT_DEFAULT_INITER ETHR_COND_OPT_DEFAULT_INITER
#define ERTS_RWMTX_OPT_DEFAULT_INITER ETHR_RWMUTEX_OPT_DEFAULT_INITER
#define ERTS_RWMTX_TYPE_NORMAL ETHR_RWMUTEX_TYPE_NORMAL
#define ERTS_RWMTX_TYPE_FREQUENT_READ ETHR_RWMUTEX_TYPE_FREQUENT_READ
@@ -99,10 +338,15 @@ typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
typedef ethr_tsd_key erts_tsd_key_t;
typedef ethr_ts_event erts_tse_t;
-typedef ethr_sint_t erts_aint_t;
-typedef ethr_atomic_t erts_atomic_t;
-typedef ethr_sint32_t erts_aint32_t;
-typedef ethr_atomic32_t erts_atomic32_t;
+#define erts_dw_aint_t ethr_dw_sint_t
+#define erts_dw_atomic_t ethr_dw_atomic_t
+#define erts_aint_t ethr_sint_t
+#define erts_atomic_t ethr_atomic_t
+#define erts_aint32_t ethr_sint32_t
+#define erts_atomic32_t ethr_atomic32_t
+
+#define ERTS_DW_AINT_HIGH_WORD ETHR_DW_SINT_HIGH_WORD
+#define ERTS_DW_AINT_LOW_WORD ETHR_DW_SINT_LOW_WORD
/* spinlock */
typedef struct {
@@ -140,6 +384,9 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
#else /* #ifdef USE_THREADS */
#define ERTS_THR_MEMORY_BARRIER
+#define ERTS_THR_WRITE_MEMORY_BARRIER
+#define ERTS_THR_READ_MEMORY_BARRIER
+#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
#define ERTS_THR_OPTS_DEFAULT_INITER 0
typedef int erts_thr_opts_t;
@@ -164,10 +411,12 @@ typedef struct {
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
typedef int erts_tse_t;
-typedef SWord erts_aint_t;
-typedef SWord erts_atomic_t;
-typedef SWord erts_aint32_t;
-typedef SWord erts_atomic32_t;
+#define erts_dw_aint_t erts_no_dw_atomic_t
+#define erts_dw_atomic_t erts_no_dw_atomic_t
+#define erts_aint_t SWord
+#define erts_atomic_t erts_no_atomic_t
+#define erts_aint32_t Sint32
+#define erts_atomic32_t erts_no_atomic32_t
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
typedef struct { } erts_rwlock_t;
@@ -184,6 +433,8 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif /* #ifdef USE_THREADS */
+#define ERTS_AINT_NULL ((erts_aint_t) NULL)
+
#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
#define ERTS_AINT_T_MIN ((((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
#define ERTS_AINT32_T_MAX (~(((erts_aint32_t) 1) << (sizeof(erts_aint32_t)*8-1)))
@@ -247,65 +498,51 @@ ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, erts_aint_t i);
-ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_read(erts_atomic_t *var);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_inctest(erts_atomic_t *incp);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest(erts_atomic_t *decp);
-ERTS_GLB_INLINE void erts_atomic_inc(erts_atomic_t *incp);
-ERTS_GLB_INLINE void erts_atomic_dec(erts_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_addtest(erts_atomic_t *addp,
- erts_aint_t i);
-ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, erts_aint_t i);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_xchg(erts_atomic_t *xchgp,
- erts_aint_t new);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg(erts_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t expected);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_bor(erts_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_band(erts_atomic_t *var,
- erts_aint_t mask);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_read_acqb(erts_atomic_t *var);
-ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i);
-ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest_relb(erts_atomic_t *decp);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp);
-ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp);
-ERTS_GLB_INLINE void erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i);
-ERTS_GLB_INLINE void erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read(erts_atomic32_t *var);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_inctest(erts_atomic32_t *incp);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest(erts_atomic32_t *decp);
-ERTS_GLB_INLINE void erts_atomic32_inc(erts_atomic32_t *incp);
-ERTS_GLB_INLINE void erts_atomic32_dec(erts_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_addtest(erts_atomic32_t *addp,
- erts_aint32_t i);
-ERTS_GLB_INLINE void erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_xchg(erts_atomic32_t *xchgp,
- erts_aint32_t new);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg(erts_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_bor(erts_atomic32_t *var,
- erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_band(erts_atomic32_t *var,
- erts_aint32_t mask);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read_acqb(erts_atomic32_t *var);
-ERTS_GLB_INLINE void erts_atomic32_set_relb(erts_atomic32_t *var,
- erts_aint32_t i);
-ERTS_GLB_INLINE void erts_atomic32_dec_relb(erts_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest_relb(erts_atomic32_t *decp);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp);
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp);
+
+ERTS_GLB_INLINE void erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val);
+ERTS_GLB_INLINE void erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val);
+ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var,
+ erts_no_dw_atomic_t *val,
+ erts_no_dw_atomic_t *old_val);
+ERTS_GLB_INLINE void erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read(erts_no_atomic_t *var);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_inc_read(erts_no_atomic_t *incp);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_dec_read(erts_no_atomic_t *decp);
+ERTS_GLB_INLINE void erts_no_atomic_inc(erts_no_atomic_t *incp);
+ERTS_GLB_INLINE void erts_no_atomic_dec(erts_no_atomic_t *decp);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_add_read(erts_no_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bor(erts_no_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_band(erts_no_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp,
+ erts_aint_t new);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected);
+ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var,
+ erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_inc_read(erts_no_atomic32_t *incp);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_dec_read(erts_no_atomic32_t *decp);
+ERTS_GLB_INLINE void erts_no_atomic32_inc(erts_no_atomic32_t *incp);
+ERTS_GLB_INLINE void erts_no_atomic32_dec(erts_no_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_add_read(erts_no_atomic32_t *addp,
+ erts_aint32_t i);
+ERTS_GLB_INLINE void erts_no_atomic32_add(erts_no_atomic32_t *addp,
+ erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bor(erts_no_atomic32_t *var,
+ erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_band(erts_no_atomic32_t *var,
+ erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp,
+ erts_aint32_t new);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected);
+
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
Eterm extra,
@@ -362,6 +599,491 @@ ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
+#ifdef USE_THREADS
+
+/*
+ * See "Documentation of atomics and memory barriers" at the top
+ * of this file for info on atomics.
+ */
+
+/* Double word size atomics */
+
+#define erts_dw_atomic_init_nob ethr_dw_atomic_init
+#define erts_dw_atomic_set_nob ethr_dw_atomic_set
+#define erts_dw_atomic_read_nob ethr_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_nob ethr_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_mb ethr_dw_atomic_init_mb
+#define erts_dw_atomic_set_mb ethr_dw_atomic_set_mb
+#define erts_dw_atomic_read_mb ethr_dw_atomic_read_mb
+#define erts_dw_atomic_cmpxchg_mb ethr_dw_atomic_cmpxchg_mb
+
+#define erts_dw_atomic_init_acqb ethr_dw_atomic_init_acqb
+#define erts_dw_atomic_set_acqb ethr_dw_atomic_set_acqb
+#define erts_dw_atomic_read_acqb ethr_dw_atomic_read_acqb
+#define erts_dw_atomic_cmpxchg_acqb ethr_dw_atomic_cmpxchg_acqb
+
+#define erts_dw_atomic_init_relb ethr_dw_atomic_init_relb
+#define erts_dw_atomic_set_relb ethr_dw_atomic_set_relb
+#define erts_dw_atomic_read_relb ethr_dw_atomic_read_relb
+#define erts_dw_atomic_cmpxchg_relb ethr_dw_atomic_cmpxchg_relb
+
+#define erts_dw_atomic_init_ddrb ethr_dw_atomic_init_ddrb
+#define erts_dw_atomic_set_ddrb ethr_dw_atomic_set_ddrb
+#define erts_dw_atomic_read_ddrb ethr_dw_atomic_read_ddrb
+#define erts_dw_atomic_cmpxchg_ddrb ethr_dw_atomic_cmpxchg_ddrb
+
+#define erts_dw_atomic_init_rb ethr_dw_atomic_init_rb
+#define erts_dw_atomic_set_rb ethr_dw_atomic_set_rb
+#define erts_dw_atomic_read_rb ethr_dw_atomic_read_rb
+#define erts_dw_atomic_cmpxchg_rb ethr_dw_atomic_cmpxchg_rb
+
+#define erts_dw_atomic_init_wb ethr_dw_atomic_init_wb
+#define erts_dw_atomic_set_wb ethr_dw_atomic_set_wb
+#define erts_dw_atomic_read_wb ethr_dw_atomic_read_wb
+#define erts_dw_atomic_cmpxchg_wb ethr_dw_atomic_cmpxchg_wb
+
+/* Word size atomics */
+
+#define erts_atomic_init_nob ethr_atomic_init
+#define erts_atomic_set_nob ethr_atomic_set
+#define erts_atomic_read_nob ethr_atomic_read
+#define erts_atomic_inc_read_nob ethr_atomic_inc_read
+#define erts_atomic_dec_read_nob ethr_atomic_dec_read
+#define erts_atomic_inc_nob ethr_atomic_inc
+#define erts_atomic_dec_nob ethr_atomic_dec
+#define erts_atomic_add_read_nob ethr_atomic_add_read
+#define erts_atomic_add_nob ethr_atomic_add
+#define erts_atomic_read_bor_nob ethr_atomic_read_bor
+#define erts_atomic_read_band_nob ethr_atomic_read_band
+#define erts_atomic_xchg_nob ethr_atomic_xchg
+#define erts_atomic_cmpxchg_nob ethr_atomic_cmpxchg
+
+#define erts_atomic_init_mb ethr_atomic_init_mb
+#define erts_atomic_set_mb ethr_atomic_set_mb
+#define erts_atomic_read_mb ethr_atomic_read_mb
+#define erts_atomic_inc_read_mb ethr_atomic_inc_read_mb
+#define erts_atomic_dec_read_mb ethr_atomic_dec_read_mb
+#define erts_atomic_inc_mb ethr_atomic_inc_mb
+#define erts_atomic_dec_mb ethr_atomic_dec_mb
+#define erts_atomic_add_read_mb ethr_atomic_add_read_mb
+#define erts_atomic_add_mb ethr_atomic_add_mb
+#define erts_atomic_read_bor_mb ethr_atomic_read_bor_mb
+#define erts_atomic_read_band_mb ethr_atomic_read_band_mb
+#define erts_atomic_xchg_mb ethr_atomic_xchg_mb
+#define erts_atomic_cmpxchg_mb ethr_atomic_cmpxchg_mb
+
+#define erts_atomic_init_acqb ethr_atomic_init_acqb
+#define erts_atomic_set_acqb ethr_atomic_set_acqb
+#define erts_atomic_read_acqb ethr_atomic_read_acqb
+#define erts_atomic_inc_read_acqb ethr_atomic_inc_read_acqb
+#define erts_atomic_dec_read_acqb ethr_atomic_dec_read_acqb
+#define erts_atomic_inc_acqb ethr_atomic_inc_acqb
+#define erts_atomic_dec_acqb ethr_atomic_dec_acqb
+#define erts_atomic_add_read_acqb ethr_atomic_add_read_acqb
+#define erts_atomic_add_acqb ethr_atomic_add_acqb
+#define erts_atomic_read_bor_acqb ethr_atomic_read_bor_acqb
+#define erts_atomic_read_band_acqb ethr_atomic_read_band_acqb
+#define erts_atomic_xchg_acqb ethr_atomic_xchg_acqb
+#define erts_atomic_cmpxchg_acqb ethr_atomic_cmpxchg_acqb
+
+#define erts_atomic_init_relb ethr_atomic_init_relb
+#define erts_atomic_set_relb ethr_atomic_set_relb
+#define erts_atomic_read_relb ethr_atomic_read_relb
+#define erts_atomic_inc_read_relb ethr_atomic_inc_read_relb
+#define erts_atomic_dec_read_relb ethr_atomic_dec_read_relb
+#define erts_atomic_inc_relb ethr_atomic_inc_relb
+#define erts_atomic_dec_relb ethr_atomic_dec_relb
+#define erts_atomic_add_read_relb ethr_atomic_add_read_relb
+#define erts_atomic_add_relb ethr_atomic_add_relb
+#define erts_atomic_read_bor_relb ethr_atomic_read_bor_relb
+#define erts_atomic_read_band_relb ethr_atomic_read_band_relb
+#define erts_atomic_xchg_relb ethr_atomic_xchg_relb
+#define erts_atomic_cmpxchg_relb ethr_atomic_cmpxchg_relb
+
+#define erts_atomic_init_ddrb ethr_atomic_init_ddrb
+#define erts_atomic_set_ddrb ethr_atomic_set_ddrb
+#define erts_atomic_read_ddrb ethr_atomic_read_ddrb
+#define erts_atomic_inc_read_ddrb ethr_atomic_inc_read_ddrb
+#define erts_atomic_dec_read_ddrb ethr_atomic_dec_read_ddrb
+#define erts_atomic_inc_ddrb ethr_atomic_inc_ddrb
+#define erts_atomic_dec_ddrb ethr_atomic_dec_ddrb
+#define erts_atomic_add_read_ddrb ethr_atomic_add_read_ddrb
+#define erts_atomic_add_ddrb ethr_atomic_add_ddrb
+#define erts_atomic_read_bor_ddrb ethr_atomic_read_bor_ddrb
+#define erts_atomic_read_band_ddrb ethr_atomic_read_band_ddrb
+#define erts_atomic_xchg_ddrb ethr_atomic_xchg_ddrb
+#define erts_atomic_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb
+
+#define erts_atomic_init_rb ethr_atomic_init_rb
+#define erts_atomic_set_rb ethr_atomic_set_rb
+#define erts_atomic_read_rb ethr_atomic_read_rb
+#define erts_atomic_inc_read_rb ethr_atomic_inc_read_rb
+#define erts_atomic_dec_read_rb ethr_atomic_dec_read_rb
+#define erts_atomic_inc_rb ethr_atomic_inc_rb
+#define erts_atomic_dec_rb ethr_atomic_dec_rb
+#define erts_atomic_add_read_rb ethr_atomic_add_read_rb
+#define erts_atomic_add_rb ethr_atomic_add_rb
+#define erts_atomic_read_bor_rb ethr_atomic_read_bor_rb
+#define erts_atomic_read_band_rb ethr_atomic_read_band_rb
+#define erts_atomic_xchg_rb ethr_atomic_xchg_rb
+#define erts_atomic_cmpxchg_rb ethr_atomic_cmpxchg_rb
+
+#define erts_atomic_init_wb ethr_atomic_init_wb
+#define erts_atomic_set_wb ethr_atomic_set_wb
+#define erts_atomic_read_wb ethr_atomic_read_wb
+#define erts_atomic_inc_read_wb ethr_atomic_inc_read_wb
+#define erts_atomic_dec_read_wb ethr_atomic_dec_read_wb
+#define erts_atomic_inc_wb ethr_atomic_inc_wb
+#define erts_atomic_dec_wb ethr_atomic_dec_wb
+#define erts_atomic_add_read_wb ethr_atomic_add_read_wb
+#define erts_atomic_add_wb ethr_atomic_add_wb
+#define erts_atomic_read_bor_wb ethr_atomic_read_bor_wb
+#define erts_atomic_read_band_wb ethr_atomic_read_band_wb
+#define erts_atomic_xchg_wb ethr_atomic_xchg_wb
+#define erts_atomic_cmpxchg_wb ethr_atomic_cmpxchg_wb
+
+/* 32-bit atomics */
+
+#define erts_atomic32_init_nob ethr_atomic32_init
+#define erts_atomic32_set_nob ethr_atomic32_set
+#define erts_atomic32_read_nob ethr_atomic32_read
+#define erts_atomic32_inc_read_nob ethr_atomic32_inc_read
+#define erts_atomic32_dec_read_nob ethr_atomic32_dec_read
+#define erts_atomic32_inc_nob ethr_atomic32_inc
+#define erts_atomic32_dec_nob ethr_atomic32_dec
+#define erts_atomic32_add_read_nob ethr_atomic32_add_read
+#define erts_atomic32_add_nob ethr_atomic32_add
+#define erts_atomic32_read_bor_nob ethr_atomic32_read_bor
+#define erts_atomic32_read_band_nob ethr_atomic32_read_band
+#define erts_atomic32_xchg_nob ethr_atomic32_xchg
+#define erts_atomic32_cmpxchg_nob ethr_atomic32_cmpxchg
+
+#define erts_atomic32_init_mb ethr_atomic32_init_mb
+#define erts_atomic32_set_mb ethr_atomic32_set_mb
+#define erts_atomic32_read_mb ethr_atomic32_read_mb
+#define erts_atomic32_inc_read_mb ethr_atomic32_inc_read_mb
+#define erts_atomic32_dec_read_mb ethr_atomic32_dec_read_mb
+#define erts_atomic32_inc_mb ethr_atomic32_inc_mb
+#define erts_atomic32_dec_mb ethr_atomic32_dec_mb
+#define erts_atomic32_add_read_mb ethr_atomic32_add_read_mb
+#define erts_atomic32_add_mb ethr_atomic32_add_mb
+#define erts_atomic32_read_bor_mb ethr_atomic32_read_bor_mb
+#define erts_atomic32_read_band_mb ethr_atomic32_read_band_mb
+#define erts_atomic32_xchg_mb ethr_atomic32_xchg_mb
+#define erts_atomic32_cmpxchg_mb ethr_atomic32_cmpxchg_mb
+
+#define erts_atomic32_init_acqb ethr_atomic32_init_acqb
+#define erts_atomic32_set_acqb ethr_atomic32_set_acqb
+#define erts_atomic32_read_acqb ethr_atomic32_read_acqb
+#define erts_atomic32_inc_read_acqb ethr_atomic32_inc_read_acqb
+#define erts_atomic32_dec_read_acqb ethr_atomic32_dec_read_acqb
+#define erts_atomic32_inc_acqb ethr_atomic32_inc_acqb
+#define erts_atomic32_dec_acqb ethr_atomic32_dec_acqb
+#define erts_atomic32_add_read_acqb ethr_atomic32_add_read_acqb
+#define erts_atomic32_add_acqb ethr_atomic32_add_acqb
+#define erts_atomic32_read_bor_acqb ethr_atomic32_read_bor_acqb
+#define erts_atomic32_read_band_acqb ethr_atomic32_read_band_acqb
+#define erts_atomic32_xchg_acqb ethr_atomic32_xchg_acqb
+#define erts_atomic32_cmpxchg_acqb ethr_atomic32_cmpxchg_acqb
+
+#define erts_atomic32_init_relb ethr_atomic32_init_relb
+#define erts_atomic32_set_relb ethr_atomic32_set_relb
+#define erts_atomic32_read_relb ethr_atomic32_read_relb
+#define erts_atomic32_inc_read_relb ethr_atomic32_inc_read_relb
+#define erts_atomic32_dec_read_relb ethr_atomic32_dec_read_relb
+#define erts_atomic32_inc_relb ethr_atomic32_inc_relb
+#define erts_atomic32_dec_relb ethr_atomic32_dec_relb
+#define erts_atomic32_add_read_relb ethr_atomic32_add_read_relb
+#define erts_atomic32_add_relb ethr_atomic32_add_relb
+#define erts_atomic32_read_bor_relb ethr_atomic32_read_bor_relb
+#define erts_atomic32_read_band_relb ethr_atomic32_read_band_relb
+#define erts_atomic32_xchg_relb ethr_atomic32_xchg_relb
+#define erts_atomic32_cmpxchg_relb ethr_atomic32_cmpxchg_relb
+
+#define erts_atomic32_init_ddrb ethr_atomic32_init_ddrb
+#define erts_atomic32_set_ddrb ethr_atomic32_set_ddrb
+#define erts_atomic32_read_ddrb ethr_atomic32_read_ddrb
+#define erts_atomic32_inc_read_ddrb ethr_atomic32_inc_read_ddrb
+#define erts_atomic32_dec_read_ddrb ethr_atomic32_dec_read_ddrb
+#define erts_atomic32_inc_ddrb ethr_atomic32_inc_ddrb
+#define erts_atomic32_dec_ddrb ethr_atomic32_dec_ddrb
+#define erts_atomic32_add_read_ddrb ethr_atomic32_add_read_ddrb
+#define erts_atomic32_add_ddrb ethr_atomic32_add_ddrb
+#define erts_atomic32_read_bor_ddrb ethr_atomic32_read_bor_ddrb
+#define erts_atomic32_read_band_ddrb ethr_atomic32_read_band_ddrb
+#define erts_atomic32_xchg_ddrb ethr_atomic32_xchg_ddrb
+#define erts_atomic32_cmpxchg_ddrb ethr_atomic32_cmpxchg_ddrb
+
+#define erts_atomic32_init_rb ethr_atomic32_init_rb
+#define erts_atomic32_set_rb ethr_atomic32_set_rb
+#define erts_atomic32_read_rb ethr_atomic32_read_rb
+#define erts_atomic32_inc_read_rb ethr_atomic32_inc_read_rb
+#define erts_atomic32_dec_read_rb ethr_atomic32_dec_read_rb
+#define erts_atomic32_inc_rb ethr_atomic32_inc_rb
+#define erts_atomic32_dec_rb ethr_atomic32_dec_rb
+#define erts_atomic32_add_read_rb ethr_atomic32_add_read_rb
+#define erts_atomic32_add_rb ethr_atomic32_add_rb
+#define erts_atomic32_read_bor_rb ethr_atomic32_read_bor_rb
+#define erts_atomic32_read_band_rb ethr_atomic32_read_band_rb
+#define erts_atomic32_xchg_rb ethr_atomic32_xchg_rb
+#define erts_atomic32_cmpxchg_rb ethr_atomic32_cmpxchg_rb
+
+#define erts_atomic32_init_wb ethr_atomic32_init_wb
+#define erts_atomic32_set_wb ethr_atomic32_set_wb
+#define erts_atomic32_read_wb ethr_atomic32_read_wb
+#define erts_atomic32_inc_read_wb ethr_atomic32_inc_read_wb
+#define erts_atomic32_dec_read_wb ethr_atomic32_dec_read_wb
+#define erts_atomic32_inc_wb ethr_atomic32_inc_wb
+#define erts_atomic32_dec_wb ethr_atomic32_dec_wb
+#define erts_atomic32_add_read_wb ethr_atomic32_add_read_wb
+#define erts_atomic32_add_wb ethr_atomic32_add_wb
+#define erts_atomic32_read_bor_wb ethr_atomic32_read_bor_wb
+#define erts_atomic32_read_band_wb ethr_atomic32_read_band_wb
+#define erts_atomic32_xchg_wb ethr_atomic32_xchg_wb
+#define erts_atomic32_cmpxchg_wb ethr_atomic32_cmpxchg_wb
+
+#else /* !USE_THREADS */
+
+/* Double word size atomics */
+
+#define erts_dw_atomic_init_nob erts_no_dw_atomic_set
+#define erts_dw_atomic_set_nob erts_no_dw_atomic_set
+#define erts_dw_atomic_read_nob erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_mb erts_no_dw_atomic_set
+#define erts_dw_atomic_set_mb erts_no_dw_atomic_set
+#define erts_dw_atomic_read_mb erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_acqb erts_no_dw_atomic_set
+#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set
+#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_relb erts_no_dw_atomic_set
+#define erts_dw_atomic_set_relb erts_no_dw_atomic_set
+#define erts_dw_atomic_read_relb erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_ddrb erts_no_dw_atomic_set
+#define erts_dw_atomic_set_ddrb erts_no_dw_atomic_set
+#define erts_dw_atomic_read_ddrb erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_rb erts_no_dw_atomic_set
+#define erts_dw_atomic_set_rb erts_no_dw_atomic_set
+#define erts_dw_atomic_read_rb erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
+
+#define erts_dw_atomic_init_wb erts_no_dw_atomic_set
+#define erts_dw_atomic_set_wb erts_no_dw_atomic_set
+#define erts_dw_atomic_read_wb erts_no_dw_atomic_read
+#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
+
+/* Word size atomics */
+
+#define erts_atomic_init_nob erts_no_atomic_set
+#define erts_atomic_set_nob erts_no_atomic_set
+#define erts_atomic_read_nob erts_no_atomic_read
+#define erts_atomic_inc_read_nob erts_no_atomic_inc_read
+#define erts_atomic_dec_read_nob erts_no_atomic_dec_read
+#define erts_atomic_inc_nob erts_no_atomic_inc
+#define erts_atomic_dec_nob erts_no_atomic_dec
+#define erts_atomic_add_read_nob erts_no_atomic_add_read
+#define erts_atomic_add_nob erts_no_atomic_add
+#define erts_atomic_read_bor_nob erts_no_atomic_read_bor
+#define erts_atomic_read_band_nob erts_no_atomic_read_band
+#define erts_atomic_xchg_nob erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
+
+#define erts_atomic_init_mb erts_no_atomic_set
+#define erts_atomic_set_mb erts_no_atomic_set
+#define erts_atomic_read_mb erts_no_atomic_read
+#define erts_atomic_inc_read_mb erts_no_atomic_inc_read
+#define erts_atomic_dec_read_mb erts_no_atomic_dec_read
+#define erts_atomic_inc_mb erts_no_atomic_inc
+#define erts_atomic_dec_mb erts_no_atomic_dec
+#define erts_atomic_add_read_mb erts_no_atomic_add_read
+#define erts_atomic_add_mb erts_no_atomic_add
+#define erts_atomic_read_bor_mb erts_no_atomic_read_bor
+#define erts_atomic_read_band_mb erts_no_atomic_read_band
+#define erts_atomic_xchg_mb erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
+
+#define erts_atomic_init_acqb erts_no_atomic_set
+#define erts_atomic_set_acqb erts_no_atomic_set
+#define erts_atomic_read_acqb erts_no_atomic_read
+#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read
+#define erts_atomic_dec_read_acqb erts_no_atomic_dec_read
+#define erts_atomic_inc_acqb erts_no_atomic_inc
+#define erts_atomic_dec_acqb erts_no_atomic_dec
+#define erts_atomic_add_read_acqb erts_no_atomic_add_read
+#define erts_atomic_add_acqb erts_no_atomic_add
+#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor
+#define erts_atomic_read_band_acqb erts_no_atomic_read_band
+#define erts_atomic_xchg_acqb erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
+
+#define erts_atomic_init_relb erts_no_atomic_set
+#define erts_atomic_set_relb erts_no_atomic_set
+#define erts_atomic_read_relb erts_no_atomic_read
+#define erts_atomic_inc_read_relb erts_no_atomic_inc_read
+#define erts_atomic_dec_read_relb erts_no_atomic_dec_read
+#define erts_atomic_inc_relb erts_no_atomic_inc
+#define erts_atomic_dec_relb erts_no_atomic_dec
+#define erts_atomic_add_read_relb erts_no_atomic_add_read
+#define erts_atomic_add_relb erts_no_atomic_add
+#define erts_atomic_read_bor_relb erts_no_atomic_read_bor
+#define erts_atomic_read_band_relb erts_no_atomic_read_band
+#define erts_atomic_xchg_relb erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
+
+#define erts_atomic_init_ddrb erts_no_atomic_set
+#define erts_atomic_set_ddrb erts_no_atomic_set
+#define erts_atomic_read_ddrb erts_no_atomic_read
+#define erts_atomic_inc_read_ddrb erts_no_atomic_inc_read
+#define erts_atomic_dec_read_ddrb erts_no_atomic_dec_read
+#define erts_atomic_inc_ddrb erts_no_atomic_inc
+#define erts_atomic_dec_ddrb erts_no_atomic_dec
+#define erts_atomic_add_read_ddrb erts_no_atomic_add_read
+#define erts_atomic_add_ddrb erts_no_atomic_add
+#define erts_atomic_read_bor_ddrb erts_no_atomic_read_bor
+#define erts_atomic_read_band_ddrb erts_no_atomic_read_band
+#define erts_atomic_xchg_ddrb erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
+
+#define erts_atomic_init_rb erts_no_atomic_set
+#define erts_atomic_set_rb erts_no_atomic_set
+#define erts_atomic_read_rb erts_no_atomic_read
+#define erts_atomic_inc_read_rb erts_no_atomic_inc_read
+#define erts_atomic_dec_read_rb erts_no_atomic_dec_read
+#define erts_atomic_inc_rb erts_no_atomic_inc
+#define erts_atomic_dec_rb erts_no_atomic_dec
+#define erts_atomic_add_read_rb erts_no_atomic_add_read
+#define erts_atomic_add_rb erts_no_atomic_add
+#define erts_atomic_read_bor_rb erts_no_atomic_read_bor
+#define erts_atomic_read_band_rb erts_no_atomic_read_band
+#define erts_atomic_xchg_rb erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
+
+#define erts_atomic_init_wb erts_no_atomic_set
+#define erts_atomic_set_wb erts_no_atomic_set
+#define erts_atomic_read_wb erts_no_atomic_read
+#define erts_atomic_inc_read_wb erts_no_atomic_inc_read
+#define erts_atomic_dec_read_wb erts_no_atomic_dec_read
+#define erts_atomic_inc_wb erts_no_atomic_inc
+#define erts_atomic_dec_wb erts_no_atomic_dec
+#define erts_atomic_add_read_wb erts_no_atomic_add_read
+#define erts_atomic_add_wb erts_no_atomic_add
+#define erts_atomic_read_bor_wb erts_no_atomic_read_bor
+#define erts_atomic_read_band_wb erts_no_atomic_read_band
+#define erts_atomic_xchg_wb erts_no_atomic_xchg
+#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
+
+/* 32-bit atomics */
+
+#define erts_atomic32_init_nob erts_no_atomic32_set
+#define erts_atomic32_set_nob erts_no_atomic32_set
+#define erts_atomic32_read_nob erts_no_atomic32_read
+#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read
+#define erts_atomic32_inc_nob erts_no_atomic32_inc
+#define erts_atomic32_dec_nob erts_no_atomic32_dec
+#define erts_atomic32_add_read_nob erts_no_atomic32_add_read
+#define erts_atomic32_add_nob erts_no_atomic32_add
+#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_nob erts_no_atomic32_read_band
+#define erts_atomic32_xchg_nob erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
+
+#define erts_atomic32_init_mb erts_no_atomic32_set
+#define erts_atomic32_set_mb erts_no_atomic32_set
+#define erts_atomic32_read_mb erts_no_atomic32_read
+#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read
+#define erts_atomic32_inc_mb erts_no_atomic32_inc
+#define erts_atomic32_dec_mb erts_no_atomic32_dec
+#define erts_atomic32_add_read_mb erts_no_atomic32_add_read
+#define erts_atomic32_add_mb erts_no_atomic32_add
+#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_mb erts_no_atomic32_read_band
+#define erts_atomic32_xchg_mb erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
+
+#define erts_atomic32_init_acqb erts_no_atomic32_set
+#define erts_atomic32_set_acqb erts_no_atomic32_set
+#define erts_atomic32_read_acqb erts_no_atomic32_read
+#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read
+#define erts_atomic32_inc_acqb erts_no_atomic32_inc
+#define erts_atomic32_dec_acqb erts_no_atomic32_dec
+#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read
+#define erts_atomic32_add_acqb erts_no_atomic32_add
+#define erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band
+#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
+
+#define erts_atomic32_init_relb erts_no_atomic32_set
+#define erts_atomic32_set_relb erts_no_atomic32_set
+#define erts_atomic32_read_relb erts_no_atomic32_read
+#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read
+#define erts_atomic32_inc_relb erts_no_atomic32_inc
+#define erts_atomic32_dec_relb erts_no_atomic32_dec
+#define erts_atomic32_add_read_relb erts_no_atomic32_add_read
+#define erts_atomic32_add_relb erts_no_atomic32_add
+#define erts_atomic32_read_bor_relb erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_relb erts_no_atomic32_read_band
+#define erts_atomic32_xchg_relb erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
+
+#define erts_atomic32_init_ddrb erts_no_atomic32_set
+#define erts_atomic32_set_ddrb erts_no_atomic32_set
+#define erts_atomic32_read_ddrb erts_no_atomic32_read
+#define erts_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
+#define erts_atomic32_inc_ddrb erts_no_atomic32_inc
+#define erts_atomic32_dec_ddrb erts_no_atomic32_dec
+#define erts_atomic32_add_read_ddrb erts_no_atomic32_add_read
+#define erts_atomic32_add_ddrb erts_no_atomic32_add
+#define erts_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band
+#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
+
+#define erts_atomic32_init_rb erts_no_atomic32_set
+#define erts_atomic32_set_rb erts_no_atomic32_set
+#define erts_atomic32_read_rb erts_no_atomic32_read
+#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read
+#define erts_atomic32_inc_rb erts_no_atomic32_inc
+#define erts_atomic32_dec_rb erts_no_atomic32_dec
+#define erts_atomic32_add_read_rb erts_no_atomic32_add_read
+#define erts_atomic32_add_rb erts_no_atomic32_add
+#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_rb erts_no_atomic32_read_band
+#define erts_atomic32_xchg_rb erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
+
+#define erts_atomic32_init_wb erts_no_atomic32_set
+#define erts_atomic32_set_wb erts_no_atomic32_set
+#define erts_atomic32_read_wb erts_no_atomic32_read
+#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read
+#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read
+#define erts_atomic32_inc_wb erts_no_atomic32_inc
+#define erts_atomic32_dec_wb erts_no_atomic32_dec
+#define erts_atomic32_add_read_wb erts_no_atomic32_add_read
+#define erts_atomic32_add_wb erts_no_atomic32_add
+#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor
+#define erts_atomic32_read_band_wb erts_no_atomic32_read_band
+#define erts_atomic32_xchg_wb erts_no_atomic32_xchg
+#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
+
+#endif /* !USE_THREADS */
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
@@ -571,8 +1293,9 @@ erts_mtx_destroy(erts_mtx_t *mtx)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy mutex");
+ erts_thr_fatal_error(res, "destroy mutex");
}
#endif
}
@@ -675,8 +1398,9 @@ erts_cnd_destroy(erts_cnd_t *cnd)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy condition variable");
+ erts_thr_fatal_error(res, "destroy condition variable");
}
#endif
}
@@ -707,6 +1431,16 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
#endif
}
+/*
+ * IMPORTANT note about erts_cnd_signal() and erts_cnd_broadcast()
+ *
+ * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
+ * even though the associated mutex/mutexes isn't/aren't locked by the
+ * caller. Our implementation does not allow that, in order to avoid a
+ * performance penalty. That is, all associated mutexes *need* to be
+ * locked by the caller of erts_cnd_signal()/erts_cnd_broadcast()!
+ */
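To make the required discipline concrete, here is a minimal usage sketch (only the erts_mtx_*/erts_cnd_* primitives are real; the `ready' flag and the two helper functions are illustrative):

    static erts_mtx_t mtx;   /* protects `ready' */
    static erts_cnd_t cnd;
    static int ready = 0;

    static void signaller(void)
    {
        erts_mtx_lock(&mtx);       /* hold the associated mutex ...           */
        ready = 1;
        erts_cnd_signal(&cnd);     /* ... while signalling, as required above */
        erts_mtx_unlock(&mtx);
    }

    static void waiter(void)
    {
        erts_mtx_lock(&mtx);
        while (!ready)             /* re-check; wakeups may be spurious */
            erts_cnd_wait(&cnd, &mtx);
        erts_mtx_unlock(&mtx);
    }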
+
ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
@@ -810,8 +1544,9 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy rwmutex");
+ erts_thr_fatal_error(res, "destroy rwmutex");
}
#endif
}
@@ -995,428 +1730,206 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
#endif
}
+/* Plain fallback implementations, used when no atomic ops are available (!USE_THREADS) */
+
ERTS_GLB_INLINE void
-erts_atomic_init(erts_atomic_t *var, erts_aint_t i)
+erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val)
{
-#ifdef USE_THREADS
- ethr_atomic_init(var, i);
-#else
- *var = i;
-#endif
+ var->sint[0] = val->sint[0];
+ var->sint[1] = val->sint[1];
}
ERTS_GLB_INLINE void
-erts_atomic_set(erts_atomic_t *var, erts_aint_t i)
+erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val)
+{
+ val->sint[0] = var->sint[0];
+ val->sint[1] = var->sint[1];
+}
+
+ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var,
+ erts_no_dw_atomic_t *new_val,
+ erts_no_dw_atomic_t *old_val)
+{
+ if (var->sint[0] != old_val->sint[0] || var->sint[1] != old_val->sint[1]) {
+ erts_no_dw_atomic_read(var, old_val);
+ return 0;
+ }
+ else {
+ erts_no_dw_atomic_set(var, new_val);
+ return !0;
+ }
+}
+
+ERTS_GLB_INLINE void
+erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i)
{
-#ifdef USE_THREADS
- ethr_atomic_set(var, i);
-#else
*var = i;
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_read(erts_atomic_t *var)
+erts_no_atomic_read(erts_no_atomic_t *var)
{
-#ifdef USE_THREADS
- return ethr_atomic_read(var);
-#else
return *var;
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_inctest(erts_atomic_t *incp)
+erts_no_atomic_inc_read(erts_no_atomic_t *incp)
{
-#ifdef USE_THREADS
- return ethr_atomic_inc_read(incp);
-#else
return ++(*incp);
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_dectest(erts_atomic_t *decp)
+erts_no_atomic_dec_read(erts_no_atomic_t *decp)
{
-#ifdef USE_THREADS
- return ethr_atomic_dec_read(decp);
-#else
return --(*decp);
-#endif
}
ERTS_GLB_INLINE void
-erts_atomic_inc(erts_atomic_t *incp)
+erts_no_atomic_inc(erts_no_atomic_t *incp)
{
-#ifdef USE_THREADS
- ethr_atomic_inc(incp);
-#else
++(*incp);
-#endif
}
ERTS_GLB_INLINE void
-erts_atomic_dec(erts_atomic_t *decp)
+erts_no_atomic_dec(erts_no_atomic_t *decp)
{
-#ifdef USE_THREADS
- ethr_atomic_dec(decp);
-#else
--(*decp);
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_addtest(erts_atomic_t *addp, erts_aint_t i)
+erts_no_atomic_add_read(erts_no_atomic_t *addp, erts_aint_t i)
{
-#ifdef USE_THREADS
- return ethr_atomic_add_read(addp, i);
-#else
return *addp += i;
-#endif
}
ERTS_GLB_INLINE void
-erts_atomic_add(erts_atomic_t *addp, erts_aint_t i)
+erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i)
{
-#ifdef USE_THREADS
- ethr_atomic_add(addp, i);
-#else
*addp += i;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_atomic_xchg(erts_atomic_t *xchgp, erts_aint_t new)
-{
-#ifdef USE_THREADS
- return ethr_atomic_xchg(xchgp, new);
-#else
- erts_aint_t old = *xchgp;
- *xchgp = new;
- return old;
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_cmpxchg(erts_atomic_t *xchgp, erts_aint_t new, erts_aint_t expected)
+erts_no_atomic_read_bor(erts_no_atomic_t *var, erts_aint_t mask)
{
-#ifdef USE_THREADS
- return ethr_atomic_cmpxchg(xchgp, new, expected);
-#else
- erts_aint_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_atomic_bor(erts_atomic_t *var, erts_aint_t mask)
-{
-#ifdef USE_THREADS
- return ethr_atomic_read_bor(var, mask);
-#else
erts_aint_t old;
old = *var;
*var |= mask;
return old;
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_band(erts_atomic_t *var, erts_aint_t mask)
+erts_no_atomic_read_band(erts_no_atomic_t *var, erts_aint_t mask)
{
-#ifdef USE_THREADS
- return ethr_atomic_read_band(var, mask);
-#else
erts_aint_t old;
old = *var;
*var &= mask;
return old;
-#endif
}
ERTS_GLB_INLINE erts_aint_t
-erts_atomic_read_acqb(erts_atomic_t *var)
+erts_no_atomic_xchg(erts_no_atomic_t *xchgp, erts_aint_t new)
{
-#ifdef USE_THREADS
- return ethr_atomic_read_acqb(var);
-#else
- return *var;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i)
-{
-#ifdef USE_THREADS
- ethr_atomic_set_relb(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_atomic_dec_relb(erts_atomic_t *decp)
-{
-#ifdef USE_THREADS
- ethr_atomic_dec_relb(decp);
-#else
- --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t
-erts_atomic_dectest_relb(erts_atomic_t *decp)
-{
-#ifdef USE_THREADS
- return ethr_atomic_dec_read_relb(decp);
-#else
- return --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp)
-{
-#ifdef USE_THREADS
- return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
-#else
erts_aint_t old = *xchgp;
- if (old == exp)
- *xchgp = new;
+ *xchgp = new;
return old;
-#endif
}
-ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
- erts_aint_t new,
- erts_aint_t exp)
+ERTS_GLB_INLINE erts_aint_t
+erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected)
{
-#ifdef USE_THREADS
- return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
-#else
erts_aint_t old = *xchgp;
- if (old == exp)
+ if (old == expected)
*xchgp = new;
return old;
-#endif
}
/* atomic32 */
ERTS_GLB_INLINE void
-erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i)
-{
-#ifdef USE_THREADS
- ethr_atomic32_init(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i)
+erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i)
{
-#ifdef USE_THREADS
- ethr_atomic32_set(var, i);
-#else
*var = i;
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_read(erts_atomic32_t *var)
+erts_no_atomic32_read(erts_no_atomic32_t *var)
{
-#ifdef USE_THREADS
- return ethr_atomic32_read(var);
-#else
return *var;
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_inctest(erts_atomic32_t *incp)
+erts_no_atomic32_inc_read(erts_no_atomic32_t *incp)
{
-#ifdef USE_THREADS
- return ethr_atomic32_inc_read(incp);
-#else
return ++(*incp);
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_dectest(erts_atomic32_t *decp)
+erts_no_atomic32_dec_read(erts_no_atomic32_t *decp)
{
-#ifdef USE_THREADS
- return ethr_atomic32_dec_read(decp);
-#else
return --(*decp);
-#endif
}
ERTS_GLB_INLINE void
-erts_atomic32_inc(erts_atomic32_t *incp)
+erts_no_atomic32_inc(erts_no_atomic32_t *incp)
{
-#ifdef USE_THREADS
- ethr_atomic32_inc(incp);
-#else
++(*incp);
-#endif
}
ERTS_GLB_INLINE void
-erts_atomic32_dec(erts_atomic32_t *decp)
+erts_no_atomic32_dec(erts_no_atomic32_t *decp)
{
-#ifdef USE_THREADS
- ethr_atomic32_dec(decp);
-#else
--(*decp);
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_addtest(erts_atomic32_t *addp, erts_aint32_t i)
+erts_no_atomic32_add_read(erts_no_atomic32_t *addp, erts_aint32_t i)
{
-#ifdef USE_THREADS
- return ethr_atomic32_add_read(addp, i);
-#else
return *addp += i;
-#endif
}
ERTS_GLB_INLINE void
-erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i)
+erts_no_atomic32_add(erts_no_atomic32_t *addp, erts_aint32_t i)
{
-#ifdef USE_THREADS
- ethr_atomic32_add(addp, i);
-#else
*addp += i;
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_xchg(erts_atomic32_t *xchgp, erts_aint32_t new)
+erts_no_atomic32_read_bor(erts_no_atomic32_t *var, erts_aint32_t mask)
{
-#ifdef USE_THREADS
- return ethr_atomic32_xchg(xchgp, new);
-#else
- erts_aint32_t old = *xchgp;
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_cmpxchg(erts_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t expected)
-{
-#ifdef USE_THREADS
- return ethr_atomic32_cmpxchg(xchgp, new, expected);
-#else
- erts_aint32_t old = *xchgp;
- if (old == expected)
- *xchgp = new;
- return old;
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_bor(erts_atomic32_t *var, erts_aint32_t mask)
-{
-#ifdef USE_THREADS
- return ethr_atomic32_read_bor(var, mask);
-#else
erts_aint32_t old;
old = *var;
*var |= mask;
return old;
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_band(erts_atomic32_t *var, erts_aint32_t mask)
+erts_no_atomic32_read_band(erts_no_atomic32_t *var, erts_aint32_t mask)
{
-#ifdef USE_THREADS
- return ethr_atomic32_read_band(var, mask);
-#else
erts_aint32_t old;
old = *var;
*var &= mask;
return old;
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_read_acqb(erts_atomic32_t *var)
+erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, erts_aint32_t new)
{
-#ifdef USE_THREADS
- return ethr_atomic32_read_acqb(var);
-#else
- return *var;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_atomic32_set_relb(erts_atomic32_t *var, erts_aint32_t i)
-{
-#ifdef USE_THREADS
- ethr_atomic32_set_relb(var, i);
-#else
- *var = i;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_atomic32_dec_relb(erts_atomic32_t *decp)
-{
-#ifdef USE_THREADS
- ethr_atomic32_dec_relb(decp);
-#else
- --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_dectest_relb(erts_atomic32_t *decp)
-{
-#ifdef USE_THREADS
- return ethr_atomic32_dec_read_relb(decp);
-#else
- return --(*decp);
-#endif
-}
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp)
-{
-#ifdef USE_THREADS
- return ethr_atomic32_cmpxchg_acqb(xchgp, new, exp);
-#else
erts_aint32_t old = *xchgp;
- if (old == exp)
- *xchgp = new;
+ *xchgp = new;
return old;
-#endif
}
ERTS_GLB_INLINE erts_aint32_t
-erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp,
- erts_aint32_t new,
- erts_aint32_t exp)
+erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected)
{
-#ifdef USE_THREADS
- return ethr_atomic32_cmpxchg_relb(xchgp, new, exp);
-#else
erts_aint32_t old = *xchgp;
- if (old == exp)
+ if (old == expected)
*xchgp = new;
return old;
-#endif
}
/* spinlock */
@@ -1496,8 +2009,9 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy rwlock");
+ erts_thr_fatal_error(res, "destroy rwlock");
}
#else
(void)lock;
@@ -1614,8 +2128,9 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
"Most likely a bug in pthread implementation.";
erts_send_warning_to_logger_str_nogl(warn);
}
+ else
#endif
- erts_thr_fatal_error(res, "destroy rwlock");
+ erts_thr_fatal_error(res, "destroy rwlock");
}
#else
(void)lock;
@@ -1887,3 +2402,37 @@ erts_thr_sigwait(const sigset_t *set, int *sig)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* #ifndef ERL_THREAD_H__ */
+
+#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS
+
+/* Deprecated functions to replace */
+
+#undef erts_atomic_init
+#undef erts_atomic_set
+#undef erts_atomic_read
+#undef erts_atomic_inctest
+#undef erts_atomic_dectest
+#undef erts_atomic_inc
+#undef erts_atomic_dec
+#undef erts_atomic_addtest
+#undef erts_atomic_add
+#undef erts_atomic_xchg
+#undef erts_atomic_cmpxchg
+#undef erts_atomic_bor
+#undef erts_atomic_band
+
+#undef erts_atomic32_init
+#undef erts_atomic32_set
+#undef erts_atomic32_read
+#undef erts_atomic32_inctest
+#undef erts_atomic32_dectest
+#undef erts_atomic32_inc
+#undef erts_atomic32_dec
+#undef erts_atomic32_addtest
+#undef erts_atomic32_add
+#undef erts_atomic32_xchg
+#undef erts_atomic32_cmpxchg
+#undef erts_atomic32_bor
+#undef erts_atomic32_band
+
+#endif
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index d0ad73cd81..6c6e193818 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -20,7 +20,11 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
-extern erts_smp_atomic_t do_time; /* set at clock interrupt */
+#define ERTS_SHORT_TIME_T_MAX ERTS_AINT32_T_MAX
+#define ERTS_SHORT_TIME_T_MIN ERTS_AINT32_T_MIN
+typedef erts_aint32_t erts_short_time_t;
+
+extern erts_smp_atomic32_t do_time; /* set at clock interrupt */
extern SysTimeval erts_first_emu_time;
/*
@@ -71,22 +75,32 @@ void erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer);
void erts_init_time(void);
void erts_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
void erts_cancel_timer(ErlTimer*);
-void erts_bump_timer(erts_aint_t);
+void erts_bump_timer(erts_short_time_t);
Uint erts_timer_wheel_memory_size(void);
Uint erts_time_left(ErlTimer *);
-erts_aint_t erts_next_time(void);
+erts_short_time_t erts_next_time(void);
#ifdef DEBUG
void erts_p_slpq(void);
#endif
-ERTS_GLB_INLINE erts_aint_t erts_do_time_read_and_reset(void);
-ERTS_GLB_INLINE void erts_do_time_add(long);
+ERTS_GLB_INLINE erts_short_time_t erts_do_time_read_and_reset(void);
+ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE erts_aint_t erts_do_time_read_and_reset(void) { return erts_smp_atomic_xchg(&do_time, 0L); }
-ERTS_GLB_INLINE void erts_do_time_add(long elapsed) { erts_smp_atomic_add(&do_time, elapsed); }
+ERTS_GLB_INLINE erts_short_time_t erts_do_time_read_and_reset(void)
+{
+ erts_short_time_t time = erts_smp_atomic32_xchg_acqb(&do_time, 0);
+ if (time < 0)
+ erl_exit(ERTS_ABORT_EXIT, "Internal time management error\n");
+ return time;
+}
+
+ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed)
+{
+ erts_smp_atomic32_add_relb(&do_time, elapsed);
+}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -105,7 +119,7 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
#endif
void erts_get_timeval(SysTimeval *tv);
-long erts_get_time(void);
+erts_time_t erts_get_time(void);
void erts_get_emu_time(SysTimeval *);
ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index ca4b54188e..1d0735aa99 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -371,7 +371,7 @@ static void init_erts_deliver_time(const SysTimeval *inittv)
static void do_erts_deliver_time(const SysTimeval *current)
{
SysTimeval cur_time;
- long elapsed;
+ erts_time_t elapsed;
/* calculate and deliver appropriate number of ticks */
cur_time = *current;
@@ -385,7 +385,10 @@ static void do_erts_deliver_time(const SysTimeval *current)
this by simply pretend as if the time stood still. :) */
if (elapsed > 0) {
- erts_do_time_add(elapsed);
+
+ ASSERT(elapsed < ((erts_time_t) ERTS_SHORT_TIME_T_MAX));
+
+ erts_do_time_add((erts_short_time_t) elapsed);
last_delivered = cur_time;
}
}
@@ -421,11 +424,11 @@ erts_init_time_sup(void)
/* info functions */
void
-elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
- unsigned long *ms_user_diff, unsigned long *ms_sys_diff)
+elapsed_time_both(UWord *ms_user, UWord *ms_sys,
+ UWord *ms_user_diff, UWord *ms_sys_diff)
{
- unsigned long prev_total_user, prev_total_sys;
- unsigned long total_user, total_sys;
+ UWord prev_total_user, prev_total_sys;
+ UWord total_user, total_sys;
SysTimes now;
sys_times(&now);
@@ -456,9 +459,9 @@ elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
/* wall clock routines */
void
-wall_clock_elapsed_time_both(unsigned long *ms_total, unsigned long *ms_diff)
+wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
{
- unsigned long prev_total;
+ UWord prev_total;
SysTimeval tv;
erts_smp_mtx_lock(&erts_timeofday_mtx);
@@ -491,7 +494,7 @@ get_time(int *hour, int *minute, int *second)
the_clock = time((time_t *)0);
#ifdef HAVE_LOCALTIME_R
- localtime_r(&the_clock, (tm = &tmbuf));
+ tm = localtime_r(&the_clock, &tmbuf);
#else
tm = localtime(&the_clock);
#endif
@@ -513,7 +516,7 @@ get_date(int *year, int *month, int *day)
the_clock = time((time_t *)0);
#ifdef HAVE_LOCALTIME_R
- localtime_r(&the_clock, (tm = &tmbuf));
+ tm = localtime_r(&the_clock, &tmbuf);
#else
tm = localtime(&the_clock);
#endif
@@ -583,7 +586,44 @@ static const int mdays[14] = {0, 31, 28, 31, 30, 31, 30,
(((y) % 100) != 0)) || \
(((y) % 400) == 0))
-#define BASEYEAR 1970
+/* This is the earliest year we are sure to be able to handle
+ on all platforms w/o problems */
+#define BASEYEAR 1902
+
+/* A more "clever" mktime
+ * return 1, if successful
+ * return -1, if not successful
+ */
+
+static int erl_mktime(time_t *c, struct tm *tm) {
+ time_t clock;
+
+ clock = mktime(tm);
+
+ if (clock != -1) {
+ *c = clock;
+ return 1;
+ }
+
+  /* On rare occasions mktime returns -1 even though
+   * a correct value has been entered (-1 is itself a
+   * valid point in time).
+   *
+   * Decrease the seconds by one; if the result then is -2,
+   * the correct epoch value really is -1.
+   */
+
+ tm->tm_sec = tm->tm_sec - 1;
+ clock = mktime(tm);
+ tm->tm_sec = tm->tm_sec + 1;
+
+ *c = -1;
+
+ if (clock == -2) {
+ return 1;
+ }
+
+ return -1;
+}
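A hedged sketch of how a caller is expected to test the result (this mirrors the use in local_to_univ() further down; the struct tm values are illustrative and assume a UTC local timezone, where they denote one second before the epoch):

    time_t c;
    struct tm t = {0};
    t.tm_year = 69; t.tm_mon = 11; t.tm_mday = 31;   /* 1969-12-31 */
    t.tm_hour = 23; t.tm_min = 59; t.tm_sec = 59;
    t.tm_isdst = 0;
    if (erl_mktime(&c, &t) < 0) {
        /* genuinely not representable */
    } else {
        /* success; note that c may legitimately be -1 here */
    }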
/*
* gregday
@@ -592,10 +632,10 @@ static const int mdays[14] = {0, 31, 28, 31, 30, 31, 30,
 * greater than or equal to 1600, and month [1-12] and day [1-31]
* are within range. Otherwise it returns -1.
*/
-static int long gregday(int year, int month, int day)
+static time_t gregday(int year, int month, int day)
{
- int long ndays = 0;
- int gyear, pyear, m;
+ Sint ndays = 0;
+ Sint gyear, pyear, m;
/* number of days in previous years */
gyear = year - 1600;
@@ -610,10 +650,72 @@ static int long gregday(int year, int month, int day)
if (is_leap_year(year) && (month > 2))
ndays++;
ndays += day - 1;
- return ndays - 135140; /* 135140 = Jan 1, 1970 */
+ return (time_t) (ndays - 135140); /* 135140 = Jan 1, 1970 */
+}
+
+#define SECONDS_PER_MINUTE (60)
+#define SECONDS_PER_HOUR (60 * SECONDS_PER_MINUTE)
+#define SECONDS_PER_DAY (24 * SECONDS_PER_HOUR)
+
+int seconds_to_univ(Sint64 time, Sint *year, Sint *month, Sint *day,
+ Sint *hour, Sint *minute, Sint *second) {
+
+ Sint y,mi;
+ Sint days = time / SECONDS_PER_DAY;
+ Sint secs = time % SECONDS_PER_DAY;
+ Sint tmp;
+
+ if (secs < 0) {
+ days--;
+ secs += SECONDS_PER_DAY;
+ }
+
+ tmp = secs % SECONDS_PER_HOUR;
+
+ *hour = secs / SECONDS_PER_HOUR;
+ *minute = tmp / SECONDS_PER_MINUTE;
+ *second = tmp % SECONDS_PER_MINUTE;
+
+ days += 719468;
+ y = (10000*((Sint64)days) + 14780) / 3652425;
+ tmp = days - (365 * y + y/4 - y/100 + y/400);
+
+ if (tmp < 0) {
+ y--;
+ tmp = days - (365*y + y/4 - y/100 + y/400);
+ }
+ mi = (100 * tmp + 52)/3060;
+ *month = (mi + 2) % 12 + 1;
+ *year = y + (mi + 2) / 12;
+ *day = tmp - (mi * 306 + 5)/10 + 1;
+
+ return 1;
}
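As a sanity check of the date arithmetic above (719468 is the number of days from 0000-03-01 to 1970-01-01 in the proleptic Gregorian calendar), a small usage sketch with the expected results noted in comments:

    Sint y, mo, d, h, mi, s;
    seconds_to_univ(0, &y, &mo, &d, &h, &mi, &s);
    /* 1970-01-01 00:00:00 */
    seconds_to_univ(-1, &y, &mo, &d, &h, &mi, &s);
    /* 1969-12-31 23:59:59 (negative seconds roll back into the previous day) */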
+int univ_to_seconds(Sint year, Sint month, Sint day, Sint hour, Sint minute, Sint second, Sint64 *time) {
+ Sint days;
+
+ if (!(IN_RANGE(1600, year, INT_MAX - 1) &&
+ IN_RANGE(1, month, 12) &&
+ IN_RANGE(1, day, (mdays[month] +
+ (month == 2
+ && (year % 4 == 0)
+ && (year % 100 != 0 || year % 400 == 0)))) &&
+ IN_RANGE(0, hour, 23) &&
+ IN_RANGE(0, minute, 59) &&
+ IN_RANGE(0, second, 59))) {
+ return 0;
+ }
+
+ days = gregday(year, month, day);
+ *time = SECONDS_PER_DAY;
+ *time *= days; /* multiply in 64 bits; a single expression could overflow */
+ *time += SECONDS_PER_HOUR * hour;
+ *time += SECONDS_PER_MINUTE * minute;
+ *time += second;
+ return 1;
+}
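And the inverse direction, again only as an illustrative sketch:

    Sint64 t;
    if (univ_to_seconds(1970, 1, 1, 0, 0, 0, &t)) {
        /* t == 0 */
    }
    if (!univ_to_seconds(1970, 2, 30, 0, 0, 0, &t)) {
        /* rejected: day 30 fails the mdays range check for February */
    }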
int
local_to_univ(Sint *year, Sint *month, Sint *day,
@@ -644,15 +746,18 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
t.tm_min = *minute;
t.tm_sec = *second;
t.tm_isdst = isdst;
- the_clock = mktime(&t);
- if (the_clock == -1) {
+
+ /* the nature of mktime makes this a bit interesting,
+ * up to four mktime calls could happen here
+ */
+
+ if (erl_mktime(&the_clock, &t) < 0) {
if (isdst) {
/* If this is a timezone without DST and the OS (correctly)
refuses to give us a DST time, we simulate the Linux/Solaris
behaviour of giving the same data as if is_dst was not set. */
t.tm_isdst = 0;
- the_clock = mktime(&t);
- if (the_clock == -1) {
+ if (erl_mktime(&the_clock, &t) < 0) {
/* Failed anyway, something else is bad - will be a badarg */
return 0;
}
@@ -662,10 +767,13 @@ local_to_univ(Sint *year, Sint *month, Sint *day,
}
}
#ifdef HAVE_GMTIME_R
- gmtime_r(&the_clock, (tm = &tmbuf));
+ tm = gmtime_r(&the_clock, &tmbuf);
#else
tm = gmtime(&the_clock);
#endif
+ if (!tm) {
+ return 0;
+ }
*year = tm->tm_year + 1900;
*month = tm->tm_mon +1;
*day = tm->tm_mday;
@@ -719,17 +827,20 @@ univ_to_local(Sint *year, Sint *month, Sint *day,
#endif
#ifdef HAVE_LOCALTIME_R
- localtime_r(&the_clock, (tm = &tmbuf));
+ tm = localtime_r(&the_clock, &tmbuf);
#else
tm = localtime(&the_clock);
#endif
- *year = tm->tm_year + 1900;
- *month = tm->tm_mon +1;
- *day = tm->tm_mday;
- *hour = tm->tm_hour;
- *minute = tm->tm_min;
- *second = tm->tm_sec;
- return 1;
+ if (tm) {
+ *year = tm->tm_year + 1900;
+ *month = tm->tm_mon +1;
+ *day = tm->tm_mday;
+ *hour = tm->tm_hour;
+ *minute = tm->tm_min;
+ *second = tm->tm_sec;
+ return 1;
+ }
+ return 0;
}
@@ -798,13 +909,14 @@ void erts_deliver_time(void) {
void erts_time_remaining(SysTimeval *rem_time)
{
- int ticks;
+ erts_time_t ticks;
SysTimeval cur_time;
- long elapsed;
+ erts_time_t elapsed;
/* erts_next_time() returns no of ticks to next timeout or -1 if none */
- if ((ticks = erts_next_time()) == -1) {
+ ticks = (erts_time_t) erts_next_time();
+ if (ticks == (erts_time_t) -1) {
/* timer queue empty */
/* this will cause at most 100000000 ticks */
rem_time->tv_sec = 100000;
@@ -839,7 +951,7 @@ void erts_get_timeval(SysTimeval *tv)
erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
-long
+erts_time_t
erts_get_time(void)
{
SysTimeval sys_tv;
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8833137112..009ca1eb52 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -36,6 +36,7 @@
#include "error.h"
#include "erl_binary.h"
#include "erl_bits.h"
+#include "erl_thr_progress.h"
#if 0
#define DEBUG_PRINTOUTS
@@ -124,8 +125,13 @@ do { \
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE, (FPID), (TPID), (MSG), (BP)); \
} while(0)
#else
+#ifdef USE_VM_PROBES
#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
- erts_queue_message((TPROC), NULL, (BP), (MSG), NIL)
+ erts_queue_message((TPROC), NULL, (BP), (MSG), NIL, NIL)
+#else
+#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
+ erts_queue_message((TPROC), NULL, (BP), (MSG), NIL)
+#endif
#endif
/*
@@ -159,7 +165,7 @@ static Uint active_sched;
void
erts_system_profile_setup_active_schedulers(void)
{
- ERTS_SMP_LC_ASSERT(erts_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
active_sched = erts_active_schedulers();
}
@@ -543,7 +549,7 @@ send_to_port(Process *c_p, Eterm message,
*/
static void
-profile_send(Eterm message) {
+profile_send(Eterm from, Eterm message) {
Uint sz = 0;
ErlHeapFragment *bp = NULL;
Uint *hp = NULL;
@@ -553,6 +559,9 @@ profile_send(Eterm message) {
Eterm profiler = erts_get_system_profile();
+ /* do not profile profiler pid */
+ if (from == profiler) return;
+
if (is_internal_port(profiler)) {
Port *profiler_port = NULL;
@@ -579,7 +588,11 @@ profile_send(Eterm message) {
hp = erts_alloc_message_heap(sz, &bp, &off_heap, profile_p, 0);
msg = copy_struct(message, sz, &hp, &bp->off_heap);
- erts_queue_message(profile_p, NULL, bp, msg, NIL);
+ erts_queue_message(profile_p, NULL, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
}
@@ -990,9 +1003,13 @@ seq_trace_update_send(Process *p)
{
Eterm seq_tracer = erts_get_system_seq_tracer();
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
- if ( (p->id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL))
+ if ( (p->id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL)
+#ifdef USE_VM_PROBES
+ || (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
+#endif
+ ) {
return 0;
-
+ }
SEQ_TRACE_TOKEN_SENDER(p) = p->id; /* Internal pid */
SEQ_TRACE_TOKEN_SERIAL(p) =
make_small(++(p -> seq_trace_clock));
@@ -1174,7 +1191,11 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SEQTRACE, NIL, NIL, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
#else
- erts_queue_message(tracer, NULL, bp, mess, NIL); /* trace_token must be NIL here */
+ erts_queue_message(tracer, NULL, bp, mess, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ ); /* trace_token must be NIL here */
#endif
}
}
@@ -1940,7 +1961,8 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
Eterm* hp;
int need;
- ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0) || erts_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
+ || erts_thr_progress_is_blocking());
if (is_internal_port(t_p->tracer_proc)) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
@@ -2092,8 +2114,7 @@ void save_calls(Process *p, Export *e)
* entries instead of the original BIF functions.
*/
Eterm
-erts_bif_trace(int bif_index, Process* p,
- Eterm arg1, Eterm arg2, Eterm arg3, BeamInstr *I)
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
Eterm result;
int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
@@ -2107,10 +2128,10 @@ erts_bif_trace(int bif_index, Process* p,
* no tracing will occur. Doing the whole else branch will
* also do nothing, only slower.
*/
- Eterm (*func)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, arg1, arg2, arg3, I);
+ Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
+ result = func(p, args, I);
} else {
- Eterm (*func)(Process*, Eterm, Eterm, Eterm, BeamInstr*);
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
@@ -2122,8 +2143,6 @@ erts_bif_trace(int bif_index, Process* p,
* export entry */
BeamInstr *cp = p->cp;
- Eterm args[3] = {arg1, arg2, arg3};
-
/*
* Make continuation pointer OK, it is not during direct BIF calls,
* but it is correct during apply of bif.
@@ -2155,7 +2174,7 @@ erts_bif_trace(int bif_index, Process* p,
func = bif_table[bif_index].f;
- result = func(p, arg1, arg2, arg3, I);
+ result = func(p, args, I);
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
BeamInstr i_return_trace = beam_return_trace[0];
@@ -2467,7 +2486,11 @@ monitor_long_gc(Process *p, Uint time) {
#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
#endif
}
@@ -2539,7 +2562,11 @@ monitor_large_heap(Process *p) {
#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
#endif
}
@@ -2569,7 +2596,11 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
#else
- erts_queue_message(monitor_p, NULL, bp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
#endif
}
@@ -2618,7 +2649,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
make_small(active_sched), timestamp); hp += 7;
#ifndef ERTS_SMP
- profile_send(msg);
+ profile_send(NIL, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
@@ -2653,7 +2684,7 @@ profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint M
timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state, no_schedulers, timestamp); hp += 7;
#ifndef ERTS_SMP
- profile_send(msg);
+ profile_send(NIL, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
@@ -2745,7 +2776,8 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
Eterm mess;
Eterm* hp;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
if (is_internal_port(t_p->tracer_proc)) {
#define LOCAL_HEAP_SIZE (5+5)
@@ -2919,11 +2951,11 @@ profile_runnable_port(Port *p, Eterm status) {
msg = TUPLE5(hp, am_profile, p->id, status, count, timestamp); hp += 6;
#ifndef ERTS_SMP
- profile_send(msg);
+ profile_send(p->id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp);
#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2972,11 +3004,11 @@ profile_runnable_proc(Process *p, Eterm status){
timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
msg = TUPLE5(hp, am_profile, p->id, status, where, timestamp); hp += 6;
#ifndef ERTS_SMP
- profile_send(msg);
+ profile_send(p->id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp);
#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -3021,8 +3053,6 @@ static ErtsSysMsgQ *sys_message_queue_end;
static erts_tid_t sys_msg_dispatcher_tid;
static erts_cnd_t smq_cnd;
-static int dispatcher_waiting;
-
ERTS_QUALLOC_IMPL(smq_element, ErtsSysMsgQ, 20, ERTS_ALC_T_SYS_MSG_Q)
static void
@@ -3066,18 +3096,6 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
erts_smp_mtx_unlock(&smq_mtx);
}
-static void
-prepare_for_block(void *unused)
-{
- erts_smp_mtx_unlock(&smq_mtx);
-}
-
-static void
-resume_after_block(void *unused)
-{
- erts_smp_mtx_lock(&smq_mtx);
-}
-
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
@@ -3143,10 +3161,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_monitor_flags.busy_port
&& !erts_system_monitor_flags.busy_dist_port)
break; /* Everything is disabled */
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_smp_thr_progress_block();
if (system_monitor == receiver || receiver == NIL)
erts_system_monitor_clear(NULL);
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case SYS_MSG_TYPE_SYSPROF:
if (receiver == NIL
@@ -3156,11 +3174,11 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
&& !erts_system_profile_flags.scheduler)
break;
/* Block system to clear flags */
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
if (system_profile == receiver || receiver == NIL) {
erts_system_profile_clear(NULL);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
break;
case SYS_MSG_TYPE_ERRLGR: {
char *no_elgger = "(no error logger present)";
@@ -3201,22 +3219,68 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
}
}
+static void
+sys_msg_dispatcher_wakeup(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ *wait_p = 0;
+ erts_smp_cnd_signal(&smq_cnd);
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+sys_msg_dispatcher_prep_wait(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ *wait_p = 1;
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+sys_msg_dispatcher_fin_wait(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ *wait_p = 0;
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+sys_msg_dispatcher_wait(void *vwait_p)
+{
+ int *wait_p = (int *) vwait_p;
+ erts_smp_mtx_lock(&smq_mtx);
+ while (*wait_p)
+ erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
static void *
sys_msg_dispatcher_func(void *unused)
{
+ ErtsThrPrgrCallbacks callbacks;
ErtsSysMsgQ *local_sys_message_queue = NULL;
+ int wait = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_set_thread_name("system message dispatcher");
#endif
- erts_register_blockable_thread();
- erts_smp_activity_begin(ERTS_ACTIVITY_IO, NULL, NULL, NULL);
+ callbacks.arg = (void *) &wait;
+ callbacks.wakeup = sys_msg_dispatcher_wakeup;
+ callbacks.prepare_wait = sys_msg_dispatcher_prep_wait;
+ callbacks.wait = sys_msg_dispatcher_wait;
+ callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;
+
+ erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
while (1) {
+ int end_wait = 0;
ErtsSysMsgQ *smqp;
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
erts_smp_mtx_lock(&smq_mtx);
@@ -3228,20 +3292,16 @@ sys_msg_dispatcher_func(void *unused)
}
/* Fetch current trace message queue ... */
- erts_smp_activity_change(ERTS_ACTIVITY_IO,
- ERTS_ACTIVITY_WAIT,
- prepare_for_block,
- resume_after_block,
- NULL);
- dispatcher_waiting = 1;
+ if (!sys_message_queue) {
+ erts_smp_mtx_unlock(&smq_mtx);
+ end_wait = 1;
+ erts_thr_progress_active(NULL, 0);
+ erts_thr_progress_prepare_wait(NULL);
+ erts_smp_mtx_lock(&smq_mtx);
+ }
+
while (!sys_message_queue)
erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
- dispatcher_waiting = 0;
- erts_smp_activity_change(ERTS_ACTIVITY_WAIT,
- ERTS_ACTIVITY_IO,
- prepare_for_block,
- resume_after_block,
- NULL);
local_sys_message_queue = sys_message_queue;
sys_message_queue = NULL;
@@ -3249,6 +3309,11 @@ sys_msg_dispatcher_func(void *unused)
erts_smp_mtx_unlock(&smq_mtx);
+ if (end_wait) {
+ erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_active(NULL, 1);
+ }
+
/* Send trace messages ... */
ASSERT(local_sys_message_queue);
@@ -3259,6 +3324,9 @@ sys_msg_dispatcher_func(void *unused)
Process *proc = NULL;
Port *port = NULL;
+ if (erts_thr_progress_update(NULL))
+ erts_thr_progress_leader_update(NULL);
+
#ifdef DEBUG_PRINTOUTS
print_msg_type(smqp);
#endif
@@ -3318,7 +3386,11 @@ sys_msg_dispatcher_func(void *unused)
}
else {
queue_proc_msg:
- erts_queue_message(proc,&proc_locks,smqp->bp,smqp->msg,NIL);
+ erts_queue_message(proc,&proc_locks,smqp->bp,smqp->msg,NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
@@ -3372,7 +3444,6 @@ sys_msg_dispatcher_func(void *unused)
}
}
- erts_smp_activity_end(ERTS_ACTIVITY_IO, NULL, NULL, NULL);
return NULL;
}
@@ -3422,7 +3493,6 @@ init_sys_msg_dispatcher(void)
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
- dispatcher_waiting = 0;
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 158eb361a4..6d5eae73b0 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -47,7 +47,7 @@ typedef struct _restart_context {
static Uint max_loop_limit;
-static BIF_RETTYPE utf8_to_list(BIF_ALIST_1);
+static BIF_RETTYPE utf8_to_list(Process *p, Eterm arg1);
static BIF_RETTYPE finalize_list_to_list(Process *p,
byte *bytes,
Eterm rest,
@@ -227,8 +227,8 @@ static ERTS_INLINE int simple_loops_to_common(int cost)
static Sint aligned_binary_size(Eterm binary)
{
- unsigned char *bytes;
- Uint bitoffs;
+ ERTS_DECLARE_DUMMY(unsigned char *bytes);
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
Uint bitsize;
ERTS_GET_BINARY_BYTES(binary, bytes, bitoffs, bitsize);
@@ -348,12 +348,6 @@ static int copy_utf8_bin(byte *target, byte *source, Uint size,
return copied;
}
- if (((*source) == 0xEF) && (source[1] == 0xBF) &&
- ((source[2] == 0xBE) || (source[2] == 0xBF))) {
- *err_pos = source;
- return copied;
- }
-
*(target++) = *(source++);
*(target++) = *(source++);
*(target++) = *(source++);
@@ -714,9 +708,8 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
target[(*pos)++] = (((byte) (x & 0x3F)) |
((byte) 0x80));
} else if (x < 0x10000) {
- if ((x >= 0xD800 && x <= 0xDFFF) ||
- (x == 0xFFFE) ||
- (x == 0xFFFF)) { /* Invalid unicode range */
+ if (x >= 0xD800 && x <= 0xDFFF) {
+ /* Invalid unicode range */
*err = 1;
goto done;
}
@@ -901,7 +894,9 @@ static BIF_RETTYPE build_utf8_return(Process *p,Eterm bin,int pos,
static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3)
{
+#ifdef DEBUG
Eterm *real_bin;
+#endif
byte* bytes;
Eterm rest_term;
int left, sleft;
@@ -915,8 +910,10 @@ static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3)
/*erts_printf("Trap %T!\r\n",BIF_ARG_2);*/
ASSERT(is_binary(BIF_ARG_1));
+#ifdef DEBUG
real_bin = binary_val(BIF_ARG_1);
ASSERT(*real_bin == HEADER_PROC_BIN);
+#endif
pos = (int) binary_size(BIF_ARG_1);
bytes = binary_bytes(BIF_ARG_1);
sleft = left = allowed_iterations(BIF_P);
@@ -1230,10 +1227,6 @@ int erts_analyze_utf8(byte *source, Uint size,
((source[1] & 0x20) != 0)) {
return ERTS_UTF8_ERROR;
}
- if (((*source) == 0xEF) && (source[1] == 0xBF) &&
- ((source[2] == 0xBE) || (source[2] == 0xBF))) {
- return ERTS_UTF8_ERROR;
- }
source += 3;
size -= 3;
} else if (((*source) & ((byte) 0xF8)) == 0xF0) {
@@ -1730,7 +1723,7 @@ static BIF_RETTYPE do_bif_utf8_to_list(Process *p,
if (b_sz) {
ErlSubBin *sb;
Eterm orig;
- Uint offset;
+ ERTS_DECLARE_DUMMY(Uint offset);
ASSERT(state != ERTS_UTF8_OK);
hp = HAlloc(p, ERL_SUB_BIN_SIZE);
sb = (ErlSubBin *) hp;
@@ -1839,13 +1832,13 @@ static BIF_RETTYPE characters_to_list_trap_4(BIF_ALIST_1)
* Instead of building an utf8 buffer, we analyze the binary given and use that.
*/
-static BIF_RETTYPE utf8_to_list(BIF_ALIST_1)
+static BIF_RETTYPE utf8_to_list(Process* p, Eterm arg)
{
- if (!is_binary(BIF_ARG_1) || aligned_binary_size(BIF_ARG_1) < 0) {
- BIF_ERROR(BIF_P,BADARG);
+ if (!is_binary(arg) || aligned_binary_size(arg) < 0) {
+ BIF_ERROR(p, BADARG);
}
- return do_bif_utf8_to_list(BIF_P, BIF_ARG_1, 0U, 0U, 0U,
- ERTS_UTF8_ANALYZE_MORE,NIL);
+ return do_bif_utf8_to_list(p, arg, 0U, 0U, 0U,
+ ERTS_UTF8_ANALYZE_MORE, NIL);
}
@@ -2166,9 +2159,8 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
} else if (x < 0x800) {
need += 2;
} else if (x < 0x10000) {
- if ((x >= 0xD800 && x <= 0xDFFF) ||
- (x == 0xFFFE) ||
- (x == 0xFFFF)) { /* Invalid unicode range */
+ if (x >= 0xD800 && x <= 0xDFFF) {
+ /* Invalid unicode range */
DESTROY_ESTACK(stack);
return ((Sint) -1);
}
@@ -2314,9 +2306,7 @@ L_Again: /* Restart with sublist, old listend was pushed on stack */
*p++ = (((byte) (x & 0x3F)) |
((byte) 0x80));
} else if (x < 0x10000) {
- ASSERT(!((x >= 0xD800 && x <= 0xDFFF) ||
- (x == 0xFFFE) ||
- (x == 0xFFFF)));
+ ASSERT(!(x >= 0xD800 && x <= 0xDFFF));
*p++ = (((byte) (x >> 12)) |
((byte) 0xE0));
*p++ = ((((byte) (x >> 6)) & 0x3F) |
@@ -2580,11 +2570,11 @@ BIF_RETTYPE prim_file_internal_native2name_1(BIF_ALIST_1)
BIF_RETTYPE prim_file_internal_normalize_utf8_1(BIF_ALIST_1)
{
- Eterm real_bin;
- Uint offset;
+ ERTS_DECLARE_DUMMY(Eterm real_bin);
+ ERTS_DECLARE_DUMMY(Uint offset);
Uint size,num_chars;
Uint bitsize;
- Uint bitoffs;
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
Eterm ret;
byte *temp_alloc = NULL;
byte *bytes;
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index e7fd144ec3..5dc307e383 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -55,7 +55,7 @@
heap data on the C stack or if we use the buffers in the scheduler data. */
#define TMP_HEAP_SIZE 128 /* Number of Eterm in the schedulers
small heap for transient heap data */
-#define CMP_TMP_HEAP_SIZE 2 /* cmp wants its own tmp-heap... */
+#define CMP_TMP_HEAP_SIZE 32 /* cmp wants its own tmp-heap... */
#define ERL_ARITH_TMP_HEAP_SIZE 4 /* as does erl_arith... */
#define BEAM_EMU_TMP_HEAP_SIZE 2 /* and beam_emu... */
@@ -83,11 +83,7 @@
#define CP_SIZE 1
#define ErtsHAllocLockCheck(P) \
- ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks((P))) \
- || ((P)->id == ERTS_INVALID_PID) \
- || ((P)->scheduler_data \
- && (P) == (P)->scheduler_data->match_pseudo_process) \
- || erts_is_system_blocked(0))
+ ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P)))
#ifdef DEBUG
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
new file mode 100644
index 0000000000..c1024dafc4
--- /dev/null
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -0,0 +1,726 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Dustin Sallings, Michal Ptaszek, Scott Lystig Fritchie 2011-2012.
+ * All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * A note on probe naming: if "__" appears in a provider probe
+ * definition, then two things happen during compilation:
+ *
+ * 1. The "__" will turn into a hyphen, "-", for the probe name.
+ * 2. The "__" will turn into a single underscore, "_", for the
+ * macro names and function definitions that the compiler and
+ * C developers will see.
+ *
+ * We'll try to use the following naming convention. We're a bit
+ * limited because, as a USDT probe, we can only specify the 4th part
+ * of the probe name, e.g. erlang*:::mumble. The 2nd part of the
+ * probe name is always going to be "beam" or "beam.smp", and the 3rd
+ * part of the probe name will always be the name of the function
+ * that's calling the probe.
+ *
+ * So, all probes will have names defined in this file using the
+ * convention category__name or category__sub_category__name. This
+ * will translate to probe names of category-name or
+ * category-sub_category-name.
+ *
+ * Each of "category", "sub_category", and "name" may have underscores
+ * but may not have hyphens.
+ */
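+
+/*
+ * Illustrative sketch of the mapping described above (not part of the
+ * provider itself): the probe declared below as message__send is
+ * addressed from a D script as
+ *
+ *     erlang*:::message-send { ... }
+ *
+ * while the macros generated for the C code are named along the lines
+ * of ERLANG_MESSAGE_SEND_ENABLED() and ERLANG_MESSAGE_SEND(...).
+ */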
+
+provider erlang {
+ /**
+ * Fired when a message is sent from one local process to another.
+ *
+ * NOTE: The 'size' parameter is in machine-dependent words; the
+ * actual size of any binary terms in the message is not
+ * included.
+ *
+ * @param sender the PID (string form) of the sender
+ * @param receiver the PID (string form) of the receiver
+ * @param size the size of the message being delivered (words)
+ * @param token_label for the sender's sequential trace token
+ * @param token_previous count for the sender's sequential trace token
+ * @param token_current count for the sender's sequential trace token
+ */
+ probe message__send(char *sender, char *receiver, uint32_t size,
+ int token_label, int token_previous, int token_current);
+
+ /**
+ * Fired when a message is sent from a local process to a remote process.
+ *
+ * NOTE: The 'size' parameter is in machine-dependent words; the
+ * actual size of any binary terms in the message is not
+ * included.
+ *
+ * @param sender the PID (string form) of the sender
+ * @param node_name the Erlang node name (string form) of the receiver
+ * @param receiver the PID/name (string form) of the receiver
+ * @param size the size of the message being delivered (words)
+ * @param token_label for the sender's sequential trace token
+ * @param token_previous count for the sender's sequential trace token
+ * @param token_current count for the sender's sequential trace token
+ */
+ probe message__send__remote(char *sender, char *node_name, char *receiver,
+ uint32_t size,
+ int token_label, int token_previous, int token_current);
+
+ /**
+ * Fired when a message is queued to a local process. This probe
+ * will not fire if the sender's pid == receiver's pid.
+ *
+ * NOTE: The 'size' parameter is in machine-dependent words; the
+ * actual size of any binary terms in the message is not
+ * included.
+ *
+ * NOTE: In cases of messages in external format (i.e. from another
+ * Erlang node), we probably don't know the message size
+ * without performing substantial extra computation. To
+ * avoid the extra CPU overhead, the message size may be
+ * reported as -1, which can appear to a D script as 4294967295.
+ *
+ * @param receiver the PID (string form) of the receiver
+ * @param size the size of the message being delivered (words)
+ * @param queue_len length of the queue of the receiving process
+ * @param token_label for the sender's sequential trace token
+ * @param token_previous count for the sender's sequential trace token
+ * @param token_current count for the sender's sequential trace token
+ */
+ probe message__queued(char *receiver, uint32_t size, uint32_t queue_len,
+ int token_label, int token_previous, int token_current);
+
+ /**
+ * Fired when a message is 'receive'd by a local process and removed
+ * from its mailbox.
+ *
+ * NOTE: The 'size' parameter is in machine-dependent words; the
+ * actual size of any binary terms in the message is not
+ * included.
+ *
+ * NOTE: In cases of messages in external format (i.e. from another
+ * Erlang node), we probably don't know the message size
+ * without performing substantial extra computation. To
+ * avoid the extra CPU overhead, the message size may be
+ * reported as -1, which can appear to a D script as 4294967295.
+ *
+ * @param receiver the PID (string form) of the receiver
+ * @param size the size of the message being delivered (words)
+ * @param queue_len length of the queue of the receiving process
+ * @param token_label for the sender's sequential trace token
+ * @param token_previous count for the sender's sequential trace token
+ * @param token_current count for the sender's sequential trace token
+ */
+ probe message__receive(char *receiver, uint32_t size, uint32_t queue_len,
+ int token_label, int token_previous, int token_current);
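+
+ /*
+ * A minimal usage sketch (assumptions: the emulator is built with
+ * these probes and the provider attaches as "erlang"): aggregate
+ * message sizes, in words, by sender.
+ *
+ *     erlang*:::message-send
+ *     {
+ *         @words[copyinstr(arg0)] = sum(arg2);
+ *     }
+ *
+ * Per the parameter lists above, arg0 is the sender PID string and
+ * arg2 the message size in words.
+ */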
+
+ /**
+ * Fired when an Eterm structure is being copied.
+ *
+ * NOTE: Due to the placement of this probe, the process ID of the
+ * owner of the Eterm is not available.
+ *
+ * @param size the size of the structure
+ */
+ probe copy__struct(uint32_t size);
+
+ /**
+ * Fired when an Eterm is being copied onto a process.
+ *
+ * @param proc the PID (string form) of the recipient process
+ * @param size the size of the structure
+ */
+ probe copy__object(char *proc, uint32_t size);
+
+ /* PID, Module, Function, Arity */
+
+ /**
+ * Fired whenever a user function is being called locally.
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ * @param depth the stack depth
+ */
+ probe local__function__entry(char *p, char *mfa, int depth);
+
+ /**
+ * Fired whenever a user function is called externally
+ * (through an export entry).
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ * @param depth the stack depth
+ */
+ probe global__function__entry(char *p, char *mfa, int depth);
+
+ /**
+ * Fired whenever a user function returns.
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ * @param depth the stack depth
+ */
+ probe function__return(char *p, char *mfa, int depth);
+
+ /**
+ * Fired whenever a Built In Function is called.
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ */
+ probe bif__entry(char *p, char *mfa);
+
+ /**
+ * Fired whenever a Built In Function returns.
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ */
+ probe bif__return(char *p, char *mfa);
+
+ /**
+ * Fired whenever a Native Function is called.
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ */
+ probe nif__entry(char *p, char *mfa);
+
+ /**
+ * Fired whenever a Native Function returns.
+ *
+ * @param p the PID (string form) of the process
+ * @param mfa the m:f/a of the function
+ */
+ probe nif__return(char *p, char *mfa);
+
+ /**
+ * Fired when a major GC is starting.
+ *
+ * @param p the PID (string form) of the process being garbage collected
+ * @param need the number of words needed on the heap
+ */
+ probe gc_major__start(char *p, int need);
+
+ /**
+ * Fired when a minor GC is starting.
+ *
+ * @param p the PID (string form) of the process being garbage collected
+ * @param need the number of words needed on the heap
+ */
+ probe gc_minor__start(char *p, int need);
+
+ /**
+ * Fired when a major GC has finished.
+ *
+ * @param p the PID (string form) of the process being garbage collected
+ * @param reclaimed the amount of space reclaimed
+ */
+ probe gc_major__end(char *p, int reclaimed);
+
+ /**
+ * Fired when a minor GC has finished.
+ *
+ * @param p the PID (string form) of the process being garbage collected
+ * @param reclaimed the amount of space reclaimed
+ */
+ probe gc_minor__end(char *p, int reclaimed);
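+
+ /*
+ * Sketch (assuming the probes above are available): count minor vs.
+ * major collections per process.
+ *
+ *     erlang*:::gc_minor-start { @gc[copyinstr(arg0), "minor"] = count(); }
+ *     erlang*:::gc_major-start { @gc[copyinstr(arg0), "major"] = count(); }
+ */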
+
+ /**
+ * Fired when a process is spawned.
+ *
+ * @param p the PID (string form) of the new process.
+ * @param mfa the m:f/a of the function
+ */
+ probe process__spawn(char *p, char *mfa);
+
+ /**
+ * Fired when a process is exiting.
+ *
+ * @param p the PID (string form) of the exiting process
+ * @param reason the reason for the exit (may be truncated)
+ */
+ probe process__exit(char *p, char *reason);
+
+ /**
+ * Fired when an exit signal is delivered to a local process.
+ *
+ * @param sender the PID (string form) of the exiting process
+ * @param receiver the PID (string form) of the process receiving EXIT signal
+ * @param reason the reason for the exit (may be truncated)
+ */
+ probe process__exit_signal(char *sender, char *receiver, char *reason);
+
+ /**
+ * Fired when an exit signal is delivered to a remote process.
+ *
+ * @param sender the PID (string form) of the exiting process
+ * @param node_name the Erlang node name (string form) of the receiver
+ * @param receiver the PID (string form) of the process receiving EXIT signal
+ * @param reason the reason for the exit (may be truncated)
+ * @param token_label for the sender's sequential trace token
+ * @param token_previous count for the sender's sequential trace token
+ * @param token_current count for the sender's sequential trace token
+ */
+ probe process__exit_signal__remote(char *sender, char *node_name,
+ char *receiver, char *reason,
+ int token_label, int token_previous, int token_current);
+
+ /**
+ * Fired when a process is scheduled.
+ *
+ * @param p the PID (string form) of the newly scheduled process
+ * @param mfa the m:f/a of the function it should run next
+ */
+ probe process__scheduled(char *p, char *mfa);
+
+ /**
+ * Fired when a process is unscheduled.
+ *
+ * @param p the PID (string form) of the process that has been
+ * unscheduled.
+ */
+ probe process__unscheduled(char *p);
+
+ /**
+ * Fired when a process goes into hibernation.
+ *
+ * @param p the PID (string form) of the process entering hibernation
+ * @param mfa the m:f/a of the location to resume
+ */
+ probe process__hibernate(char *p, char *mfa);
+
+ /**
+ * Fired when a process is unblocked after a port has been unblocked.
+ *
+ * @param p the PID (string form) of the process that is no longer
+ * blocked
+ * @param port the port that is no longer busy (i.e., is now unblocked)
+ */
+ probe process__port_unblocked(char *p, char *port);
+
+ /**
+ * Fired when a process' heap is growing.
+ *
+ * @param p the PID (string form)
+ * @param old_size the size of the old heap
+ * @param new_size the size of the new heap
+ */
+ probe process__heap_grow(char *p, int old_size, int new_size);
+
+ /**
+ * Fired when a process' heap is shrinking.
+ *
+ * @param p the PID (string form)
+ * @param old_size the size of the old heap
+ * @param new_size the size of the new heap
+ */
+ probe process__heap_shrink(char *p, int old_size, int new_size);
+
+ /* network distribution */
+
+ /**
+ * Fired when network distribution event monitor events are triggered.
+ *
+ * @param node the name of the reporting node
+ * @param what the type of event, e.g., nodeup, nodedown
+ * @param monitored_node the name of the monitored node
+ * @param type the type of node, e.g., visible, hidden
+ * @param reason the reason term, e.g., normal, connection_closed, term()
+ */
+ probe dist__monitor(char *node, char *what, char *monitored_node,
+ char *type, char *reason);
+
+ /**
+ * Fired when a network distribution port is busy (i.e. blocked),
+ * usually due to the remote node not consuming distribution
+ * data quickly enough.
+ *
+ * @param node the name of the reporting node
+ * @param port the port ID of the busy port
+ * @param remote_node the name of the remote node.
+ * @param pid the PID (string form) of the local process that has
+ * become unschedulable until the port becomes unblocked.
+ */
+ probe dist__port_busy(char *node, char *port, char *remote_node,
+ char *pid);
+
+ /**
+ * Fired when network distribution's driver's "output" callback is called
+ *
+ * @param node the name of the reporting node
+ * @param port the port ID of the busy port
+ * @param remote_node the name of the remote node.
+ * @param bytes the number of bytes written
+ */
+ probe dist__output(char *node, char *port, char *remote_node, int bytes);
+
+ /**
+ * Fired when network distribution's driver's "outputv" callback is called
+ *
+ * @param node the name of the reporting node
+ * @param port the port ID of the busy port
+ * @param remote_node the name of the remote node.
+ * @param bytes the number of bytes written
+ */
+ probe dist__outputv(char *node, char *port, char *remote_node, int bytes);
+
+ /**
+ * Fired when a network distribution port is no longer busy (i.e. no
+ * longer blocked).
+ *
+ * NOTE: This probe may fire multiple times after the same single
+ * dist-port_busy probe firing.
+ *
+ * @param node the name of the reporting node
+ * @param port the port ID of the busy port
+ * @param remote_node the name of the remote node.
+ */
+ probe dist__port_not_busy(char *node, char *port, char *remote_node);
+
+ /* ports */
+
+ /**
+ * Fired when a new port is opened.
+ *
+ * @param process the PID (string form)
+ * @param port_name the string used when the port was opened
+ * @param port the Port (string form) of the new port
+ */
+ probe port__open(char *process, char *port_name, char *port);
+
+ /**
+ * Fired when port_command is issued.
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param command_type type of the issued command, one of: "close", "command" or "connect"
+ */
+ probe port__command(char *process, char *port, char *port_name, char *command_type);
+
+ /**
+ * Fired when port_control is issued.
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param command_no command number that has been issued to the port
+ */
+ probe port__control(char *process, char *port, char *port_name, int command_no);
+
+ /**
+ * Fired when port is closed via port_close/1 (reason = 'normal')
+ * or is sent an exit signal.
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param reason Erlang term representing the exit signal, e.g. 'normal'
+ */
+ probe port__exit(char *process, char *port, char *port_name,
+ char *reason);
+
+ /**
+ * Fired when port_connect is issued.
+ *
+ * @param process the PID (string form) of the current port owner
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param new_process the PID (string form) of the new port owner
+ */
+ probe port__connect(char *process, char *port, char *port_name,
+ char *new_process);
+
+ /**
+ * Fired when a port is busy (i.e. blocked)
+ *
+ * @param port the port ID of the busy port
+ */
+ probe port__busy(char *port);
+
+ /**
+ * Fired when a port is no longer busy (i.e. no longer blocked)
+ *
+ * @param port the port ID of the not busy port
+ */
+ probe port__not_busy(char *port);
+
+ /* drivers */
+
+ /**
+ * Fired when drivers's "init" callback is called.
+ *
+ * @param name the name of the driver
+ * @param major the major version number
+ * @param minor the minor version number
+ * @param flags the flags argument
+ */
+ probe driver__init(char *name, int major, int minor, int flags);
+
+ /**
+ * Fired when drivers's "start" callback is called.
+ *
+ * @param process the PID (string form) of the calling process
+ * @param name the name of the driver
+ * @param port the Port (string form) of the driver's port
+ */
+ probe driver__start(char *process, char *name, char *port);
+
+ /**
+ * Fired when drivers's "stop" callback is called.
+ *
+ * @param process the PID (string form) of the calling process
+ * @param name the name of the driver
+ * @param port the Port (string form) of the driver's port
+ */
+ probe driver__stop(char *process, char *name, char *port);
+
+ /**
+ * Fired when drivers's "finish" callback is called.
+ *
+ * @param name the name of the driver
+ */
+ probe driver__finish(char *name);
+
+ /**
+ * Fired when drivers's "flush" callback is called.
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__flush(char *process, char *port, char *port_name);
+
+ /**
+ * Fired when driver's "output" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param bytes the number of bytes written
+ */
+ probe driver__output(char *node, char *port, char *port_name, int bytes);
+
+ /**
+ * Fired when driver's "outputv" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param bytes the number of bytes written
+ */
+ probe driver__outputv(char *node, char *port, char *port_name, int bytes);
+
+ /**
+ * Fired when driver's "control" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param command the command #
+ * @param bytes the number of bytes written
+ */
+ probe driver__control(char *node, char *port, char *port_name,
+ int command, int bytes);
+
+ /**
+ * Fired when driver's "call" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ * @param command the command #
+ * @param bytes the number of bytes written
+ */
+ probe driver__call(char *node, char *port, char *port_name,
+ int command, int bytes);
+
+ /**
+ * Fired when driver's "event" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__event(char *node, char *port, char *port_name);
+
+ /**
+ * Fired when driver's "ready_input" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__ready_input(char *node, char *port, char *port_name);
+
+ /**
+ * Fired when driver's "read_output" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__ready_output(char *node, char *port, char *port_name);
+
+ /**
+ * Fired when driver's "timeout" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__timeout(char *node, char *port, char *port_name);
+
+ /**
+ * Fired when drivers's "ready_async" callback is called.
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__ready_async(char *process, char *port, char *port_name);
+
+ /**
+ * Fired when driver's "process_exit" callback is called
+ *
+ * @param process the PID (string form)
+ * @param port the Port (string form)
+ * @param port_name the string used when the port was opened
+ */
+ probe driver__process_exit(char *node, char *port, char *port_name);
+
+ /**
+ * Fired when driver's "stop_select" callback is called
+ *
+ * @param name the name of the driver
+ */
+ probe driver__stop_select(char *name);
+
+
+ /* Async driver pool */
+
+ /**
+ * Show the post-add length of the async driver thread pool member's queue.
+ *
+ * NOTE: The port name is not available: additional lock(s) must
+ * be acquired in order to get the port name safely in an SMP
+ * environment. The same is true for the aio__pool_get probe.
+ *
+ * @param port the Port (string form)
+ * @param len the new queue length
+ */
+ probe aio_pool__add(char *, int);
+
+ /**
+ * Show the post-get length of the async driver thread pool member's queue.
+ *
+ * @param port the Port (string form)
+ * @param len the new queue length
+ */
+ probe aio_pool__get(char *, int);
+
+ /* Probes for efile_drv.c */
+
+ /**
+ * Entry into the efile_drv.c file I/O driver
+ *
+ * For a list of command numbers used by this driver, see the section
+ * "Guide to probe arguments" in ../../../README.md. That section
+ * also contains an explanation of the various integer and string
+ * arguments that may be present when any particular probe fires.
+ *
+ * NOTE: Not all Linux platforms (using SystemTap) can support
+ * arguments beyond arg9.
+ *
+ *
+ * TODO: Adding the port string, args[10], is a pain. Making that
+ * port string available to all the other efile_drv.c probes
+ * will be more pain. Is the pain worth it? If yes, then
+ * add them everywhere else and grit our teeth. If no, then
+ * rip it out.
+ *
+ * @param thread-id number of the scheduler Pthread arg0
+ * @param tag number: {thread-id, tag} uniquely names a driver operation
+ * @param user-tag string arg2
+ * @param command number arg3
+ * @param string argument 1 arg4
+ * @param string argument 2 arg5
+ * @param integer argument 1 arg6
+ * @param integer argument 2 arg7
+ * @param integer argument 3 arg8
+ * @param integer argument 4 arg9
+ * @param port the port (string form) args[10]
+ */
+ probe efile_drv__entry(int, int, char *, int, char *, char *,
+ int64_t, int64_t, int64_t, int64_t, char *);
+
+ /**
+ * Entry into the driver's internal work function. Computation here
+ * is performed by an async worker pool Pthread.
+ *
+ * @param thread-id number
+ * @param tag number
+ * @param command number
+ */
+ probe efile_drv__int_entry(int, int, int);
+
+ /**
+ * Return from the driver's internal work function.
+ *
+ * @param thread-id number
+ * @param tag number
+ * @param command number
+ */
+ probe efile_drv__int_return(int, int, int);
+
+ /**
+ * Return from the efile_drv.c file I/O driver
+ *
+ * @param thread-id number arg0
+ * @param tag number arg1
+ * @param user-tag string arg2
+ * @param command number arg3
+ * @param Success? 1 is success, 0 is failure arg4
+ * @param If failure, the errno of the error. arg5
+ */
+ probe efile_drv__return(int, int, char *, int, int, int);
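+
+ /*
+ * Sketch of correlating one operation across entry and return using
+ * the {thread-id, tag} pair documented above (illustrative only):
+ *
+ *     erlang*:::efile_drv-entry { start[arg0, arg1] = timestamp; }
+ *     erlang*:::efile_drv-return /start[arg0, arg1]/
+ *     {
+ *         @lat[arg3] = quantize(timestamp - start[arg0, arg1]);
+ *         start[arg0, arg1] = 0;
+ *     }
+ *
+ * arg3 of efile_drv-return is the command number, so @lat buckets
+ * latency per file operation.
+ */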
+
+/*
+ * NOTE:
+ * For formatting int64_t arguments within a D script, see:
+ *
+ * http://mail.opensolaris.org/pipermail/dtrace-discuss/2006-November/002830.html
+ * Summary:
+ * "1) you don't need the 'l' printf() modifiers with DTrace ever"
+ */
+
+/*
+ * NOTE: For efile_drv-return + SMP + R14B03 (and perhaps other
+ * releases), the sched-thread-id will be the same as the
+ * work-thread-id: erl_async.c's async_main() function
+ * will call the asynchronous invoke function and then
+ * immediately call the driver's ready_async function while
+ * inside the same I/O worker pool thread.
+ * For R14B03's source, see erl_async.c lines 302-317.
+ */
+};
+
+#pragma D attributes Evolving/Evolving/Common provider erlang provider
+#pragma D attributes Private/Private/Common provider erlang module
+#pragma D attributes Private/Private/Common provider erlang function
+#pragma D attributes Evolving/Evolving/Common provider erlang name
+#pragma D attributes Evolving/Evolving/Common provider erlang args
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 5bc402fe22..fb0ee99119 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -208,7 +208,8 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity)
Export e;
int ix;
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_smp_thr_progress_is_blocking());
ASSERT(is_atom(mod));
ASSERT(is_atom(func));
e.code[0] = mod;
@@ -265,7 +266,8 @@ erts_export_consolidate(void)
HashInfo hi;
#endif
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_smp_thr_progress_is_blocking());
export_write_lock();
erts_index_merge(&secondary_export_table, &export_table);
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 1a102f7187..44abc83d6d 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -46,7 +46,7 @@
#ifdef HIPE
#include "hipe_mode_switch.h"
#endif
-#define in_area(ptr,start,nbytes) ((Uint)((char*)(ptr) - (char*)(start)) < (nbytes))
+#define in_area(ptr,start,nbytes) ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define MAX_STRING_LEN 0xffff
@@ -88,7 +88,7 @@ static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*);
static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
-static Sint decoded_size(byte *ep, byte* endp, int only_heap_bins, int internal_tags);
+static Sint decoded_size(byte *ep, byte* endp, int internal_tags);
static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned);
@@ -459,6 +459,12 @@ Uint erts_encode_ext_size(Eterm term)
+ 1 /* VERSION_MAGIC */;
}
+Uint erts_encode_ext_size_2(Eterm term, unsigned dflags)
+{
+ return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|dflags)
+ + 1 /* VERSION_MAGIC */;
+}
+
Uint erts_encode_ext_size_ets(Eterm term)
{
return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS|DFLAGS_INTERNAL_TAGS);
@@ -804,7 +810,7 @@ bad_dist_ext(ErtsDistExternal *edep)
}
Sint
-erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
+erts_decode_dist_ext_size(ErtsDistExternal *edep)
{
Sint res;
byte *ep;
@@ -823,7 +829,7 @@ erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
goto fail;
ep = edep->extp+1;
}
- res = decoded_size(ep, edep->ext_endp, no_refc_bins, 0);
+ res = decoded_size(ep, edep->ext_endp, 0);
if (res >= 0)
return res;
fail:
@@ -831,16 +837,16 @@ erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
return -1;
}
-Sint erts_decode_ext_size(byte *ext, Uint size, int no_refc_bins)
+Sint erts_decode_ext_size(byte *ext, Uint size)
{
if (size == 0 || *ext != VERSION_MAGIC)
return -1;
- return decoded_size(ext+1, ext+size, no_refc_bins, 0);
+ return decoded_size(ext+1, ext+size, 0);
}
Sint erts_decode_ext_size_ets(byte *ext, Uint size)
{
- Sint sz = decoded_size(ext, ext+size, 0, 1);
+ Sint sz = decoded_size(ext, ext+size, 1);
ASSERT(sz >= 0);
return sz;
}
@@ -962,7 +968,7 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
ede.extp = binary_bytes(real_bin)+offset;
ede.ext_endp = ede.extp + size;
- hsz = erts_decode_dist_ext_size(&ede, 0);
+ hsz = erts_decode_dist_ext_size(&ede);
if (hsz < 0)
goto badarg;
@@ -982,16 +988,16 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
}
-Eterm
-term_to_binary_1(Process* p, Eterm Term)
+BIF_RETTYPE term_to_binary_1(BIF_ALIST_1)
{
- return erts_term_to_binary(p, Term, 0, TERM_TO_BINARY_DFLAGS);
+ return erts_term_to_binary(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS);
}
-
-Eterm
-term_to_binary_2(Process* p, Eterm Term, Eterm Flags)
+BIF_RETTYPE term_to_binary_2(BIF_ALIST_2)
{
+ Process* p = BIF_P;
+ Eterm Term = BIF_ARG_1;
+ Eterm Flags = BIF_ARG_2;
int level = 0;
Uint flags = TERM_TO_BINARY_DFLAGS;
@@ -1100,7 +1106,7 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size)
goto error;
size = (Sint) dest_len;
}
- res = decoded_size(state->extp, state->extp + size, 0, 0);
+ res = decoded_size(state->extp, state->extp + size, 0);
if (res < 0)
goto error;
return res;
@@ -1250,8 +1256,11 @@ BIF_RETTYPE binary_to_term_2(BIF_ALIST_2)
}
Eterm
-external_size_1(Process* p, Eterm Term)
+external_size_1(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm Term = BIF_ARG_1;
+
Uint size = erts_encode_ext_size(Term);
if (IS_USMALL(0, size)) {
BIF_RET(make_small(size));
@@ -1262,6 +1271,49 @@ external_size_1(Process* p, Eterm Term)
}
Eterm
+external_size_2(BIF_ALIST_2)
+{
+ Uint size;
+ Uint flags = TERM_TO_BINARY_DFLAGS;
+
+ while (is_list(BIF_ARG_2)) {
+ Eterm arg = CAR(list_val(BIF_ARG_2));
+ Eterm* tp;
+
+ if (is_tuple(arg) && *(tp = tuple_val(arg)) == make_arityval(2)) {
+ if (tp[1] == am_minor_version && is_small(tp[2])) {
+ switch (signed_val(tp[2])) {
+ case 0:
+ break;
+ case 1:
+ flags |= DFLAG_NEW_FLOATS;
+ break;
+ default:
+ goto error;
+ }
+ } else {
+ goto error;
+ }
+ } else {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_ARG_2 = CDR(list_val(BIF_ARG_2));
+ }
+ if (is_not_nil(BIF_ARG_2)) {
+ goto error;
+ }
+
+ size = erts_encode_ext_size_2(BIF_ARG_1, flags);
+ if (IS_USMALL(0, size)) {
+ BIF_RET(make_small(size));
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(size, hp));
+ }
+}
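+
+/*
+ * Illustrative use from the Erlang side (a sketch, assuming this BIF is
+ * exposed as erlang:external_size/2):
+ *
+ *     1> erlang:external_size(Term, [{minor_version, 1}]).
+ *
+ * With minor_version 1, DFLAG_NEW_FLOATS is added to the encoding
+ * flags, so the reported size matches what term_to_binary/2 would
+ * produce with the same option.
+ */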
+
+Eterm
erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags)
{
Uint size;
@@ -2402,7 +2454,7 @@ dec_term_atom_common:
n = get_int32(ep);
ep += 4;
- if (n <= ERL_ONHEAP_BIN_LIMIT || off_heap == NULL) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hb = (ErlHeapBin *) hp;
hb->thing_word = header_heap_bin(n);
@@ -2440,7 +2492,7 @@ dec_term_atom_common:
n = get_int32(ep);
bitsize = ep[4];
ep += 5;
- if (n <= ERL_ONHEAP_BIN_LIMIT || off_heap == NULL) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
ErlHeapBin* hb = (ErlHeapBin *) hp;
hb->thing_word = header_heap_bin(n);
@@ -3009,7 +3061,7 @@ encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
}
static Sint
-decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
+decoded_size(byte *ep, byte* endp, int internal_tags)
{
int heap_size = 0;
int terms;
@@ -3066,6 +3118,9 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
case LARGE_BIG_EXT:
CHKSIZE(4);
n = get_int32(ep);
+ if (n > BIG_ARITY_MAX*sizeof(ErtsDigit)) {
+ return -1;
+ }
SKIP2(n,4+1); /* skip, size,sign,digits */
heap_size += 1+1+(n+sizeof(Eterm)-1)/sizeof(Eterm); /* XXX: 1 too much? */
break;
@@ -3171,7 +3226,7 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
CHKSIZE(4);
n = get_int32(ep);
SKIP2(n, 4);
- if (n <= ERL_ONHEAP_BIN_LIMIT || no_refc_bins) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
heap_size += heap_bin_size(n);
} else {
heap_size += PROC_BIN_SIZE;
@@ -3182,7 +3237,7 @@ decoded_size(byte *ep, byte* endp, int no_refc_bins, int internal_tags)
CHKSIZE(5);
n = get_int32(ep);
SKIP2(n, 5);
- if (n <= ERL_ONHEAP_BIN_LIMIT || no_refc_bins) {
+ if (n <= ERL_ONHEAP_BIN_LIMIT) {
heap_size += heap_bin_size(n) + ERL_SUB_BIN_SIZE;
} else {
heap_size += PROC_BIN_SIZE + ERL_SUB_BIN_SIZE;
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index d8287b96a4..eddd4571dd 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -160,6 +160,7 @@ Uint erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap *);
void erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *);
Uint erts_encode_ext_size(Eterm);
+Uint erts_encode_ext_size_2(Eterm, unsigned);
Uint erts_encode_ext_size_ets(Eterm);
void erts_encode_ext(Eterm, byte **);
byte* erts_encode_ext_ets(Eterm, byte *, struct erl_off_heap_header** ext_off_heap);
@@ -174,10 +175,10 @@ void *erts_dist_ext_trailer(ErtsDistExternal *);
void erts_destroy_dist_ext_copy(ErtsDistExternal *);
int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
DistEntry *, ErtsAtomCache *);
-Sint erts_decode_dist_ext_size(ErtsDistExternal *, int);
+Sint erts_decode_dist_ext_size(ErtsDistExternal *);
Eterm erts_decode_dist_ext(Eterm **, ErlOffHeap *, ErtsDistExternal *);
-Sint erts_decode_ext_size(byte*, Uint, int);
+Sint erts_decode_ext_size(byte*, Uint);
Sint erts_decode_ext_size_ets(byte*, Uint);
Eterm erts_decode_ext(Eterm **, ErlOffHeap *, byte**);
Eterm erts_decode_ext_ets(Eterm **, ErlOffHeap *, byte*);
diff --git a/erts/emulator/beam/fix_alloc.c b/erts/emulator/beam/fix_alloc.c
deleted file mode 100644
index 5637281597..0000000000
--- a/erts/emulator/beam/fix_alloc.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/* General purpose Memory allocator for fixed block size objects */
-/* This allocater is at least an order of magnitude faster than malloc() */
-
-
-#define NOPERBLOCK 20
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "sys.h"
-#include "erl_vm.h"
-#include "global.h"
-#include "erl_db.h"
-
-#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
-
-#if ERTS_ALC_MTA_FIXED_SIZE
-#include "erl_threads.h"
-#include "erl_smp.h"
-# ifdef ERTS_SMP
-# define FA_LOCK(FA) erts_smp_spin_lock(&(FA)->slck)
-# define FA_UNLOCK(FA) erts_smp_spin_unlock(&(FA)->slck)
-# else
-# define FA_LOCK(FA) erts_mtx_lock(&(FA)->mtx)
-# define FA_UNLOCK(FA) erts_mtx_unlock(&(FA)->mtx)
-# endif
-#else
-# define FA_LOCK(FA)
-# define FA_UNLOCK(FA)
-#endif
-
-typedef union {double d; long l;} align_t;
-
-typedef struct fix_alloc_block {
- struct fix_alloc_block *next;
- align_t mem[1];
-} FixAllocBlock;
-
-typedef struct fix_alloc {
- Uint item_size;
- void *freelist;
- Uint no_free;
- Uint no_blocks;
- FixAllocBlock *blocks;
-#if ERTS_ALC_MTA_FIXED_SIZE
-# ifdef ERTS_SMP
- erts_smp_spinlock_t slck;
-# else
- erts_mtx_t mtx;
-# endif
-#endif
-} FixAlloc;
-
-static void *(*core_alloc)(Uint);
-static Uint xblk_sz;
-
-static FixAlloc **fa;
-#define FA_SZ (1 + ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE)
-
-#define FIX_IX(N) ((N) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
-
-#define FIX_POOL_SZ(I_SZ) \
- ((I_SZ)*NOPERBLOCK + sizeof(FixAllocBlock) - sizeof(align_t))
-
-#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
-static int first_time;
-#endif
-
-void erts_init_fix_alloc(Uint extra_block_size,
- void *(*alloc)(Uint))
-{
- int i;
-
- xblk_sz = extra_block_size;
- core_alloc = alloc;
-
- fa = (FixAlloc **) (*core_alloc)(FA_SZ * sizeof(FixAlloc *));
- if (!fa)
- erts_alloc_enomem(ERTS_ALC_T_UNDEF, FA_SZ * sizeof(FixAlloc *));
-
- for (i = 0; i < FA_SZ; i++)
- fa[i] = NULL;
-#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
- first_time = 1;
-#endif
-}
-
-Uint
-erts_get_fix_size(ErtsAlcType_t type)
-{
- Uint i = FIX_IX(ERTS_ALC_T2N(type));
- return i < FA_SZ && fa[i] ? fa[i]->item_size : 0;
-}
-
-void
-erts_set_fix_size(ErtsAlcType_t type, Uint size)
-{
- Uint sz;
- Uint i;
- FixAlloc *fs;
- ErtsAlcType_t t_no = ERTS_ALC_T2N(type);
- sz = xblk_sz + size;
-
-#ifdef DEBUG
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-#endif
-
- while (sz % sizeof(align_t) != 0) /* Alignment */
- sz++;
-
- i = FIX_IX(t_no);
- fs = (FixAlloc *) (*core_alloc)(sizeof(FixAlloc));
- if (!fs)
- erts_alloc_n_enomem(t_no, sizeof(FixAlloc));
-
- fs->item_size = sz;
- fs->no_blocks = 0;
- fs->no_free = 0;
- fs->blocks = NULL;
- fs->freelist = NULL;
- if (fa[i])
- erl_exit(-1, "Attempt to overwrite existing fix size (%d)", i);
- fa[i] = fs;
-
-#if ERTS_ALC_MTA_FIXED_SIZE
-#ifdef ERTS_SMP
- erts_smp_spinlock_init_x(&fs->slck, "fix_alloc", make_small(i));
-#else
- erts_mtx_init_x(&fs->mtx, "fix_alloc", make_small(i));
-#endif
-#endif
-
-}
-
-void
-erts_fix_info(ErtsAlcType_t type, ErtsFixInfo *efip)
-{
- Uint i;
- FixAlloc *f;
-#ifdef DEBUG
- FixAllocBlock *b;
- void *fp;
-#endif
- Uint real_item_size;
- ErtsAlcType_t t_no = ERTS_ALC_T2N(type);
-
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- i = FIX_IX(t_no);
- f = fa[i];
-
- efip->total = sizeof(FixAlloc *);
- efip->used = 0;
- if (!f)
- return;
-
- real_item_size = f->item_size - xblk_sz;
-
- FA_LOCK(f);
-
- efip->total += sizeof(FixAlloc);
- efip->total += f->no_blocks*FIX_POOL_SZ(real_item_size);
- efip->used = efip->total - f->no_free*real_item_size;
-
-#ifdef DEBUG
- ASSERT(efip->total >= efip->used);
- for(i = 0, b = f->blocks; b; i++, b = b->next);
- ASSERT(f->no_blocks == i);
- for (i = 0, fp = f->freelist; fp; i++, fp = *((void **) fp));
- ASSERT(f->no_free == i);
-#endif
-
- FA_UNLOCK(f);
-
-}
-
-void
-erts_fix_free(ErtsAlcType_t t_no, void *extra, void* ptr)
-{
- Uint i;
- FixAlloc *f;
-
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
-
- i = FIX_IX(t_no);
- f = fa[i];
-
- FA_LOCK(f);
- *((void **) ptr) = f->freelist;
- f->freelist = ptr;
- f->no_free++;
- FA_UNLOCK(f);
-}
-
-
-void *erts_fix_realloc(ErtsAlcType_t t_no, void *extra, void* ptr, Uint size)
-{
- erts_alc_fatal_error(ERTS_ALC_E_NOTSUP, ERTS_ALC_O_REALLOC, t_no);
- return NULL;
-}
-
-void *erts_fix_alloc(ErtsAlcType_t t_no, void *extra, Uint size)
-{
- void *ret;
- int i;
- FixAlloc *f;
-
-#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
- ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
- ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
- if (first_time) { /* Check that all sizes have been initialized */
- int i;
- for (i = 0; i < FA_SZ; i++)
- ASSERT(fa[i]);
- first_time = 0;
- }
-#endif
-
-
- i = FIX_IX(t_no);
- f = fa[i];
-
- ASSERT(f);
- ASSERT(f->item_size >= size);
-
- FA_LOCK(f);
- if (f->freelist == NULL) { /* Gotta alloc some more mem */
- char *ptr;
- FixAllocBlock *bl;
- Uint n;
-
-
- FA_UNLOCK(f);
- bl = (*core_alloc)(FIX_POOL_SZ(f->item_size));
- if (!bl)
- return NULL;
-
- FA_LOCK(f);
- bl->next = f->blocks; /* link in first */
- f->blocks = bl;
-
- n = NOPERBLOCK;
- ptr = (char *) &f->blocks->mem[0];
- while(n--) {
- *((void **) ptr) = f->freelist;
- f->freelist = (void *) ptr;
- ptr += f->item_size;
- }
-#if !ERTS_ALC_MTA_FIXED_SIZE
- ASSERT(f->no_free == 0);
-#endif
- f->no_free += NOPERBLOCK;
- f->no_blocks++;
- }
-
- ret = f->freelist;
- f->freelist = *((void **) f->freelist);
- ASSERT(f->no_free > 0);
- f->no_free--;
-
- FA_UNLOCK(f);
-
- return ret;
-}
-
-#endif /* #ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 499bdd77ba..b000e2c5d4 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -37,22 +37,17 @@
#include "erl_process.h"
#include "erl_sys_driver.h"
#include "erl_debug.h"
+#include "error.h"
typedef struct port Port;
#include "erl_port_task.h"
-#define ERTS_MAX_NO_OF_ASYNC_THREADS 1024
-extern int erts_async_max_threads;
-#define ERTS_ASYNC_THREAD_MIN_STACK_SIZE 16 /* Kilo words */
-#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
-extern int erts_async_thread_suggested_stack_size;
-
typedef struct erts_driver_t_ erts_driver_t;
#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
typedef struct {
- int size; /* total size in bytes */
+ ErlDrvSizeT size; /* total size in bytes */
SysIOVec* v_start;
SysIOVec* v_end;
@@ -68,9 +63,9 @@ typedef struct {
} ErlIOQueue;
typedef struct line_buf { /* Buffer used in line oriented I/O */
- int bufsiz; /* Size of character buffer */
- int ovlen; /* Length of overflow data */
- int ovsiz; /* Actual size of overflow buffer */
+ ErlDrvSizeT bufsiz; /* Size of character buffer */
+ ErlDrvSizeT ovlen; /* Length of overflow data */
+ ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */
char data[1]; /* Starting point of buffer data,
data[0] is a flag indicating an unprocess CR,
The rest is the overflow buffer. */
@@ -177,7 +172,7 @@ struct port {
DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
char *name; /* String used in the open */
erts_driver_t* drv_ptr;
- long drv_data;
+ UWord drv_data;
ErtsProcList *suspended; /* List of suspended processes. */
LineBuf *linebuf; /* Buffer to hold data not ready for
process to get (line oriented I/O)*/
@@ -200,17 +195,17 @@ erts_port_runq(Port *prt)
{
#ifdef ERTS_SMP
ErtsRunQueue *rq1, *rq2;
- rq1 = (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue);
+ rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
while (1) {
erts_smp_runq_lock(rq1);
- rq2 = (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue);
+ rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
if (rq1 == rq2)
return rq1;
erts_smp_runq_unlock(rq1);
rq1 = rq2;
}
#else
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
@@ -328,12 +323,15 @@ struct erts_driver_t_ {
void (*stop)(ErlDrvData drv_data);
void (*finish)(void);
void (*flush)(ErlDrvData drv_data);
- void (*output)(ErlDrvData drv_data, char *buf, int len);
+ void (*output)(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
void (*outputv)(ErlDrvData drv_data, ErlIOVec *ev); /* Might be NULL */
- int (*control)(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen); /* Might be NULL */
- int (*call)(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned int *flags); /* Might be NULL */
+ ErlDrvSSizeT (*control)(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen); /* Might be NULL */
+ ErlDrvSSizeT (*call)(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen, /* Might be NULL */
+ unsigned int *flags);
void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
ErlDrvEventData event_data);
void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
@@ -403,7 +401,7 @@ extern Eterm erts_ddll_monitor_driver(Process *p,
typedef struct binary {
ERTS_INTERNAL_BINARY_FIELDS
- long orig_size;
+ SWord orig_size;
char orig_bytes[1]; /* to be continued */
} Binary;
@@ -412,7 +410,7 @@ typedef struct binary {
typedef struct {
ERTS_INTERNAL_BINARY_FIELDS
- long orig_size;
+ SWord orig_size;
void (*destructor)(Binary *);
char magic_bin_data[1];
} ErtsMagicBinary;
@@ -542,10 +540,11 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) {
/* Dead ports are added from the end of the snapshot buffer */
- Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
- -(erts_aint_t)sizeof(Eterm));
+ Eterm* tombstone;
+ tombstone = (Eterm*) erts_smp_atomic_add_read_nob(&erts_dead_ports_ptr,
+ -(erts_aint_t)sizeof(Eterm));
ASSERT(tombstone+1 != NULL);
- ASSERT(prt->snapshot == erts_smp_atomic32_read(&erts_ports_snapshot) - 1);
+ ASSERT(prt->snapshot == erts_smp_atomic32_read_nob(&erts_ports_snapshot) - 1);
*tombstone = prt->id;
}
/*else no ongoing snapshot or port was already included or created after snapshot */
@@ -559,7 +558,6 @@ extern Eterm node_cookie;
extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
extern Uint display_items; /* no of items to display in traces etc */
-extern Uint display_loads; /* print info about loaded modules */
extern int erts_backtrace_depth;
extern erts_smp_atomic32_t erts_max_gen_gcs;
@@ -808,6 +806,8 @@ do { \
/* Port uses port specific locking (opposed to driver specific locking) */
#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 13))
#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 14))
+/* Last port to terminate halts the emulator */
+#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 15))
#ifdef DEBUG
/* Only debug: make sure all flags aren't cleared unintentionally */
#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
@@ -850,18 +850,41 @@ void erts_queue_monitor_message(Process *,
Eterm,
Eterm);
void erts_init_bif(void);
+Eterm erl_send(Process *p, Eterm to, Eterm msg);
+
+/* erl_bif_op.c */
+
+Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
/* erl_bif_port.c */
/* erl_bif_trace.c */
+Eterm erl_seq_trace_info(Process *p, Eterm arg1);
void erts_system_monitor_clear(Process *c_p);
void erts_system_profile_clear(Process *c_p);
/* beam_load.c */
-int erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* mod, byte* code, int size);
+typedef struct {
+ BeamInstr* current; /* Pointer to: Mod, Name, Arity */
+ Uint needed; /* Heap space needed for entire tuple */
+ Uint32 loc; /* Location in source code */
+ Eterm* fname_ptr; /* Pointer to fname table */
+} FunctionInfo;
+
+struct LoaderState* erts_alloc_loader_state(void);
+Eterm erts_prepare_loading(struct LoaderState*, Process *c_p,
+ Eterm group_leader, Eterm* modp,
+ byte* code, Uint size);
+Eterm erts_finish_loading(struct LoaderState* stp, Process* c_p,
+ ErtsProcLocks c_p_locks, Eterm* modp);
+Eterm erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
BeamInstr* find_function_from_pc(BeamInstr* pc);
+Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
+ Eterm args, Eterm* mfa_p);
+void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
+void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
@@ -878,14 +901,9 @@ void loaded(int, void *);
/* config.c */
__decl_noreturn void __noreturn erl_exit(int n, char*, ...);
-__decl_noreturn void __noreturn erl_exit0(char *, int, int n, char*, ...);
+__decl_noreturn void __noreturn erl_exit_flush_async(int n, char*, ...);
void erl_error(char*, va_list);
-#define ERL_EXIT0(n,f) erl_exit0(__FILE__, __LINE__, n, f)
-#define ERL_EXIT1(n,f,a) erl_exit0(__FILE__, __LINE__, n, f, a)
-#define ERL_EXIT2(n,f,a,b) erl_exit0(__FILE__, __LINE__, n, f, a, b)
-#define ERL_EXIT3(n,f,a,b,c) erl_exit0(__FILE__, __LINE__, n, f, a, b, c)
-
/* copy.c */
void init_copy(void);
Eterm copy_object(Eterm, Process*);
@@ -1035,7 +1053,8 @@ extern int erts_do_net_exits(DistEntry*, Eterm);
extern int distribution_info(int, void *);
extern int is_node_name_atom(Eterm a);
-extern int erts_net_message(Port *, DistEntry *, byte *, int, byte *, int);
+extern int erts_net_message(Port *, DistEntry *,
+ byte *, ErlDrvSizeT, byte *, ErlDrvSizeT);
extern void init_dist(void);
extern int stop_dist(void);
@@ -1052,6 +1071,7 @@ void init_emulator(void);
void process_main(void);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
+void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
/* erl_init.c */
@@ -1073,6 +1093,7 @@ extern ErtsModifiedTimings erts_modified_timings[];
#define ERTS_MODIFIED_TIMING_INPUT_REDS \
(erts_modified_timings[erts_modified_timing_level].input_reds)
+extern int erts_no_line_info;
extern Eterm erts_error_logger_warnings;
extern int erts_initialized;
extern int erts_compat_rel;
@@ -1106,7 +1127,9 @@ void erts_init_gc(void);
int erts_garbage_collect(Process*, int, Eterm*, int);
void erts_garbage_collect_hibernate(Process* p);
Eterm erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity);
-void erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size);
+void erts_garbage_collect_literals(Process* p, Eterm* literals,
+ Uint lit_size,
+ struct erl_off_heap_header* oh);
Uint erts_next_heap_size(Uint, Uint);
Eterm erts_heap_sizes(Process* p);
@@ -1200,11 +1223,11 @@ erts_smp_port_trylock(Port *prt)
#ifdef ERTS_SMP
int res;
- ASSERT(erts_smp_atomic_read(&prt->refc) > 0);
- erts_smp_atomic_inc(&prt->refc);
+ ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
+ erts_smp_atomic_inc_nob(&prt->refc);
res = erts_smp_mtx_trylock(prt->lock);
if (res == EBUSY) {
- erts_smp_atomic_dec(&prt->refc);
+ erts_smp_atomic_dec_nob(&prt->refc);
}
return res;
@@ -1217,8 +1240,8 @@ ERTS_GLB_INLINE void
erts_smp_port_lock(Port *prt)
{
#ifdef ERTS_SMP
- ASSERT(erts_smp_atomic_read(&prt->refc) > 0);
- erts_smp_atomic_inc(&prt->refc);
+ ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
+ erts_smp_atomic_inc_nob(&prt->refc);
erts_smp_mtx_lock(prt->lock);
#endif
}
@@ -1229,7 +1252,7 @@ erts_smp_port_unlock(Port *prt)
#ifdef ERTS_SMP
erts_aint_t refc;
erts_smp_mtx_unlock(prt->lock);
- refc = erts_smp_atomic_dectest(&prt->refc);
+ refc = erts_smp_atomic_dec_read_nob(&prt->refc);
ASSERT(refc >= 0);
if (refc == 0)
erts_port_cleanup(prt);
@@ -1298,7 +1321,7 @@ erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs
}
#ifdef ERTS_SMP
else {
- erts_smp_atomic_inc(&prt->refc);
+ erts_smp_atomic_inc_nob(&prt->refc);
erts_smp_port_state_unlock(prt);
if (no_proc_locks)
@@ -1626,8 +1649,7 @@ void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
-Eterm erts_bif_trace(int bif_index, Process* p,
- Eterm arg1, Eterm arg2, Eterm arg3, BeamInstr *I);
+Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
@@ -1640,7 +1662,7 @@ do { \
#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
#endif
-void bin_write(int, void*, byte*, int);
+void bin_write(int, void*, byte*, size_t);
int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */
struct Sint_buf {
@@ -1656,7 +1678,7 @@ char* Sint_to_buf(Sint, struct Sint_buf*);
#define ERTS_IOLIST_OVERFLOW 1
#define ERTS_IOLIST_TYPE 2
-Eterm buf_to_intlist(Eterm**, char*, int, Eterm); /* most callers pass plain char*'s */
+Eterm buf_to_intlist(Eterm**, char*, size_t, Eterm); /* most callers pass plain char*'s */
int io_list_to_buf(Eterm, char*, int);
int io_list_to_buf2(Eterm, char*, int);
int erts_iolist_size(Eterm, Uint *);
@@ -1952,4 +1974,46 @@ erts_alloc_message_heap(Uint size,
# define UseTmpHeapNoproc(Size) /* Nothing */
# define UnUseTmpHeapNoproc(Size) /* Nothing */
#endif /* HEAP_ON_C_STACK */
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#include "dtrace-wrapper.h"
+
+ERTS_GLB_INLINE void
+dtrace_pid_str(Eterm pid, char *process_buf)
+{
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
+ pid_channel_no(pid),
+ pid_number(pid),
+ pid_serial(pid));
+}
+
+ERTS_GLB_INLINE void
+dtrace_proc_str(Process *process, char *process_buf)
+{
+ dtrace_pid_str(process->id, process_buf);
+}
+
+ERTS_GLB_INLINE void
+dtrace_port_str(Port *port, char *port_buf)
+{
+ erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
+ port_channel_no(port->id),
+ port_number(port->id));
+}
+
+ERTS_GLB_INLINE void
+dtrace_fun_decode(Process *process,
+ Eterm module, Eterm function, int arity,
+ char *process_buf, char *mfa_buf)
+{
+ if (process_buf) {
+ dtrace_proc_str(process, process_buf);
+ }
+
+ erts_snprintf(mfa_buf, DTRACE_TERM_BUF_SIZE, "%T:%T/%d",
+ module, function, arity);
+}
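+
+/*
+ * Example of the formatting done above (a sketch, not part of the
+ * patch): for pid <0.42.0> calling lists:map/2, the buffers end up as
+ * process_buf = "<0.42.0>" and mfa_buf = "lists:map/2"; both strings
+ * are bounded by DTRACE_TERM_BUF_SIZE.
+ */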
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif /* !__GLOBAL_H__ */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index df5f8b22a3..8a2a43bebd 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,6 +42,8 @@
#include "erl_bits.h"
#include "erl_version.h"
#include "error.h"
+#include "erl_async.h"
+#include "dtrace-wrapper.h"
extern ErlDrvEntry fd_driver_entry;
extern ErlDrvEntry vanilla_driver_entry;
@@ -163,8 +165,8 @@ erts_port_ioq_size(Port *pp)
typedef struct line_buf_context {
LineBuf **b;
char *buf;
- int left;
- int retlen;
+ ErlDrvSizeT left;
+ ErlDrvSizeT retlen;
} LineBufContext;
#define LINEBUF_EMPTY 0
@@ -179,6 +181,20 @@ typedef struct line_buf_context {
#define LINEBUF_INITIAL 100
+#ifdef USE_VM_PROBES
+#define DTRACE_FORMAT_COMMON_PID_AND_PORT(PID, PORT) \
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); \
+ \
+ dtrace_pid_str((PID), process_str); \
+ dtrace_port_str((PORT), port_str);
+#define DTRACE_FORMAT_COMMON_PROC_AND_PORT(PID, PORT) \
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); \
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); \
+ \
+ dtrace_proc_str((PID), process_str); \
+ dtrace_port_str((PORT), port_str);
+#endif
/* The 'number' field in a port now has two parts: the lowest bits
contain the index in the port table, and the higher bits are a counter
@@ -244,8 +260,8 @@ get_free_port(void)
}
port->status = ERTS_PORT_SFLG_INITIALIZING;
#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&port->refc) == 0);
- erts_smp_atomic_set(&port->refc, 2); /* Port alive + lock */
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0);
+ erts_smp_atomic_set_nob(&port->refc, 2); /* Port alive + lock */
#endif
erts_smp_port_state_unlock(port);
return num & port_num_mask;
@@ -327,7 +343,7 @@ port_cleanup(Port *prt)
#ifdef ERTS_SMP
ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&prt->refc) == 0);
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0);
port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK);
@@ -425,11 +441,11 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
erts_smp_runq_lock(runq);
erts_smp_port_state_lock(prt);
prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
- prt->snapshot = erts_smp_atomic32_read(&erts_ports_snapshot);
+ prt->snapshot = erts_smp_atomic32_read_nob(&erts_ports_snapshot);
old_name = prt->name;
prt->name = new_name;
#ifdef ERTS_SMP
- erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) runq);
+ erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
#endif
ASSERT(!prt->drv_ptr);
prt->drv_ptr = driver;
@@ -444,7 +460,7 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
prt->control_flags = 0;
prt->connected = pid;
- prt->drv_data = (long) drv_data;
+ prt->drv_data = (SWord) drv_data;
prt->bytes_in = 0;
prt->bytes_out = 0;
prt->dist_entry = NULL;
@@ -590,8 +606,8 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
erts_smp_port_state_lock(port);
port->status = ERTS_PORT_SFLG_FREE;
#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&port->refc) == 2);
- erts_smp_atomic_set(&port->refc, 0);
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2);
+ erts_smp_atomic_set_nob(&port->refc, 0);
#endif
erts_smp_port_state_unlock(port);
return -3;
@@ -638,16 +654,21 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
trace_sched_ports_where(port, am_in, am_start);
}
port->caller = pid;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_start)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(pid, port)
+ DTRACE3(driver_start, process_str, driver->name, port_str);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
drv_data = (*driver->start)((ErlDrvPort)(port_ix),
name, opts);
erts_unblock_fpe(fpe_was_unmasked);
port->caller = NIL;
- erts_unblock_fpe(fpe_was_unmasked);
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_out, am_start);
}
- if (error_number_ptr && ((long) drv_data) == (long) -2)
+ if (error_number_ptr && ((SWord) drv_data) == (SWord) -2)
*error_number_ptr = errno;
#ifdef ERTS_SMP
if (port->xports)
@@ -656,10 +677,10 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
#endif
}
- if (((long)drv_data) == -1 ||
- ((long)drv_data) == -2 ||
- ((long)drv_data) == -3) {
- int res = (int) ((long) drv_data);
+ if (((SWord)drv_data) == -1 ||
+ ((SWord)drv_data) == -2 ||
+ ((SWord)drv_data) == -3) {
+ int res = (int) ((SWord) drv_data);
if (res == -3 && error_number_ptr) {
*error_number_ptr = BADARG;
@@ -688,7 +709,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
erts_port_release(port);
return res;
}
- port->drv_data = (long) drv_data;
+ port->drv_data = (SWord) drv_data;
return port_ix;
}
@@ -743,7 +764,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
return (ErlDrvTermData) -1; /* pid does not exist */
}
if ((port_num = get_free_port()) < 0) {
- errno = ENFILE;
+ errno = SYSTEM_LIMIT;
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
erts_smp_mtx_unlock(&erts_driver_list_lock);
return (ErlDrvTermData) -1;
@@ -818,6 +839,11 @@ erts_smp_xports_unlock(Port *prt)
#define SET_VEC(iov, bv, bin, ptr, len, vlen) do { \
(iov)->iov_base = (ptr); \
(iov)->iov_len = (len); \
+ if (sizeof((iov)->iov_len) < sizeof(len) \
+ /* Check if (len) overflowed (iov)->iov_len */ \
+ && ((len) >> (sizeof((iov)->iov_len)*CHAR_BIT)) != 0) { \
+ goto L_overflow; \
+ } \
*(bv)++ = (bin); \
(iov)++; \
(vlen)++; \
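The lines added to SET_VEC above keep a length that is wider than the iov_len field from being silently truncated. A stand-alone sketch of the same width check (not ERTS code, helper name hypothetical):

    #include <limits.h>
    #include <stddef.h>

    /* Returns nonzero when 'len' fits in a destination field that is
     * 'dst_bytes' wide. */
    static int fits_in_field(unsigned long long len, size_t dst_bytes)
    {
        if (dst_bytes >= sizeof(len))
            return 1;                                /* destination at least as wide */
        return (len >> (dst_bytes * CHAR_BIT)) == 0; /* no bits above the field */
    }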
@@ -828,13 +854,13 @@ io_list_to_vec(Eterm obj, /* io-list */
SysIOVec* iov, /* io vector */
ErlDrvBinary** binv, /* binary reference vector */
ErlDrvBinary* cbin, /* binary to store characters */
- int bin_limit) /* small binaries limit */
+ ErlDrvSizeT bin_limit) /* small binaries limit */
{
DECLARE_ESTACK(s);
Eterm* objp;
char *buf = cbin->orig_bytes;
- int len = cbin->orig_size;
- int csize = 0;
+ ErlDrvSizeT len = cbin->orig_size;
+ ErlDrvSizeT csize = 0;
int vlen = 0;
char* cptr = buf;
@@ -874,7 +900,7 @@ io_list_to_vec(Eterm obj, /* io-list */
Eterm real_bin;
Uint offset;
Eterm* bptr;
- int size;
+ ErlDrvSizeT size;
int bitoffs;
int bitsize;
@@ -949,9 +975,9 @@ io_list_to_vec(Eterm obj, /* io-list */
#define IO_LIST_VEC_COUNT(obj) \
do { \
- int _size = binary_size(obj); \
+ ErlDrvSizeT _size = binary_size(obj); \
Eterm _real; \
- Uint _offset; \
+ ERTS_DECLARE_DUMMY(Uint _offset); \
int _bitoffs; \
int _bitsize; \
ERTS_GET_REAL_BIN(obj, _real, _offset, _bitoffs, _bitsize); \
@@ -1104,7 +1130,7 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
Uint csize;
Uint pvsize;
Uint pcsize;
- int blimit;
+ ErlDrvSizeT blimit;
SysIOVec iv[SMALL_WRITE_VEC];
ErlDrvBinary* bv[SMALL_WRITE_VEC];
SysIOVec* ivp;
@@ -1146,15 +1172,31 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
ivp[0].iov_len = 0;
bvp[0] = NULL;
ev.vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
+ if (ev.vsize < 0) {
+ if (ivp != iv) {
+ erts_free(ERTS_ALC_T_TMP, (void *) ivp);
+ }
+ if (bvp != bv) {
+ erts_free(ERTS_ALC_T_TMP, (void *) bvp);
+ }
+ driver_free_binary(cbin);
+ goto bad_value;
+ }
ev.vsize++;
#if 0
/* This assertion may say something useful, but it can
be falsified during the emulator test suites. */
- ASSERT((ev.vsize >= 0) && (ev.vsize == vsize));
+ ASSERT(ev.vsize == vsize);
#endif
ev.size = size; /* total size */
ev.iov = ivp;
ev.binv = bvp;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_outputv)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
+ DTRACE4(driver_outputv, process_str, port_str, p->name, size);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
(*drv->outputv)((ErlDrvData)p->drv_data, &ev);
erts_unblock_fpe(fpe_was_unmasked);
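The new error path after io_list_to_vec above frees the temporary vectors only when they are not the small stack copies, then releases the driver binary before bailing out. A stand-alone sketch of that "small stack buffer, heap fallback" cleanup rule (generic C, not ERTS code):

    #include <stdlib.h>
    #include <string.h>

    #define SMALL_VEC 16   /* stand-in for SMALL_WRITE_VEC */

    static int use_vec(size_t n)
    {
        char stack_buf[SMALL_VEC];
        char *buf = (n <= SMALL_VEC) ? stack_buf : malloc(n);

        if (buf == NULL)
            return -1;
        memset(buf, 0, n);
        /* ... fill and hand off the vector ... */
        if (buf != stack_buf)      /* free only the heap fallback */
            free(buf);
        return 0;
    }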
@@ -1174,8 +1216,21 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
r = io_list_to_buf(list, buf, size);
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
+ DTRACE4(port_command, process_str, port_str, p->name, "command");
+ }
+#endif
+
if (r >= 0) {
size -= r;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_output)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
+ DTRACE4(driver_output, process_str, port_str, p->name, size);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
(*drv->output)((ErlDrvData)p->drv_data, buf, size);
erts_unblock_fpe(fpe_was_unmasked);
@@ -1199,6 +1254,12 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
*/
buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
r = io_list_to_buf(list, buf, size);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_output)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
+ DTRACE4(driver_output, process_str, port_str, p->name, size);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
(*drv->output)((ErlDrvData)p->drv_data, buf, size);
erts_unblock_fpe(fpe_was_unmasked);
@@ -1206,7 +1267,7 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
}
}
p->bytes_out += size;
- erts_smp_atomic_add(&erts_bytes_out, size);
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
#ifdef ERTS_SMP
if (p->xports)
@@ -1277,13 +1338,13 @@ void init_io(void)
erts_port = (Port *) erts_alloc(ERTS_ALC_T_PORT_TABLE,
erts_max_ports * sizeof(Port));
- erts_smp_atomic_init(&erts_bytes_out, 0);
- erts_smp_atomic_init(&erts_bytes_in, 0);
+ erts_smp_atomic_init_nob(&erts_bytes_out, 0);
+ erts_smp_atomic_init_nob(&erts_bytes_in, 0);
for (i = 0; i < erts_max_ports; i++) {
erts_port_task_init_sched(&erts_port[i].sched);
#ifdef ERTS_SMP
- erts_smp_atomic_init(&erts_port[i].refc, 0);
+ erts_smp_atomic_init_nob(&erts_port[i].refc, 0);
erts_port[i].lock = NULL;
erts_port[i].xports = NULL;
erts_smp_spinlock_init_x(&erts_port[i].state_lck, "port_state", make_small(i));
@@ -1300,7 +1361,7 @@ void init_io(void)
erts_port[i].port_data_lock = NULL;
}
- erts_smp_atomic32_init(&erts_ports_snapshot, (erts_aint32_t) 0);
+ erts_smp_atomic32_init_nob(&erts_ports_snapshot, (erts_aint32_t) 0);
last_port_num = 0;
erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
@@ -1360,7 +1421,8 @@ int bufsiz;
* buf - A buffer containing the data to be read and split to lines.
* len - The number of bytes in buf.
*/
-static int init_linebuf_context(LineBufContext *lc, LineBuf **lb, char *buf, int len)
+static int init_linebuf_context(LineBufContext *lc, LineBuf **lb,
+ char *buf, ErlDrvSizeT len)
{
if(lc == NULL || lb == NULL)
return -1;
@@ -1513,7 +1575,11 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
res = copy_struct(res, sz_res, &hp, ohp);
tuple = TUPLE2(hp, sender, res);
- erts_queue_message(rp, &rp_locks, bp, tuple, NIL);
+ erts_queue_message(rp, &rp_locks, bp, tuple, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
erts_smp_proc_unlock(rp, rp_locks);
erts_smp_proc_dec_refc(rp);
}
@@ -1529,10 +1595,10 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
*/
static void deliver_read_message(Port* prt, Eterm to,
- char *hbuf, int hlen,
- char *buf, int len, int eol)
+ char *hbuf, ErlDrvSizeT hlen,
+ char *buf, ErlDrvSizeT len, int eol)
{
- int need;
+ ErlDrvSizeT need;
Eterm listp;
Eterm tuple;
Process* rp;
@@ -1602,7 +1668,11 @@ static void deliver_read_message(Port* prt, Eterm to,
tuple = TUPLE2(hp, prt->id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
+ erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
erts_smp_proc_unlock(rp, rp_locks);
erts_smp_proc_dec_refc(rp);
}
@@ -1612,8 +1682,8 @@ static void deliver_read_message(Port* prt, Eterm to,
* deliver_read_message, and takes the same parameters.
*/
static void deliver_linebuf_message(Port* prt, Eterm to,
- char* hbuf, int hlen,
- char *buf, int len)
+ char* hbuf, ErlDrvSizeT hlen,
+ char *buf, ErlDrvSizeT len)
{
LineBufContext lc;
int ret;
@@ -1656,14 +1726,14 @@ static void
deliver_vec_message(Port* prt, /* Port */
Eterm to, /* Receiving pid */
char* hbuf, /* "Header" buffer... */
- int hlen, /* ... and its length */
+ ErlDrvSizeT hlen, /* ... and its length */
ErlDrvBinary** binv, /* Vector of binaries */
SysIOVec* iov, /* I/O vector */
int vsize, /* Size of binv & iov */
- int csize) /* Size of characters in
+ ErlDrvSizeT csize) /* Size of characters in
iov (not hlen) */
{
- int need;
+ ErlDrvSizeT need;
Eterm listp;
Eterm tuple;
Process* rp;
@@ -1744,7 +1814,7 @@ deliver_vec_message(Port* prt, /* Port */
}
}
- if (hlen > 0) { /* Prepend the header */
+ if (hlen != 0) { /* Prepend the header */
Eterm* thp = hp;
listp = buf_to_intlist(&thp, hbuf, hlen, listp);
hp = thp;
@@ -1755,7 +1825,11 @@ deliver_vec_message(Port* prt, /* Port */
tuple = TUPLE2(hp, prt->id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
+ erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
erts_smp_proc_unlock(rp, rp_locks);
erts_smp_proc_dec_refc(rp);
}
@@ -1764,10 +1838,10 @@ deliver_vec_message(Port* prt, /* Port */
static void deliver_bin_message(Port* prt, /* port */
Eterm to, /* receiving pid */
char* hbuf, /* "header" buffer */
- int hlen, /* and it's length */
+ ErlDrvSizeT hlen, /* and it's length */
ErlDrvBinary* bin, /* binary data */
- int offs, /* offset into binary */
- int len) /* length of binary */
+ ErlDrvSizeT offs, /* offset into binary */
+ ErlDrvSizeT len) /* length of binary */
{
SysIOVec vec;
@@ -1794,6 +1868,12 @@ static void flush_port(Port *p)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->flush != NULL) {
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_flush)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p)
+ DTRACE3(driver_flush, process_str, port_str, p->name);
+ }
+#endif
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_in, am_flush);
}
@@ -1821,6 +1901,7 @@ terminate_port(Port *prt)
Eterm send_closed_port_id;
Eterm connected_id = NIL /* Initialize to silence compiler */;
erts_driver_t *drv;
+ int halt;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -1828,6 +1909,8 @@ terminate_port(Port *prt)
ASSERT(!prt->nlinks);
ASSERT(!prt->monitors);
+ /* prt->status may be altered by kill_port() below */
+ halt = (prt->status & ERTS_PORT_SFLG_HALT) != 0;
if (prt->status & ERTS_PORT_SFLG_SEND_CLOSED) {
erts_port_status_band_set(prt, ~ERTS_PORT_SFLG_SEND_CLOSED);
send_closed_port_id = prt->id;
@@ -1846,6 +1929,12 @@ terminate_port(Port *prt)
drv = prt->drv_ptr;
if ((drv != NULL) && (drv->stop != NULL)) {
int fpe_was_unmasked = erts_block_fpe();
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_stop)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt)
+ DTRACE3(driver_stop, process_str, drv->name, port_str);
+ }
+#endif
(*drv->stop)((ErlDrvData)prt->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
#ifdef ERTS_SMP
@@ -1879,6 +1968,10 @@ terminate_port(Port *prt)
* We don't want to send the closed message until after the
* port has been removed from the port table (in kill_port()).
*/
+ if (halt && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
+ erts_smp_port_unlock(prt); /* We will exit and never return */
+ erl_exit_flush_async(erts_halt_code, "");
+ }
if (is_internal_port(send_closed_port_id))
deliver_result(send_closed_port_id, connected_id, am_closed);
@@ -2003,6 +2096,19 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason)
rreason = (reason == am_kill) ? am_killed : reason;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_exit)) {
+ DTRACE_CHARBUF(from_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(rreason_str, 64);
+
+ erts_snprintf(from_str, sizeof(from_str), "%T", from);
+ dtrace_port_str(p, port_str);
+ erts_snprintf(rreason_str, sizeof(rreason_str), "%T", rreason);
+ DTRACE4(port_exit, from_str, port_str, p->name, rreason_str);
+ }
+#endif
+
if ((p->status & (ERTS_PORT_SFLGS_DEAD
| ERTS_PORT_SFLG_EXITING
| ERTS_PORT_SFLG_IMMORTAL))
@@ -2103,6 +2209,13 @@ void erts_port_command(Process *proc,
if (tp[2] == am_close) {
erts_port_status_bor_set(port, ERTS_PORT_SFLG_SEND_CLOSED);
erts_do_exit_port(port, pid, am_normal);
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port)
+ DTRACE4(port_command, process_str, port_str, port->name, "close");
+ }
+#endif
goto done;
} else if (is_tuple_arity(tp[2], 2)) {
tp = tuple_val(tp[2]);
@@ -2110,6 +2223,12 @@ void erts_port_command(Process *proc,
if (erts_write_to_port(caller_id, port, tp[2]) == 0)
goto done;
} else if ((tp[1] == am_connect) && is_internal_pid(tp[2])) {
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port)
+ DTRACE4(port_command, process_str, port_str, port->name, "connect");
+ }
+#endif
port->connected = tp[2];
deliver_result(port->id, pid, am_connected);
goto done;
@@ -2155,8 +2274,9 @@ erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
int must_free = 0; /* True if the buffer should be freed. */
char port_result[ERL_ONHEAP_BIN_LIMIT]; /* Default buffer for result from port. */
char* port_resp; /* Pointer to result buffer. */
- int n;
- int (*control)(ErlDrvData, unsigned, char*, int, char**, int);
+ ErlDrvSSizeT n;
+ ErlDrvSSizeT (*control)
+ (ErlDrvData, unsigned, char*, ErlDrvSizeT, char**, ErlDrvSizeT);
int fpe_was_unmasked;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -2170,8 +2290,8 @@ erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
* and with its length in to_len.
*/
if (is_binary(iolist) && binary_bitoffset(iolist) == 0) {
- Uint bitoffs;
- Uint bitsize;
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
+ ERTS_DECLARE_DUMMY(Uint bitsize);
ERTS_GET_BINARY_BYTES(iolist, to_port, bitoffs, bitsize);
to_len = binary_size(iolist);
} else {
@@ -2211,6 +2331,15 @@ erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_NO_PROC_LOCKS;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) {
+ DTRACE_FORMAT_COMMON_PROC_AND_PORT(p, prt);
+ DTRACE4(port_control, process_str, port_str, prt->name, command);
+ DTRACE5(driver_control, process_str, port_str, prt->name,
+ command, to_len);
+ }
+#endif
+
/*
* Call the port's control routine.
*/
@@ -2351,6 +2480,10 @@ print_port_info(int to, void *arg, int i)
void
set_busy_port(ErlDrvPort port_num, int on)
{
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(port_str, 16);
+#endif
+
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
@@ -2358,12 +2491,26 @@ set_busy_port(ErlDrvPort port_num, int on)
if (on) {
erts_port_status_bor_set(&erts_port[port_num],
ERTS_PORT_SFLG_PORT_BUSY);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_busy)) {
+ erts_snprintf(port_str, sizeof(port_str),
+ "%T", erts_port[port_num].id);
+ DTRACE1(port_busy, port_str);
+ }
+#endif
} else {
ErtsProcList* plp = erts_port[port_num].suspended;
erts_port_status_band_set(&erts_port[port_num],
~ERTS_PORT_SFLG_PORT_BUSY);
erts_port[port_num].suspended = NULL;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_not_busy)) {
+ erts_snprintf(port_str, sizeof(port_str),
+ "%T", erts_port[port_num].id);
+ DTRACE1(port_not_busy, port_str);
+ }
+#endif
if (erts_port[port_num].dist_entry) {
/*
* Processes suspended on distribution ports are
@@ -2381,6 +2528,28 @@ set_busy_port(ErlDrvPort port_num, int on)
*/
if (plp) {
+#ifdef USE_VM_PROBES
+ /*
+ * Hrm, for blocked dist ports, plp always seems to be NULL.
+ * That's not so fun.
+ * Well, another way to get the same info is using a D
+ * script to correlate an earlier process-port_blocked+pid
+ * event with a later process-scheduled event. That's
+ * subject to the multi-CPU races with how events are
+ * handled, but hey, that way works most of the time.
+ */
+ if (DTRACE_ENABLED(process_port_unblocked)) {
+ DTRACE_CHARBUF(pid_str, 16);
+ ErtsProcList* plp2 = plp;
+
+ erts_snprintf(port_str, sizeof(port_str),
+ "%T", erts_port[port_num].id);
+ while (plp2 != NULL) {
+ erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ DTRACE2(process_port_unblocked, pid_str, port_str);
+ plp2 = plp2->next; /* advance; otherwise this loop never terminates */
+ }
+ }
+#endif
/* First proc should be resumed last */
if (plp->next) {
erts_resume_processes(plp->next);
@@ -2427,6 +2596,14 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len)
p->drv_ptr->name ? p->drv_ptr->name : "unknown");
p->caller = NIL;
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_output)) {
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_port_str(p, port_str);
+ DTRACE4(driver_output, "-raw-", port_str, p->name, len);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
(*p->drv_ptr->output)((ErlDrvData)p->drv_data, (char*) buf, (int) len);
erts_unblock_fpe(fpe_was_unmasked);
@@ -2442,6 +2619,12 @@ int async_ready(Port *p, void* data)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
ASSERT(!(p->status & ERTS_PORT_SFLGS_DEAD));
if (p->drv_ptr->ready_async != NULL) {
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_ready_async)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p)
+ DTRACE3(driver_ready_async, process_str, port_str, p->name);
+ }
+#endif
(*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data);
need_free = 0;
#ifdef ERTS_SMP
@@ -2636,7 +2819,11 @@ void driver_report_exit(int ix, int status)
hp += 3;
tuple = TUPLE2(hp, prt->id, tuple);
- erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
+ erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
erts_smp_proc_unlock(rp, rp_locks);
erts_smp_proc_dec_refc(rp);
@@ -3082,7 +3269,7 @@ driver_deliver_term(ErlDrvPort port,
Binary* bp = erts_bin_nrml_alloc(size);
ASSERT(bufp);
bp->flags = 0;
- bp->orig_size = (long) size;
+ bp->orig_size = (SWord) size;
erts_refc_init(&bp->refc, 1);
sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
pbp = (ProcBin *) hp;
@@ -3186,7 +3373,11 @@ driver_deliver_term(ErlDrvPort port,
HRelease(rp, hp_end, hp);
}
/* send message */
- erts_queue_message(rp, &rp_locks, bp, mess, am_undefined);
+ erts_queue_message(rp, &rp_locks, bp, mess, am_undefined
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
}
else {
if (b2t.ix > b2t.used)
@@ -3239,8 +3430,8 @@ driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len
* and data is len length of bin starting from offset offs.
*/
-int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
- ErlDrvBinary* bin, int offs, int len)
+int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
+ ErlDrvBinary* bin, ErlDrvSizeT offs, ErlDrvSizeT len)
{
Port* prt = erts_drvport2port(ix);
@@ -3253,7 +3444,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
+ erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
return erts_net_message(prt,
prt->dist_entry,
@@ -3273,7 +3464,8 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
** Example: if hlen = 3 then the port owner will receive the data
** [H1,H2,H3 | T]
*/
-int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
+int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
+ char* buf, ErlDrvSizeT len)
{
Port* prt = erts_drvport2port(ix);
@@ -3288,7 +3480,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
+ erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
if (len == 0)
return erts_net_message(prt,
@@ -3310,27 +3502,29 @@ int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
/* Interface functions available to driver writers */
-int driver_output(ErlDrvPort ix, char* buf, int len)
+int driver_output(ErlDrvPort ix, char* buf, ErlDrvSizeT len)
{
ERTS_SMP_CHK_NO_PROC_LOCKS;
return driver_output2(ix, NULL, 0, buf, len);
}
-int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
+int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
+ ErlIOVec* vec, ErlDrvSizeT skip)
{
int n;
- int len;
- int size;
+ ErlDrvSizeT len;
+ ErlDrvSizeT size;
SysIOVec* iov;
ErlDrvBinary** binv;
Port* prt;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- size = vec->size - skip; /* Size of remaining bytes in vector */
- ASSERT(size >= 0);
- if (size <= 0)
+ ASSERT(vec->size >= skip);
+ if (vec->size <= skip)
return driver_output2(ix, hbuf, hlen, NULL, 0);
+ size = vec->size - skip; /* Size of remaining bytes in vector */
+
ASSERT(hlen >= 0); /* debug only */
if (hlen < 0)
hlen = 0;
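The reordering above (test vec->size against skip before subtracting) matters because both operands are now unsigned: computing the difference first and then testing "size <= 0" can never catch skip > vec->size, since the subtraction wraps. A stand-alone illustration:

    #include <stddef.h>

    /* Test first, subtract afterwards; with unsigned sizes the other order
     * cannot detect an over-large skip. */
    static size_t remaining_bytes(size_t total, size_t skip)
    {
        if (total <= skip)
            return 0;
        return total - skip;
    }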
@@ -3365,7 +3559,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
/* XXX handle distribution !!! */
prt->bytes_in += (hlen + size);
- erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + size));
+ erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size));
deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size);
return 0;
}
@@ -3374,17 +3568,14 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
** input is a vector a buffer and a max length
** return bytes copied
*/
-int driver_vec_to_buf(vec, buf, len)
-ErlIOVec* vec;
-char* buf;
-int len;
+ErlDrvSizeT driver_vec_to_buf(ErlIOVec *vec, char *buf, ErlDrvSizeT len)
{
SysIOVec* iov = vec->iov;
int n = vec->vsize;
- int orig_len = len;
+ ErlDrvSizeT orig_len = len;
while(n--) {
- int ilen = iov->iov_len;
+ size_t ilen = iov->iov_len;
if (ilen < len) {
sys_memcpy(buf, iov->iov_base, ilen);
len -= ilen;
@@ -3436,43 +3627,34 @@ driver_binary_dec_refc(ErlDrvBinary *dbp)
*/
ErlDrvBinary*
-driver_alloc_binary(int size)
+driver_alloc_binary(ErlDrvSizeT size)
{
Binary* bin;
- if (size < 0)
- return NULL;
-
bin = erts_bin_drv_alloc_fnf((Uint) size);
if (!bin)
return NULL; /* The driver write must take action */
bin->flags = BIN_FLAG_DRV;
erts_refc_init(&bin->refc, 1);
- bin->orig_size = (long) size;
+ bin->orig_size = (SWord) size;
return Binary2ErlDrvBinary(bin);
}
/* Reallocate space hold by binary */
-ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, int size)
+ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
{
Binary* oldbin;
Binary* newbin;
- if (!bin || size < 0) {
+ if (!bin) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
- "Bad use of driver_realloc_binary(%p, %d): "
+ "Bad use of driver_realloc_binary(%p, %lu): "
"called with ",
- bin, size);
+ bin, (unsigned long)size);
if (!bin) {
erts_dsprintf(dsbufp, "NULL pointer as first argument");
- if (size < 0)
- erts_dsprintf(dsbufp, ", and ");
- }
- if (size < 0) {
- erts_dsprintf(dsbufp, "negative size as second argument");
- size = 0;
}
erts_send_warning_to_logger_nogl(dsbufp);
if (!bin)
@@ -3512,12 +3694,12 @@ ErlDrvBinary* dbin;
* Allocation/deallocation of memory for drivers
*/
-void *driver_alloc(size_t size)
+void *driver_alloc(ErlDrvSizeT size)
{
return erts_alloc_fnf(ERTS_ALC_T_DRV, (Uint) size);
}
-void *driver_realloc(void *ptr, size_t size)
+void *driver_realloc(void *ptr, ErlDrvSizeT size)
{
return erts_realloc_fnf(ERTS_ALC_T_DRV, ptr, (Uint) size);
}
@@ -3539,13 +3721,13 @@ pdl_init(void)
static ERTS_INLINE void
pdl_init_refc(ErlDrvPDL pdl)
{
- erts_atomic_init(&pdl->refc, 1);
+ erts_atomic_init_nob(&pdl->refc, 1);
}
static ERTS_INLINE ErlDrvSInt
pdl_read_refc(ErlDrvPDL pdl)
{
- erts_aint_t refc = erts_atomic_read(&pdl->refc);
+ erts_aint_t refc = erts_atomic_read_nob(&pdl->refc);
ERTS_LC_ASSERT(refc >= 0);
return (ErlDrvSInt) refc;
}
@@ -3553,14 +3735,14 @@ pdl_read_refc(ErlDrvPDL pdl)
static ERTS_INLINE void
pdl_inc_refc(ErlDrvPDL pdl)
{
- erts_atomic_inc(&pdl->refc);
+ erts_atomic_inc_nob(&pdl->refc);
ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 1);
}
static ERTS_INLINE ErlDrvSInt
pdl_inctest_refc(ErlDrvPDL pdl)
{
- erts_aint_t refc = erts_atomic_inctest(&pdl->refc);
+ erts_aint_t refc = erts_atomic_inc_read_nob(&pdl->refc);
ERTS_LC_ASSERT(refc > 1);
return (ErlDrvSInt) refc;
}
@@ -3569,7 +3751,7 @@ pdl_inctest_refc(ErlDrvPDL pdl)
static ERTS_INLINE void
pdl_dec_refc(ErlDrvPDL pdl)
{
- erts_atomic_dec(&pdl->refc);
+ erts_atomic_dec_nob(&pdl->refc);
ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 0);
}
#endif
@@ -3577,7 +3759,7 @@ pdl_dec_refc(ErlDrvPDL pdl)
static ERTS_INLINE ErlDrvSInt
pdl_dectest_refc(ErlDrvPDL pdl)
{
- erts_aint_t refc = erts_atomic_dectest(&pdl->refc);
+ erts_aint_t refc = erts_atomic_dec_read_nob(&pdl->refc);
ERTS_LC_ASSERT(refc >= 0);
return (ErlDrvSInt) refc;
}
@@ -3779,11 +3961,11 @@ static int expandq(ErlIOQueue* q, int n, int tail)
/* Put elements from vec at q tail */
-int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
+int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
int n;
- int len;
- int size;
+ size_t len;
+ ErlDrvSizeT size;
SysIOVec* iov;
ErlDrvBinary** binv;
ErlDrvBinary* b;
@@ -3792,10 +3974,10 @@ int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
if (q == NULL)
return -1;
- size = vec->size - skip;
- ASSERT(size >= 0); /* debug only */
- if (size <= 0)
+ ASSERT(vec->size >= skip); /* debug only */
+ if (vec->size <= skip)
return 0;
+ size = vec->size - skip;
iov = vec->iov;
binv = vec->binv;
@@ -3845,11 +4027,11 @@ int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
}
/* Put elements from vec at q head */
-int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
+int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, ErlDrvSizeT skip)
{
int n;
- int len;
- int size;
+ size_t len;
+ ErlDrvSizeT size;
SysIOVec* iov;
ErlDrvBinary** binv;
ErlDrvBinary* b;
@@ -3858,8 +4040,10 @@ int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
if (q == NULL)
return -1;
- if ((size = vec->size - skip) <= 0)
+ if (vec->size <= skip)
return 0;
+ size = vec->size - skip;
+
iov = vec->iov;
binv = vec->binv;
n = vec->vsize;
@@ -3914,15 +4098,14 @@ int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
** Remove size bytes from queue head
** Return number of bytes that remain in queue
*/
-int driver_deq(ErlDrvPort ix, int size)
+ErlDrvSizeT driver_deq(ErlDrvPort ix, ErlDrvSizeT size)
{
ErlIOQueue* q = drvport2ioq(ix);
- int len;
- int sz;
+ ErlDrvSizeT len;
- if ((q == NULL) || (sz = (q->size - size)) < 0)
+ if ((q == NULL) || (q->size < size))
return -1;
- q->size = sz;
+ q->size -= size;
while (size > 0) {
ASSERT(q->v_head != q->v_tail);
@@ -3945,16 +4128,16 @@ int driver_deq(ErlDrvPort ix, int size)
q->v_head = q->v_tail = q->v_start;
q->b_head = q->b_tail = q->b_start;
}
- return sz;
+ return q->size;
}
-int driver_peekqv(ErlDrvPort ix, ErlIOVec *ev) {
+ErlDrvSizeT driver_peekqv(ErlDrvPort ix, ErlIOVec *ev) {
ErlIOQueue *q = drvport2ioq(ix);
ASSERT(ev);
if (! q) {
- return -1;
+ return (ErlDrvSizeT) -1;
} else {
if ((ev->vsize = q->v_tail - q->v_head) == 0) {
ev->size = 0;
@@ -3983,12 +4166,12 @@ SysIOVec* driver_peekq(ErlDrvPort ix, int* vlenp) /* length of io-vector */
}
-int driver_sizeq(ErlDrvPort ix)
+ErlDrvSizeT driver_sizeq(ErlDrvPort ix)
{
ErlIOQueue* q = drvport2ioq(ix);
if (q == NULL)
- return -1;
+ return (size_t) -1;
return q->size;
}
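With driver_deq, driver_peekqv and driver_sizeq now returning the unsigned ErlDrvSizeT, the old "< 0" failure test in callers no longer works; failure has to be detected by comparing against (ErlDrvSizeT) -1. A hedged caller-side sketch (helper name hypothetical):

    #include "erl_driver.h"

    static int queue_is_empty(ErlDrvPort port)
    {
        ErlDrvSizeT sz = driver_sizeq(port);
        if (sz == (ErlDrvSizeT) -1)
            return -1;                 /* no queue / bad port handle */
        return sz == 0;
    }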
@@ -3996,7 +4179,8 @@ int driver_sizeq(ErlDrvPort ix)
/* Utils */
/* Enqueue a binary */
-int driver_enq_bin(ErlDrvPort ix, ErlDrvBinary* bin, int offs, int len)
+int driver_enq_bin(ErlDrvPort ix, ErlDrvBinary* bin,
+ ErlDrvSizeT offs, ErlDrvSizeT len)
{
SysIOVec iov;
ErlIOVec ev;
@@ -4013,7 +4197,7 @@ int driver_enq_bin(ErlDrvPort ix, ErlDrvBinary* bin, int offs, int len)
return driver_enqv(ix, &ev, 0);
}
-int driver_enq(ErlDrvPort ix, char* buffer, int len)
+int driver_enq(ErlDrvPort ix, char* buffer, ErlDrvSizeT len)
{
int code;
ErlDrvBinary* bin;
@@ -4029,7 +4213,8 @@ int driver_enq(ErlDrvPort ix, char* buffer, int len)
return code;
}
-int driver_pushq_bin(ErlDrvPort ix, ErlDrvBinary* bin, int offs, int len)
+int driver_pushq_bin(ErlDrvPort ix, ErlDrvBinary* bin,
+ ErlDrvSizeT offs, ErlDrvSizeT len)
{
SysIOVec iov;
ErlIOVec ev;
@@ -4046,7 +4231,7 @@ int driver_pushq_bin(ErlDrvPort ix, ErlDrvBinary* bin, int offs, int len)
return driver_pushqv(ix, &ev, 0);
}
-int driver_pushq(ErlDrvPort ix, char* buffer, int len)
+int driver_pushq(ErlDrvPort ix, char* buffer, ErlDrvSizeT len)
{
int code;
ErlDrvBinary* bin;
@@ -4075,7 +4260,7 @@ drv_cancel_timer(Port *prt)
erts_port_task_abort(prt->id, &prt->timeout_task);
}
-int driver_set_timer(ErlDrvPort ix, UWord t)
+int driver_set_timer(ErlDrvPort ix, unsigned long t)
{
Port* prt = erts_drvport2port(ix);
@@ -4423,6 +4608,12 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
ASSERT(callback != NULL);
ref_to_driver_monitor(ref,&drv_monitor);
DRV_MONITOR_UNLOCK_PDL(prt);
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_process_exit)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt)
+ DTRACE3(driver_process_exit, process_str, port_str, prt->name);
+ }
+#endif
fpe_was_unmasked = erts_block_fpe();
(*callback)((ErlDrvData) (prt->drv_data), &drv_monitor);
erts_unblock_fpe(fpe_was_unmasked);
@@ -4579,7 +4770,10 @@ int driver_lock_driver(ErlDrvPort ix)
erts_smp_mtx_lock(&erts_driver_list_lock);
- if (prt == NULL) return -1;
+ if (prt == NULL) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return -1;
+ }
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
@@ -4764,7 +4958,7 @@ get_current_port(void)
*/
static void
-no_output_callback(ErlDrvData drv_data, char *buf, int len)
+no_output_callback(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
}
@@ -4815,16 +5009,11 @@ static int
init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
{
drv->name = de->driver_name;
- if (de->extended_marker == ERL_DRV_EXTENDED_MARKER) {
- drv->version.major = de->major_version;
- drv->version.minor = de->minor_version;
- drv->flags = de->driver_flags;
- }
- else {
- drv->version.major = 0;
- drv->version.minor = 0;
- drv->flags = 0;
- }
+ ASSERT(de->extended_marker == ERL_DRV_EXTENDED_MARKER);
+ ASSERT(de->major_version >= 2);
+ drv->version.major = de->major_version;
+ drv->version.minor = de->minor_version;
+ drv->flags = de->driver_flags;
drv->handle = handle;
#ifdef ERTS_SMP
if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
@@ -4857,11 +5046,8 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->ready_output = de->ready_output ? de->ready_output : no_ready_output_callback;
drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
drv->ready_async = de->ready_async;
- if (de->extended_marker == ERL_DRV_EXTENDED_MARKER)
- drv->process_exit = de->process_exit;
- else
- drv->process_exit = NULL;
- if (de->minor_version >= 3/*R13A*/ && de->stop_select)
+ drv->process_exit = de->process_exit;
+ if (de->stop_select)
drv->stop_select = de->stop_select;
else
drv->stop_select = no_stop_select_callback;
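After the two init_driver hunks above, a driver entry without the extended marker (or with a major version below 2) is no longer tolerated at runtime; process_exit and stop_select are copied unconditionally. A minimal sketch of an entry that satisfies the new assertions, assuming the usual ERL_DRV_EXTENDED_* macros from erl_driver.h and leaving all callbacks unset:

    #include "erl_driver.h"

    static ErlDrvEntry example_driver_entry = {
        .driver_name     = "example_drv",
        .extended_marker = ERL_DRV_EXTENDED_MARKER,
        .major_version   = ERL_DRV_EXTENDED_MAJOR_VERSION,
        .minor_version   = ERL_DRV_EXTENDED_MINOR_VERSION,
        .driver_flags    = 0,
    };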
@@ -4871,6 +5057,8 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
else {
int res;
int fpe_was_unmasked = erts_block_fpe();
+ DTRACE4(driver_init, drv->name, drv->version.major, drv->version.minor,
+ drv->flags);
res = (*de->init)();
erts_unblock_fpe(fpe_was_unmasked);
return res;
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 91e4ccce70..b93b1ad09a 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -108,7 +108,8 @@ erts_put_module(Eterm mod)
int index;
ASSERT(is_atom(mod));
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0
+ || erts_smp_thr_progress_is_blocking());
e.module = atom_val(mod);
index = index_put(&module_table, (void*) &e);
return (Module*) erts_index_lookup(&module_table, index);
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 8a5763b4bb..9b168889dd 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2011. All Rights Reserved.
+# Copyright Ericsson AB 1997-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -25,30 +25,12 @@
# instruction transformations; thus, they never occur in BEAM files.
#
-# Special instruction used to generate an error message when
-# trying to load a module compiled by the V1 compiler (R5 & R6).
-# (Specially treated in beam_load.c.)
+# The too_old_compiler/0 instruction is specially handled in beam_load.c
+# to produce a user-friendly message informing the user that the module
+# needs to be re-compiled with a modern compiler.
too_old_compiler/0
-too_old_compiler
-
-#
-# Obsolete instruction usage follow. (Nowdays we use f with
-# a zero label instead of p.)
-#
-
-is_list p S => too_old_compiler
-is_nonempty_list p R => too_old_compiler
-is_nil p R => too_old_compiler
-
-is_tuple p S => too_old_compiler
-test_arity p S Arity => too_old_compiler
-
-is_integer p R => too_old_compiler
-is_float p R => too_old_compiler
-is_atom p R => too_old_compiler
-
-is_eq_exact p S1 S2 => too_old_compiler
+too_old_compiler | never() =>
# In R9C and earlier, the loader used to insert special instructions inside
# the module_info/0,1 functions. (In R10B and later, the compiler inserts
@@ -88,12 +70,42 @@ i_time_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
-i_global_cons
-i_global_tuple
-i_global_copy
return
+#
+# To ensure that a "move Src x(0)" instruction can be combined
+# with the following call instruction, we need to make sure that
+# there is no line/1 instruction between the move and the call.
+#
+
+move S r | line Loc | call_ext Ar Func => \
+ line Loc | move S r | call_ext Ar Func
+move S r | line Loc | call_ext_last Ar Func=u$is_bif D => \
+ line Loc | move S r | call_ext_last Ar Func D
+move S r | line Loc | call_ext_only Ar Func=u$is_bif => \
+ line Loc | move S r | call_ext_only Ar Func
+move S r | line Loc | call Ar Func => \
+ line Loc | move S r | call Ar Func
+
+#
+# A tail-recursive call to an external function (non-BIF) will
+# never be saved on the stack, so there is no reason to keep
+# the line instruction. (The compiler did not remove the line
+# instruction because it cannot tell the difference between
+# BIFs and ordinary Erlang functions.)
+#
+
+line Loc | call_ext_last Ar Func=u$is_not_bif D => \
+ call_ext_last Ar Func D
+line Loc | call_ext_only Ar Func=u$is_not_bif => \
+ call_ext_only Ar Func
+
+line Loc | func_info M F A => func_info M F A | line Loc
+
+line I
+
+
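As a hypothetical illustration of the first rule above (operands invented, not from a real module), a compiler-emitted sequence of the form

    move x(1) r | line 42 | call 1 my_fun/1

is reordered into

    line 42 | move x(1) r | call 1 my_fun/1

so the move into x(0) stays adjacent to the call and the two can later be fused by the loader.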
%macro: allocate Allocate -pack
%macro: allocate_zero AllocateZero -pack
%macro: allocate_heap AllocateHeap -pack
@@ -277,8 +289,6 @@ raise s s
badarg j
system_limit j
-move R R =>
-
move C=cxy r | jump Lbl => move_jump Lbl C
%macro: move_jump MoveJump -nonext
@@ -585,8 +595,6 @@ get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst | original_reg Reg
original_reg Reg Pos =>
-get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst
-
original_reg/2
extract_next_element D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
@@ -837,11 +845,11 @@ call_ext_only u==3 u$func:erlang:apply/3 => i_apply_only
# thus there is no need to generate any return instruction.
#
-call_ext_last u==1 Bif=u$bif:erlang:exit/1 D => call_bif1 Bif
-call_ext_last u==1 Bif=u$bif:erlang:throw/1 D => call_bif1 Bif
+call_ext_last u==1 Bif=u$bif:erlang:exit/1 D => call_bif Bif
+call_ext_last u==1 Bif=u$bif:erlang:throw/1 D => call_bif Bif
-call_ext_only u==1 Bif=u$bif:erlang:exit/1 => call_bif1 Bif
-call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif1 Bif
+call_ext_only u==1 Bif=u$bif:erlang:exit/1 => call_bif Bif
+call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif Bif
#
# The error/1 and error/2 BIFs never execute the instruction following them;
@@ -851,13 +859,13 @@ call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif1 Bif
# the continuation pointer on the stack.
#
-call_ext_last u==1 Bif=u$bif:erlang:error/1 D => call_bif1 Bif
-call_ext_last u==2 Bif=u$bif:erlang:error/2 D => call_bif2 Bif
+call_ext_last u==1 Bif=u$bif:erlang:error/1 D => call_bif Bif
+call_ext_last u==2 Bif=u$bif:erlang:error/2 D => call_bif Bif
call_ext_only Ar=u==1 Bif=u$bif:erlang:error/1 => \
- allocate u Ar | call_bif1 Bif
+ allocate u Ar | call_bif Bif
call_ext_only Ar=u==2 Bif=u$bif:erlang:error/2 => \
- allocate u Ar | call_bif2 Bif
+ allocate u Ar | call_bif Bif
#
# The yield/0 BIF is an instruction
@@ -875,21 +883,93 @@ call_ext_last u==3 u$func:erlang:hibernate/3 D => i_hibernate
call_ext_only u==3 u$func:erlang:hibernate/3 => i_hibernate
#
-# Hybrid memory architecture need special cons and tuple instructions
-# that allocate on the message area. These looks like BIFs in the BEAM code.
-#
-
-call_ext u==2 u$func:hybrid:cons/2 => i_global_cons
-call_ext_last u==2 u$func:hybrid:cons/2 D => i_global_cons | deallocate_return D
-call_ext_only Ar=u==2 u$func:hybrid:cons/2 => i_global_cons | return
-
-call_ext u==1 u$func:hybrid:tuple/1 => i_global_tuple
-call_ext_last u==1 u$func:hybrid:tuple/1 D => i_global_tuple | deallocate_return D
-call_ext_only Ar=u==1 u$func:hybrid:tuple/1 => i_global_tuple | return
-
-call_ext u==1 u$func:hybrid:copy/1 => i_global_copy
-call_ext_last u==1 u$func:hybrid:copy/1 D => i_global_copy | deallocate_return D
-call_ext_only u==1 Ar=u$func:hybrid:copy/1 => i_global_copy | return
+# If VM probes are not enabled, we want to short-circuit calls to
+# the dt tag BIFs to make them as cheap as possible.
+#
+
+%unless USE_VM_PROBES
+
+call_ext Arity u$func:erlang:dt_get_tag/0 => \
+ move a=am_undefined r
+call_ext_last Arity u$func:erlang:dt_get_tag/0 D => \
+ move a=am_undefined r | deallocate D | return
+call_ext_only Arity u$func:erlang:dt_get_tag/0 => \
+ move a=am_undefined r | return
+
+move Any r | call_ext Arity u$func:erlang:dt_put_tag/1 => \
+ move a=am_undefined r
+move Any r | call_ext_last Arity u$func:erlang:dt_put_tag/1 D => \
+ move a=am_undefined r | deallocate D | return
+move Any r | call_ext_only Arity u$func:erlang:dt_put_tag/1 => \
+ move a=am_undefined r | return
+call_ext Arity u$func:erlang:dt_put_tag/1 => \
+ move a=am_undefined r
+call_ext_last Arity u$func:erlang:dt_put_tag/1 D => \
+ move a=am_undefined r | deallocate D | return
+call_ext_only Arity u$func:erlang:dt_put_tag/1 => \
+ move a=am_undefined r | return
+
+call_ext Arity u$func:erlang:dt_get_tag_data/0 => \
+ move a=am_undefined r
+call_ext_last Arity u$func:erlang:dt_get_tag_data/0 D => \
+ move a=am_undefined r | deallocate D | return
+call_ext_only Arity u$func:erlang:dt_get_tag_data/0 => \
+ move a=am_undefined r | return
+
+move Any r | call_ext Arity u$func:erlang:dt_spread_tag/1 => \
+ move a=am_true r
+move Any r | call_ext_last Arity u$func:erlang:dt_spread_tag/1 D => \
+ move a=am_true r | deallocate D | return
+move Any r | call_ext_only Arity u$func:erlang:dt_spread_tag/1 => \
+ move a=am_true r | return
+call_ext Arity u$func:erlang:dt_spread_tag/1 => \
+ move a=am_true r
+call_ext_last Arity u$func:erlang:dt_spread_tag/1 D => \
+ move a=am_true r | deallocate D | return
+call_ext_only Arity u$func:erlang:dt_spread_tag/1 => \
+ move a=am_true r | return
+
+move Any r | call_ext Arity u$func:erlang:dt_restore_tag/1 => \
+ move a=am_true r
+move Any r | call_ext_last Arity u$func:erlang:dt_restore_tag/1 D => \
+ move a=am_true r | deallocate D | return
+move Any r | call_ext_only Arity u$func:erlang:dt_restore_tag/1 => \
+ move a=am_true r | return
+call_ext Arity u$func:erlang:dt_restore_tag/1 => \
+ move a=am_true r
+call_ext_last Arity u$func:erlang:dt_restore_tag/1 D => \
+ move a=am_true r | deallocate D | return
+call_ext_only Arity u$func:erlang:dt_restore_tag/1 => \
+ move a=am_true r | return
+
+move Any r | call_ext Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
+ move Any r
+move Any r | call_ext_last Arity u$func:erlang:dt_prepend_vm_tag_data/1 D => \
+ move Any r | deallocate D | return
+move Any r | call_ext_only Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
+ move Any r | return
+call_ext Arity u$func:erlang:dt_prepend_vm_tag_data/1 =>
+call_ext_last Arity u$func:erlang:dt_prepend_vm_tag_data/1 D => \
+ deallocate D | return
+call_ext_only Arity u$func:erlang:dt_prepend_vm_tag_data/1 => \
+ return
+
+move Any r | call_ext Arity u$func:erlang:dt_append_vm_tag_data/1 => \
+ move Any r
+move Any r | call_ext_last Arity u$func:erlang:dt_append_vm_tag_data/1 D => \
+ move Any r | deallocate D | return
+move Any r | call_ext_only Arity u$func:erlang:dt_append_vm_tag_data/1 => \
+ move Any r | return
+call_ext Arity u$func:erlang:dt_append_vm_tag_data/1 =>
+call_ext_last Arity u$func:erlang:dt_append_vm_tag_data/1 D => \
+ deallocate D | return
+call_ext_only Arity u$func:erlang:dt_append_vm_tag_data/1 => \
+ return
+
+# Can happen after one of the transformations above.
+move Discarded r | move Something r => move Something r
+
+%endif
#
# The general case for BIFs that have no special instructions.
@@ -898,24 +978,12 @@ call_ext_only u==1 Ar=u$func:hybrid:copy/1 => i_global_copy | return
# To make trapping and stack backtraces work correctly, we make sure that
# the continuation pointer is always stored on the stack.
-call_ext u==0 Bif=u$is_bif => call_bif0 Bif
-call_ext u==1 Bif=u$is_bif => call_bif1 Bif
-call_ext u==2 Bif=u$is_bif => call_bif2 Bif
-call_ext u==3 Bif=$is_bif => call_bif3 Bif
+call_ext u Bif=u$is_bif => call_bif Bif
-call_ext_last u==0 Bif=u$is_bif D => call_bif0 Bif | deallocate_return D
-call_ext_last u==1 Bif=u$is_bif D => call_bif1 Bif | deallocate_return D
-call_ext_last u==2 Bif=u$is_bif D => call_bif2 Bif | deallocate_return D
-call_ext_last u==3 Bif=u$is_bif D => call_bif3 Bif | deallocate_return D
+call_ext_last u Bif=u$is_bif D => call_bif Bif | deallocate_return D
-call_ext_only Ar=u==0 Bif=u$is_bif => \
- allocate u Ar | call_bif0 Bif | deallocate_return u
-call_ext_only Ar=u==1 Bif=u$is_bif => \
- allocate u Ar | call_bif1 Bif | deallocate_return u
-call_ext_only Ar=u==2 Bif=u$is_bif => \
- allocate u Ar | call_bif2 Bif | deallocate_return u
-call_ext_only Ar=u==3 Bif=u$is_bif => \
- allocate u Ar | call_bif3 Bif | deallocate_return u
+call_ext_only Ar=u Bif=u$is_bif => \
+ allocate u Ar | call_bif Bif | deallocate_return u
#
# Any remaining calls are calls to Erlang functions, not BIFs.
@@ -928,9 +996,9 @@ move S=c r | call_ext Ar=u Func=u$is_not_bif => i_move_call_ext S r Func
move S=c r | call_ext_last Ar=u Func=u$is_not_bif D => i_move_call_ext_last Func D S r
move S=c r | call_ext_only Ar=u Func=u$is_not_bif => i_move_call_ext_only Func S r
-call_ext Ar=u Func => i_call_ext Func
-call_ext_last Ar=u Func D => i_call_ext_last Func D
-call_ext_only Ar=u Func => i_call_ext_only Func
+call_ext Ar Func => i_call_ext Func
+call_ext_last Ar Func D => i_call_ext_last Func D
+call_ext_only Ar Func => i_call_ext_only Func
i_apply
i_apply_last P
@@ -942,10 +1010,7 @@ i_apply_fun_only
i_hibernate
-call_bif0 e
-call_bif1 e
-call_bif2 e
-call_bif3 e
+call_bif e
#
# Calls to non-building and guard BIFs.
@@ -964,7 +1029,7 @@ bif1 p Bif S1 Dst => bif1_body Bif S1 Dst
bif1_body Bif Literal=q Dst => move Literal x | bif1_body Bif x Dst
bif2 p Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2_body Bif Dst
-bif2 Fail=f Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2 Fail Bif Dst
+bif2 Fail Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2 Fail Bif Dst
i_get s d
@@ -1047,8 +1112,8 @@ i_move_call_ext_only e c r
# Fun calls.
-call_fun Arity=u | deallocate D | return => i_call_fun_last Arity D
-call_fun Arity=u => i_call_fun Arity
+call_fun Arity | deallocate D | return => i_call_fun_last Arity D
+call_fun Arity => i_call_fun Arity
i_call_fun I
i_call_fun_last I P
@@ -1236,7 +1301,7 @@ i_bs_init_heap I I I d
i_bs_init_heap_bin_heap I I I d
-bs_init_bits Fail Sz Words Regs Flags Dst | binary_too_big_bits(Sz) => system_limit Fail
+bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail
bs_init_bits Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init_bits Sz Regs Dst
bs_init_bits Fail Sz=u Words Regs Flags Dst => i_bs_init_bits_heap Sz Words Regs Dst
@@ -1304,13 +1369,13 @@ i_bs_utf16_size s d
bs_put_utf8 Fail=j Flags=u Literal=q => \
move Literal x | bs_put_utf8 Fail Flags x
-bs_put_utf8 Fail=j u Src=s => i_bs_put_utf8 Fail Src
+bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src
i_bs_put_utf8 j s
bs_put_utf16 Fail=j Flags=u Literal=q => \
move Literal x | bs_put_utf16 Fail Flags x
-bs_put_utf16 Fail=j Flags=u Src=s => i_bs_put_utf16 Fail Flags Src
+bs_put_utf16 Fail Flags=u Src=s => i_bs_put_utf16 Fail Flags Src
i_bs_put_utf16 j I s
@@ -1475,34 +1540,13 @@ bif1 Fail u$bif:erlang:trunc/1 s d => too_old_compiler
#
# Guard BIFs.
#
-gc_bif1 Fail I Bif=u$bif:erlang:length/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:size/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:bit_size/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:byte_size/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:abs/1 Src Dst=d => \
+gc_bif1 Fail I Bif Src Dst => \
gen_guard_bif1(Fail, I, Bif, Src, Dst)
-gc_bif1 Fail I Bif=u$bif:erlang:float/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:round/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif1 Fail I Bif=u$bif:erlang:trunc/1 Src Dst=d => \
- gen_guard_bif1(Fail, I, Bif, Src, Dst)
-
-gc_bif2 Fail I Bif=u$bif:erlang:binary_part/2 S1 S2 Dst=d => \
+gc_bif2 Fail I Bif S1 S2 Dst => \
gen_guard_bif2(Fail, I, Bif, S1, S2, Dst)
-gc_bif3 Fail I Bif=u$bif:erlang:binary_part/3 S1 S2 S3 Dst=d => \
+gc_bif3 Fail I Bif S1 S2 S3 Dst => \
gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst)
i_gc_bif1 Fail Bif V=q Live D => move V x | i_gc_bif1 Fail Bif x Live D
@@ -1520,6 +1564,15 @@ ii_gc_bif3/7
ii_gc_bif3 Fail Bif S1 S2 S3 Live D => move S1 x | i_fetch S2 S3 | i_gc_bif3 Fail Bif x Live D
i_gc_bif3 j I s I d
+
+#
+# The following instruction is specially handled in beam_load.c
+# to produce a user-friendly message if an unsupported guard BIF is
+# encountered.
+#
+unsupported_guard_bif/3
+unsupported_guard_bif A B C | never() =>
+
#
# R13B03
#
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
index a66d60aa22..f1cfa8df39 100644
--- a/erts/emulator/beam/packet_parser.c
+++ b/erts/emulator/beam/packet_parser.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -301,7 +301,11 @@ int packet_get_length(enum PacketParseType htype,
/* TCP_PB_LINE_LF: [Data ... \n] */
const char* ptr2;
if ((ptr2 = memchr(ptr, '\n', n)) == NULL) {
- if (n >= trunc_len && trunc_len!=0) { /* buffer full */
+ if (n > max_plen && max_plen != 0) { /* packet full */
+ DEBUGF((" => packet full (no NL)=%d\r\n", n));
+ goto error;
+ }
+ else if (n >= trunc_len && trunc_len!=0) { /* buffer full */
DEBUGF((" => line buffer full (no NL)=%d\r\n", n));
return trunc_len;
}
@@ -309,6 +313,10 @@ int packet_get_length(enum PacketParseType htype,
}
else {
int len = (ptr2 - ptr) + 1; /* including newline */
+ if (len > max_plen && max_plen!=0) {
+ DEBUGF((" => packet_size %d exceeded\r\n", max_plen));
+ goto error;
+ }
if (len > trunc_len && trunc_len!=0) {
DEBUGF((" => truncated line=%d\r\n", trunc_len));
return trunc_len;
@@ -397,33 +405,50 @@ int packet_get_length(enum PacketParseType htype,
const char* ptr1 = ptr;
int len = plen;
+ if (!max_plen) {
+ /* This is for backward compatibility with old users of decode_packet
+ * that might use the option 'line_length' to limit the accepted
+ * length of http lines.
+ */
+ max_plen = trunc_len;
+ }
+
while (1) {
const char* ptr2 = memchr(ptr1, '\n', len);
if (ptr2 == NULL) {
- if (n >= trunc_len && trunc_len!=0) { /* buffer full */
- plen = trunc_len;
- goto done;
+ if (max_plen != 0) {
+ if (n >= max_plen) /* packet full */
+ goto error;
}
goto more;
}
else {
plen = (ptr2 - ptr) + 1;
-
- if (*statep == 0)
+
+ if (*statep == 0) {
+ if (max_plen != 0 && plen > max_plen)
+ goto error;
goto done;
-
+ }
+
if (plen < n) {
if (SP(ptr2+1) && plen>2) {
/* header field value continue on next line */
ptr1 = ptr2+1;
len = n - plen;
}
- else
+ else {
+ if (max_plen != 0 && plen > max_plen)
+ goto error;
goto done;
+ }
}
- else
+ else {
+ if (max_plen != 0 && plen > max_plen)
+ goto error;
goto more;
+ }
}
}
}
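The packet_parser changes above all apply the same rule: a max_plen of 0 means "no limit", and otherwise any line or packet longer than max_plen is an error rather than a truncation. A tiny stand-alone helper capturing that rule (hypothetical name, not part of packet_parser.c):

    static int exceeds_packet_limit(unsigned len, unsigned max_plen)
    {
        return max_plen != 0 && len > max_plen;
    }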
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
index 97bab3ab71..38e8cfbf28 100644
--- a/erts/emulator/beam/register.h
+++ b/erts/emulator/beam/register.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -41,7 +41,7 @@ struct port;
typedef struct reg_proc
{
HashBucket bucket; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
- Process *p; /* The process registerd (only one of this and
+ Process *p; /* The process registered (only one of this and
'pt' is non-NULL */
struct port *pt; /* The port registered */
Eterm name; /* Atom name */
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 4c54e19cdb..3326e5cc2a 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -61,7 +61,7 @@ static ERTS_INLINE int align_up_pow2(int val)
*/
static void rehash(SafeHash* h, int grow_limit)
{
- if (erts_smp_atomic_xchg(&h->is_rehashing, 1) != 0) {
+ if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
return; /* already in progress */
}
if (h->grow_limit == grow_limit) {
@@ -166,8 +166,8 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
h->name = name;
h->fun = fun;
set_size(h,size);
- erts_smp_atomic_init(&h->is_rehashing, 0);
- erts_smp_atomic_init(&h->nitems, 0);
+ erts_smp_atomic_init_nob(&h->is_rehashing, 0);
+ erts_smp_atomic_init_nob(&h->nitems, 0);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
}
@@ -222,7 +222,7 @@ void* safe_hash_put(SafeHash* h, void* tmpl)
*head = b;
grow_limit = h->grow_limit;
erts_smp_mtx_unlock(lock);
- if (erts_smp_atomic_inctest(&h->nitems) > grow_limit) {
+ if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) {
rehash(h, grow_limit);
}
return (void*) b;
@@ -245,7 +245,7 @@ void* safe_hash_erase(SafeHash* h, void* tmpl)
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
*prevp = b->next;
erts_smp_mtx_unlock(lock);
- erts_smp_atomic_dec(&h->nitems);
+ erts_smp_atomic_dec_nob(&h->nitems);
h->fun.free((void*)b);
return tmpl;
}
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index e64c43de6e..7b2bb81f62 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -103,6 +103,15 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_LIKELY(BOOL) (BOOL)
# define ERTS_UNLIKELY(BOOL) (BOOL)
#endif
+#ifdef __GNUC__
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 5)
+# define ERTS_DECLARE_DUMMY(X) X __attribute__ ((unused))
+# else
+# define ERTS_DECLARE_DUMMY(X) X
+# endif
+#else
+# define ERTS_DECLARE_DUMMY(X) X
+#endif
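ERTS_DECLARE_DUMMY is the macro behind the dummy declarations seen earlier in this patch (for example at the ERTS_GET_REAL_BIN and ERTS_GET_BINARY_BYTES call sites in io.c): the variable is assigned by a macro expansion but never read, and the unused attribute keeps newer GCCs from warning about it. Usage sketch:

    ERTS_DECLARE_DUMMY(Uint bitoffs);   /* written by the macro expansion, never read */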
#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
# undef ERTS_CAN_INLINE
@@ -212,7 +221,8 @@ int real_printf(const char *fmt, ...);
*/
#if !((SIZEOF_VOID_P >= 4) && (SIZEOF_VOID_P == SIZEOF_SIZE_T) \
- && ((SIZEOF_VOID_P == SIZEOF_INT) || (SIZEOF_VOID_P == SIZEOF_LONG)))
+ && ((SIZEOF_VOID_P == SIZEOF_INT) || (SIZEOF_VOID_P == SIZEOF_LONG) || \
+ (SIZEOF_VOID_P == SIZEOF_LONG_LONG)))
#error Cannot handle this combination of int/long/void*/size_t sizes
#endif
@@ -246,6 +256,7 @@ typedef unsigned int Eterm;
typedef unsigned int Uint;
typedef int Sint;
#define ERTS_SIZEOF_ETERM SIZEOF_INT
+#define ErtsStrToSint strtol
#else
#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
#endif
@@ -253,9 +264,18 @@ typedef int Sint;
#if SIZEOF_VOID_P == SIZEOF_LONG
typedef unsigned long UWord;
typedef long SWord;
+#define SWORD_CONSTANT(Const) Const##L
+#define UWORD_CONSTANT(Const) Const##UL
#elif SIZEOF_VOID_P == SIZEOF_INT
typedef unsigned int UWord;
typedef int SWord;
+#define SWORD_CONSTANT(Const) Const
+#define UWORD_CONSTANT(Const) Const##U
+#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
+typedef unsigned long long UWord;
+typedef long long SWord;
+#define SWORD_CONSTANT(Const) Const##LL
+#define UWORD_CONSTANT(Const) Const##ULL
#else
#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
#endif
@@ -266,12 +286,30 @@ typedef int SWord;
typedef unsigned long Eterm;
typedef unsigned long Uint;
typedef long Sint;
+#define SWORD_CONSTANT(Const) Const##L
+#define UWORD_CONSTANT(Const) Const##UL
#define ERTS_SIZEOF_ETERM SIZEOF_LONG
+#define ErtsStrToSint strtol
#elif SIZEOF_VOID_P == SIZEOF_INT
typedef unsigned int Eterm;
typedef unsigned int Uint;
typedef int Sint;
+#define SWORD_CONSTANT(Const) Const
+#define UWORD_CONSTANT(Const) Const##U
#define ERTS_SIZEOF_ETERM SIZEOF_INT
+#define ErtsStrToSint strtol
+#elif SIZEOF_VOID_P == SIZEOF_LONG_LONG
+typedef unsigned long long Eterm;
+typedef unsigned long long Uint;
+typedef long long Sint;
+#define SWORD_CONSTANT(Const) Const##LL
+#define UWORD_CONSTANT(Const) Const##ULL
+#define ERTS_SIZEOF_ETERM SIZEOF_LONG_LONG
+#if defined(__WIN32__)
+#define ErtsStrToSint _strtoi64
+#else
+#define ErtsStrToSint strtoll
+#endif
#else
#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
#endif
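The SWORD_CONSTANT/UWORD_CONSTANT macros exist because of the new SIZEOF_LONG_LONG branch: on an LLP64 platform such as 64-bit Windows a plain "1" or "1L" literal is only 32 bits wide, so building word-sized constants from it would be wrong. A hypothetical use of the macros (not taken from the patch):

    #define EXAMPLE_TOP_BIT (UWORD_CONSTANT(1) << (sizeof(UWord)*8 - 1))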
@@ -340,7 +378,8 @@ int erts_send_warning_to_logger_str_nogl(char *);
#ifdef ERTS_WANT_BREAK_HANDLING
# ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_break_requested;
-# define ERTS_BREAK_REQUESTED ((int) erts_smp_atomic32_read(&erts_break_requested))
+# define ERTS_BREAK_REQUESTED \
+ ((int) erts_smp_atomic32_read_nob(&erts_break_requested))
# else
extern volatile int erts_break_requested;
# define ERTS_BREAK_REQUESTED erts_break_requested
@@ -354,7 +393,7 @@ void erts_do_break_handling(void);
# else
# ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read(&erts_got_sigusr1))
+# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read_mb(&erts_got_sigusr1))
# else
extern volatile int erts_got_sigusr1;
# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
@@ -363,11 +402,15 @@ extern volatile int erts_got_sigusr1;
#endif
#ifdef ERTS_SMP
-extern erts_smp_atomic_t erts_writing_erl_crash_dump;
+extern erts_smp_atomic32_t erts_writing_erl_crash_dump;
+extern erts_tsd_key_t erts_is_crash_dumping_key;
+#define ERTS_SOMEONE_IS_CRASH_DUMPING \
+ ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump))
#define ERTS_IS_CRASH_DUMPING \
- ((int) erts_smp_atomic_read(&erts_writing_erl_crash_dump))
+ ((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key))
#else
extern volatile int erts_writing_erl_crash_dump;
+#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump
#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump
#endif
@@ -468,16 +511,7 @@ __decl_noreturn void __noreturn erl_exit(int n, char*, ...);
/* Some special erl_exit() codes: */
#define ERTS_INTR_EXIT INT_MIN /* called from signal handler */
#define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */
-#define ERTS_DUMP_EXIT (127) /* crash dump; then exit() */
-
-
-#ifndef ERTS_SMP
-int check_async_ready(void);
-#ifdef USE_THREADS
-void sys_async_ready(int hndl);
-int erts_register_async_ready_callback(void (*funcp)(void));
-#endif
-#endif
+#define ERTS_DUMP_EXIT (INT_MIN + 2) /* crash dump; then exit() */
Eterm erts_check_io_info(void *p);
@@ -610,14 +644,12 @@ extern char *erts_sys_ddll_error(int code);
/*
* System interfaces for startup.
*/
+#include "erl_time.h"
-
-#ifdef ERTS_SMP
void erts_sys_schedule_interrupt(int set);
-void erts_sys_schedule_interrupt_timed(int set, long msec);
+#ifdef ERTS_SMP
+void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec);
void erts_sys_main_thread(void);
-#else
-#define erts_sys_schedule_interrupt(Set)
#endif
extern void erts_sys_prepare_crash_dump(void);
@@ -633,17 +665,24 @@ Preload* sys_preloaded(void);
unsigned char* sys_preload_begin(Preload*);
void sys_preload_end(Preload*);
int sys_get_key(int);
-void elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
- unsigned long *ms_user_diff, unsigned long *ms_sys_diff);
-void wall_clock_elapsed_time_both(unsigned long *ms_total,
- unsigned long *ms_diff);
+void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
+ UWord *ms_user_diff, UWord *ms_sys_diff);
+void wall_clock_elapsed_time_both(UWord *ms_total,
+ UWord *ms_diff);
void get_time(int *hour, int *minute, int *second);
void get_date(int *year, int *month, int *day);
void get_localtime(int *year, int *month, int *day,
int *hour, int *minute, int *second);
void get_universaltime(int *year, int *month, int *day,
int *hour, int *minute, int *second);
-int univ_to_local(Sint *year, Sint *month, Sint *day,
+int seconds_to_univ(Sint64 seconds,
+ Sint *year, Sint *month, Sint *day,
+ Sint *hour, Sint *minute, Sint *second);
+int univ_to_seconds(Sint year, Sint month, Sint day,
+ Sint hour, Sint minute, Sint second,
+ Sint64* seconds);
+int univ_to_local(
+ Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second);
int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
@@ -669,6 +708,8 @@ int erts_sys_putenv(char *key_value, int sep_ix);
*size), a value > 0 if value buffer is too small (*size is set to needed
size), and a value < 0 on failure. */
int erts_sys_getenv(char *key, char *value, size_t *size);
+/* erts_sys_getenv__() is only allowed to be used in early init phase */
+int erts_sys_getenv__(char *key, char *value, size_t *size);
/* Easier to use, but not as efficient, environment functions */
char *erts_read_env(char *key);
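
The comment a few lines up fixes the calling convention for erts_sys_getenv(): 0 on success, a value > 0 when the supplied buffer is too small (with *size updated to the needed size), and a value < 0 on failure. A minimal usage sketch of that convention (buffer and key names are illustrative only):

    char key[] = "HOME";
    char small[16];
    size_t size = sizeof(small);
    int res = erts_sys_getenv(key, small, &size);
    if (res == 0) {
        /* value fits in 'small' */
    } else if (res > 0) {
        /* buffer too small; 'size' now holds the needed size, so retry
         * with a buffer of at least that many bytes */
    } else {
        /* res < 0: lookup failed (e.g. the variable is not set) */
    }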
@@ -692,291 +733,14 @@ int erts_write_env(char *key, char *value);
int sys_alloc_opt(int, int);
typedef struct {
- Sint trim_threshold;
- Sint top_pad;
- Sint mmap_threshold;
- Sint mmap_max;
+ int trim_threshold;
+ int top_pad;
+ int mmap_threshold;
+ int mmap_max;
} SysAllocStat;
void sys_alloc_stat(SysAllocStat *);
-/* Block the whole system... */
-
-#define ERTS_BS_FLG_ALLOW_GC (((Uint32) 1) << 0)
-#define ERTS_BS_FLG_ALLOW_IO (((Uint32) 1) << 1)
-
-/* Activities... */
-typedef enum {
- ERTS_ACTIVITY_UNDEFINED, /* Undefined activity */
- ERTS_ACTIVITY_WAIT, /* Waiting */
- ERTS_ACTIVITY_GC, /* Garbage collecting */
- ERTS_ACTIVITY_IO /* I/O including message passing to erl procs */
-} erts_activity_t;
-
-#ifdef ERTS_SMP
-
-typedef enum {
- ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
- ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
- ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY
-} erts_activity_error_t;
-
-typedef struct {
- erts_smp_atomic32_t do_block;
- struct {
- erts_smp_atomic32_t wait;
- erts_smp_atomic32_t gc;
- erts_smp_atomic32_t io;
- } in_activity;
-} erts_system_block_state_t;
-
-extern erts_system_block_state_t erts_system_block_state;
-
-int erts_is_system_blocked(erts_activity_t allowed_activities);
-void erts_block_me(void (*prepare)(void *), void (*resume)(void *), void *arg);
-void erts_register_blockable_thread(void);
-void erts_unregister_blockable_thread(void);
-void erts_note_activity_begin(erts_activity_t activity);
-void
-erts_check_block(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg);
-void erts_block_system(Uint32 allowed_activities);
-int erts_emergency_block_system(long timeout, Uint32 allowed_activities);
-void erts_release_system(void);
-void erts_system_block_init(void);
-void erts_set_activity_error(erts_activity_error_t, char *, int);
-#ifdef ERTS_ENABLE_LOCK_CHECK
-void erts_lc_activity_change_begin(void);
-void erts_lc_activity_change_end(void);
-int erts_lc_is_blocking(void);
-#define ERTS_LC_IS_BLOCKING \
- (erts_smp_pending_system_block() && erts_lc_is_blocking())
-#endif
-#endif
-
-#define erts_smp_activity_begin(NACT, PRP, RSM, ARG) \
- erts_smp_set_activity(ERTS_ACTIVITY_UNDEFINED, \
- (NACT), \
- 0, \
- (PRP), \
- (RSM), \
- (ARG), \
- __FILE__, \
- __LINE__)
-#define erts_smp_activity_change(OACT, NACT, PRP, RSM, ARG) \
- erts_smp_set_activity((OACT), \
- (NACT), \
- 0, \
- (PRP), \
- (RSM), \
- (ARG), \
- __FILE__, \
- __LINE__)
-#define erts_smp_activity_end(OACT, PRP, RSM, ARG) \
- erts_smp_set_activity((OACT), \
- ERTS_ACTIVITY_UNDEFINED, \
- 0, \
- (PRP), \
- (RSM), \
- (ARG), \
- __FILE__, \
- __LINE__)
-
-#define erts_smp_locked_activity_begin(NACT) \
- erts_smp_set_activity(ERTS_ACTIVITY_UNDEFINED, \
- (NACT), \
- 1, \
- NULL, \
- NULL, \
- NULL, \
- __FILE__, \
- __LINE__)
-#define erts_smp_locked_activity_change(OACT, NACT) \
- erts_smp_set_activity((OACT), \
- (NACT), \
- 1, \
- NULL, \
- NULL, \
- NULL, \
- __FILE__, \
- __LINE__)
-#define erts_smp_locked_activity_end(OACT) \
- erts_smp_set_activity((OACT), \
- ERTS_ACTIVITY_UNDEFINED, \
- 1, \
- NULL, \
- NULL, \
- NULL, \
- __FILE__, \
- __LINE__)
-
-
-ERTS_GLB_INLINE int erts_smp_is_system_blocked(erts_activity_t allowed_activities);
-ERTS_GLB_INLINE void erts_smp_block_system(Uint32 allowed_activities);
-ERTS_GLB_INLINE int erts_smp_emergency_block_system(long timeout,
- Uint32 allowed_activities);
-ERTS_GLB_INLINE void erts_smp_release_system(void);
-ERTS_GLB_INLINE int erts_smp_pending_system_block(void);
-ERTS_GLB_INLINE void erts_smp_chk_system_block(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg);
-ERTS_GLB_INLINE void
-erts_smp_set_activity(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg,
- char *file,
- int line);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-
-ERTS_GLB_INLINE int
-erts_smp_is_system_blocked(erts_activity_t allowed_activities)
-{
-#ifdef ERTS_SMP
- return erts_is_system_blocked(allowed_activities);
-#else
- return 1;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_block_system(Uint32 allowed_activities)
-{
-#ifdef ERTS_SMP
- erts_block_system(allowed_activities);
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_emergency_block_system(long timeout, Uint32 allowed_activities)
-{
-#ifdef ERTS_SMP
- return erts_emergency_block_system(timeout, allowed_activities);
-#else
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_release_system(void)
-{
-#ifdef ERTS_SMP
- erts_release_system();
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_pending_system_block(void)
-{
-#ifdef ERTS_SMP
- return (int) erts_smp_atomic32_read(&erts_system_block_state.do_block);
-#else
- return 0;
-#endif
-}
-
-
-ERTS_GLB_INLINE void
-erts_smp_chk_system_block(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg)
-{
-#ifdef ERTS_SMP
- if (erts_smp_pending_system_block())
- erts_block_me(prepare, resume, arg);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_set_activity(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg,
- char *file,
- int line)
-{
-#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_activity_change_begin();
-#endif
- switch (old_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- break;
- case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic32_dec(&erts_system_block_state.in_activity.wait);
- if (locked) {
- /* You are not allowed to leave activity waiting
- * without supplying the possibility to block
- * unlocked.
- */
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
- file, line);
- }
- break;
- case ERTS_ACTIVITY_GC:
- erts_smp_atomic32_dec(&erts_system_block_state.in_activity.gc);
- break;
- case ERTS_ACTIVITY_IO:
- erts_smp_atomic32_dec(&erts_system_block_state.in_activity.io);
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
- file, line);
- break;
- }
-
- /* We are not allowed to block when going to activity waiting... */
- if (new_activity != ERTS_ACTIVITY_WAIT && erts_smp_pending_system_block())
- erts_check_block(old_activity,new_activity,locked,prepare,resume,arg);
-
- switch (new_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- break;
- case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic32_inc(&erts_system_block_state.in_activity.wait);
- break;
- case ERTS_ACTIVITY_GC:
- erts_smp_atomic32_inc(&erts_system_block_state.in_activity.gc);
- break;
- case ERTS_ACTIVITY_IO:
- erts_smp_atomic32_inc(&erts_system_block_state.in_activity.io);
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
- file, line);
- break;
- }
-
- switch (new_activity) {
- case ERTS_ACTIVITY_WAIT:
- case ERTS_ACTIVITY_GC:
- case ERTS_ACTIVITY_IO:
- if (erts_smp_pending_system_block())
- erts_note_activity_begin(new_activity);
- break;
- default:
- break;
- }
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_activity_change_end();
-#endif
-
-#endif
-}
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
#undef ERTS_REFC_DEBUG
#define ERTS_REFC_DEBUG
@@ -1001,27 +765,27 @@ ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
ERTS_GLB_INLINE void
erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
{
- erts_smp_atomic_init((erts_smp_atomic_t *) refcp, val);
+ erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val);
}
ERTS_GLB_INLINE void
erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_inc((erts_smp_atomic_t *) refcp);
+ erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1035,20 +799,20 @@ ERTS_GLB_INLINE void
erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_dec((erts_smp_atomic_t *) refcp);
+ erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1062,20 +826,20 @@ ERTS_GLB_INLINE void
erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
+ erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
diff, val, min_val);
#else
- erts_smp_atomic_add((erts_smp_atomic_t *) refcp, diff);
+ erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1241,6 +1005,19 @@ void erl_bin_write(unsigned char *, int, int);
#endif
+#ifdef __WIN32__
+#ifdef ARCH_64
+#define ERTS_ALLOC_ALIGN_BYTES 16
+#define ERTS_SMALL_ABS(Small) _abs64(Small)
+#else
+#define ERTS_ALLOC_ALIGN_BYTES 8
+#define ERTS_SMALL_ABS(Small) labs(Small)
+#endif
+#else
+#define ERTS_ALLOC_ALIGN_BYTES 8
+#define ERTS_SMALL_ABS(Small) labs(Small)
+#endif
+
#ifdef __WIN32__
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index a00faff912..932d157cd8 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -107,20 +107,31 @@ static ErlTimer *tiw_min_ptr;
/* Actual interval time chosen by sys_init_time() */
static int itime; /* Constant after init */
-erts_smp_atomic_t do_time; /* set at clock interrupt */
-static ERTS_INLINE erts_aint_t do_time_read(void) { return erts_smp_atomic_read(&do_time); }
-static ERTS_INLINE erts_aint_t do_time_update(void) { return do_time_read(); }
-static ERTS_INLINE void do_time_init(void) { erts_smp_atomic_init(&do_time, 0L); }
+erts_smp_atomic32_t do_time; /* set at clock interrupt */
+static ERTS_INLINE erts_short_time_t do_time_read(void)
+{
+ return erts_smp_atomic32_read_acqb(&do_time);
+}
+
+static ERTS_INLINE erts_short_time_t do_time_update(void)
+{
+ return do_time_read();
+}
+
+static ERTS_INLINE void do_time_init(void)
+{
+ erts_smp_atomic32_init_nob(&do_time, 0);
+}
/* get the time (in units of itime) to the next timeout,
or -1 if there are no timeouts */
-static erts_aint_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
+static erts_short_time_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
{
int i, tm, nto;
- unsigned int min;
+ Uint32 min;
ErlTimer* p;
- erts_aint_t dt;
+ erts_short_time_t dt;
if (tiw_nto == 0)
return -1; /* no timeouts in wheel */
@@ -133,7 +144,7 @@ static erts_aint_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
/* start going through wheel to find next timeout */
tm = nto = 0;
- min = (unsigned int) -1; /* max unsigned int */
+ min = (Uint32) -1; /* max Uint32 */
i = tiw_pos;
do {
p = tiw[i];
@@ -162,7 +173,11 @@ static erts_aint_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
i = (i + 1) % TIW_SIZE;
} while (i != tiw_pos);
dt = do_time_read();
- return ((min >= dt) ? (min - dt) : 0);
+ if (min <= (Uint32) dt)
+ return 0;
+ if ((min - (Uint32) dt) > (Uint32) ERTS_SHORT_TIME_T_MAX)
+ return ERTS_SHORT_TIME_T_MAX;
+ return (erts_short_time_t) (min - (Uint32) dt);
}
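
The rewritten return path above clamps the remaining time so it always fits the narrower erts_short_time_t. The same rule restated as a stand-alone helper, reusing the types from the hunk and assuming erts_short_time_t is a signed 32-bit type (an assumption, not stated in the patch):

    static erts_short_time_t
    clamp_remaining(Uint32 min, Uint32 dt)
    {
        if (min <= dt)
            return 0;                      /* timeout already due */
        if (min - dt > (Uint32) ERTS_SHORT_TIME_T_MAX)
            return ERTS_SHORT_TIME_T_MAX;  /* cap instead of overflowing;
                                              the caller simply looks again */
        return (erts_short_time_t) (min - dt);
    }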
static void remove_timer(ErlTimer *p) {
@@ -191,9 +206,9 @@ static void remove_timer(ErlTimer *p) {
}
/* Private export to erl_time_sup.c */
-erts_aint_t erts_next_time(void)
+erts_short_time_t erts_next_time(void)
{
- erts_aint_t ret;
+ erts_short_time_t ret;
erts_smp_mtx_lock(&tiw_lock);
(void)do_time_update();
@@ -202,7 +217,7 @@ erts_aint_t erts_next_time(void)
return ret;
}
-static ERTS_INLINE void bump_timer_internal(erts_aint_t dt) /* PRE: tiw_lock is write-locked */
+static ERTS_INLINE void bump_timer_internal(erts_short_time_t dt) /* PRE: tiw_lock is write-locked */
{
Uint keep_pos;
Uint count;
@@ -273,7 +288,7 @@ static ERTS_INLINE void bump_timer_internal(erts_aint_t dt) /* PRE: tiw_lock is
}
}
-void erts_bump_timer(erts_aint_t dt) /* dt is value from do_time */
+void erts_bump_timer(erts_short_time_t dt) /* dt is value from do_time */
{
erts_smp_mtx_lock(&tiw_lock);
bump_timer_internal(dt);
@@ -378,8 +393,8 @@ erts_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
insert_timer(p, t);
erts_smp_mtx_unlock(&tiw_lock);
#if defined(ERTS_SMP)
- if (t <= (Uint) LONG_MAX)
- erts_sys_schedule_interrupt_timed(1, (long) t);
+ if (t <= (Uint) ERTS_SHORT_TIME_T_MAX)
+ erts_sys_schedule_interrupt_timed(1, (erts_short_time_t) t);
#endif
}
@@ -419,7 +434,7 @@ Uint
erts_time_left(ErlTimer *p)
{
Uint left;
- erts_aint_t dt;
+ erts_short_time_t dt;
erts_smp_mtx_lock(&tiw_lock);
@@ -444,7 +459,7 @@ erts_time_left(ErlTimer *p)
}
#ifdef DEBUG
-void erts_p_slpq()
+void erts_p_slpq(void)
{
int i;
ErlTimer* p;
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index a17de717bc..a36d15204e 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,6 +42,10 @@
#include "erl_threads.h"
#include "erl_smp.h"
#include "erl_time.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_sched_spec_pre_alloc.h"
+#include "beam_bp.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -75,6 +79,7 @@ typedef struct {
#ifdef ERTS_SMP
+#if 0 /* Unused */
static void
dispatch_profile_msg_q(profile_sched_msg_q *psmq)
{
@@ -86,6 +91,7 @@ dispatch_profile_msg_q(profile_sched_msg_q *psmq)
profile_scheduler_q(make_small(msg->scheduler_id), msg->state, am_undefined, msg->Ms, msg->s, msg->us);
}
}
+#endif
#endif
@@ -1691,7 +1697,11 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
erts_queue_error_logger_message(from, tuple3, bp);
}
#else
- erts_queue_message(p, NULL /* only used for smp build */, bp, tuple3, NIL);
+ erts_queue_message(p, NULL /* only used for smp build */, bp, tuple3, NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
#endif
return 0;
}
@@ -2642,7 +2652,7 @@ tailrecur_ne:
FloatDef f1, f2;
Eterm big;
#if HEAP_ON_C_STACK
- Eterm big_buf[2]; /* If HEAP_ON_C_STACK */
+ Eterm big_buf[CMP_TMP_HEAP_SIZE]; /* If HEAP_ON_C_STACK */
#else
Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
#endif
@@ -2653,42 +2663,98 @@ tailrecur_ne:
Eterm aw = a;
Eterm bw = b;
#endif
+#define MAX_LOSSLESS_FLOAT ((double)((1LL << 53) - 2))
+#define MIN_LOSSLESS_FLOAT ((double)(((1LL << 53) - 2)*-1))
+#define BIG_ARITY_FLOAT_MAX (1024 / D_EXP) /* arity of max float as a bignum */
b_tag = tag_val_def(bw);
switch(_NUMBER_CODE(a_tag, b_tag)) {
case SMALL_BIG:
- big = small_to_big(signed_val(a), big_buf);
- j = big_comp(big, bw);
+ j = big_sign(bw) ? 1 : -1;
+ break;
+ case BIG_SMALL:
+ j = big_sign(aw) ? -1 : 1;
break;
case SMALL_FLOAT:
- f1.fd = signed_val(a);
GET_DOUBLE(bw, f2);
- j = float_comp(f1.fd, f2.fd);
- break;
- case BIG_SMALL:
- big = small_to_big(signed_val(b), big_buf);
- j = big_comp(aw, big);
+ if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the no loss limit
+ f1.fd = signed_val(aw);
+ j = float_comp(f1.fd, f2.fd);
+#if ERTS_SIZEOF_ETERM == 8
+ } else if (f2.fd > (double) (MAX_SMALL + 1)) {
+ // Float is a positive bignum, i.e. bigger
+ j = -1;
+ } else if (f2.fd < (double) (MIN_SMALL - 1)) {
+ // Float is a negative bignum, i.e. smaller
+ j = 1;
+ } else { // Float is a Sint but less precise
+ j = signed_val(aw) - (Sint) f2.fd;
+ }
+#else
+ } else {
+ // If float is positive it is bigger than small
+ j = (f2.fd > 0.0) ? -1 : 1;
+ }
+#endif // ERTS_SIZEOF_ETERM == 8
break;
+ case FLOAT_BIG:
+ {
+ Wterm tmp = aw;
+ aw = bw;
+ bw = tmp;
+ }/* fall through */
case BIG_FLOAT:
- if (big_to_double(aw, &f1.fd) < 0) {
- j = big_sign(a) ? -1 : 1;
+ GET_DOUBLE(bw, f2);
+ if ((f2.fd < (double) (MAX_SMALL + 1))
+ && (f2.fd > (double) (MIN_SMALL - 1))) {
+ // Float is a Sint
+ j = big_sign(aw) ? -1 : 1;
+ } else if (big_arity(aw) > BIG_ARITY_FLOAT_MAX
+ || pow(2.0,(big_arity(aw)-1)*D_EXP) > fabs(f2.fd)) {
+ // If bignum size shows that it is bigger than the abs float
+ j = big_sign(aw) ? -1 : 1;
+ } else if (big_arity(aw) < BIG_ARITY_FLOAT_MAX
+ && (pow(2.0,(big_arity(aw))*D_EXP)-1.0) < fabs(f2.fd)) {
+ // If bignum size shows that it is smaller than the abs float
+ j = f2.fd < 0 ? 1 : -1;
+ } else if (f2.fd < MAX_LOSSLESS_FLOAT && f2.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the no loss limit
+ if (big_to_double(aw, &f1.fd) < 0) {
+ j = big_sign(aw) ? -1 : 1;
+ } else {
+ j = float_comp(f1.fd, f2.fd);
+ }
} else {
- GET_DOUBLE(bw, f2);
- j = float_comp(f1.fd, f2.fd);
+ big = double_to_big(f2.fd, big_buf);
+ j = big_comp(aw, big);
+ }
+ if (_NUMBER_CODE(a_tag, b_tag) == FLOAT_BIG) {
+ j = -j;
}
break;
case FLOAT_SMALL:
GET_DOUBLE(aw, f1);
- f2.fd = signed_val(b);
- j = float_comp(f1.fd, f2.fd);
- break;
- case FLOAT_BIG:
- if (big_to_double(bw, &f2.fd) < 0) {
- j = big_sign(b) ? 1 : -1;
- } else {
- GET_DOUBLE(aw, f1);
+ if (f1.fd < MAX_LOSSLESS_FLOAT && f1.fd > MIN_LOSSLESS_FLOAT) {
+ // Float is within the no loss limit
+ f2.fd = signed_val(bw);
j = float_comp(f1.fd, f2.fd);
+#if ERTS_SIZEOF_ETERM == 8
+ } else if (f1.fd > (double) (MAX_SMALL + 1)) {
+ // Float is a positive bignum, i.e. bigger
+ j = 1;
+ } else if (f1.fd < (double) (MIN_SMALL - 1)) {
+ // Float is a negative bignum, i.e. smaller
+ j = -1;
+ } else { // Float is a Sint but less precise
+ j = (Sint) f1.fd - signed_val(bw);
+ }
+#else
+ } else {
+ // If float is positive it is bigger than small
+ j = (f1.fd > 0.0) ? 1 : -1;
}
+#endif // ERTS_SIZEOF_ETERM == 8
break;
default:
j = b_tag - a_tag;
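
The MAX_LOSSLESS_FLOAT bound introduced in this hunk comes from the 53-bit significand of an IEEE 754 double: every integer of magnitude below 2^53 converts exactly, so float_comp() is safe inside that range, while just above it distinct integers collapse to the same double and the code falls back to sign and magnitude tests. The patch uses (1LL << 53) - 2, a slightly conservative version of the bound. A small stand-alone illustration (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        long long a = 1LL << 53;   /* 9007199254740992 */
        long long b = a + 1;       /* 9007199254740993 */
        /* Both convert to the same double, so this prints 1:
         * precision is lost just above 2^53. */
        printf("%d\n", (double) a == (double) b);
        return 0;
    }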
@@ -2803,9 +2869,9 @@ store_external_or_ref_in_proc_(Process *proc, Eterm ns)
return store_external_or_ref_(&hp, &MSO(proc), ns);
}
-void bin_write(int to, void *to_arg, byte* buf, int sz)
+void bin_write(int to, void *to_arg, byte* buf, size_t sz)
{
- int i;
+ size_t i;
for (i=0;i<sz;i++) {
if (IS_DIGIT(buf[i]))
@@ -2880,17 +2946,17 @@ char* Sint_to_buf(Sint n, struct Sint_buf *buf)
*/
Eterm
-buf_to_intlist(Eterm** hpp, char *buf, int len, Eterm tail)
+buf_to_intlist(Eterm** hpp, char *buf, size_t len, Eterm tail)
{
Eterm* hp = *hpp;
+ size_t i = len;
- buf += (len-1);
- while(len > 0) {
- tail = CONS(hp, make_small((byte)*buf), tail);
+ while(i != 0) {
+ --i;
+ tail = CONS(hp, make_small((Uint)(byte)buf[i]), tail);
hp += 2;
- buf--;
- len--;
}
+
*hpp = hp;
return tail;
}
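
The loop above now indexes from the end of the buffer instead of walking a pointer backwards; the observable result is unchanged: the bytes come out in buffer order. A short worked trace (example values, not from the patch):

    /* buf = "abc", len = 3, tail = NIL:
     * the loop conses buf[2]='c', then buf[1]='b', then buf[0]='a'
     * onto the front of the list, so the result is [97,98,99]. */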
@@ -3250,10 +3316,10 @@ erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
#endif
-static Sint trim_threshold;
-static Sint top_pad;
-static Sint mmap_threshold;
-static Sint mmap_max;
+static int trim_threshold;
+static int top_pad;
+static int mmap_threshold;
+static int mmap_max;
Uint tot_bin_allocated;
@@ -3276,8 +3342,8 @@ int
sys_alloc_opt(int opt, int value)
{
#if HAVE_MALLOPT
- Sint m_opt;
- Sint *curr_val;
+ int m_opt;
+ int *curr_val;
switch(opt) {
case SYS_ALLOC_OPT_TRIM_THRESHOLD:
@@ -3317,7 +3383,7 @@ sys_alloc_opt(int opt, int value)
}
if(mallopt(m_opt, value)) {
- *curr_val = (Sint) value;
+ *curr_val = value;
return 1;
}
@@ -3336,686 +3402,6 @@ sys_alloc_stat(SysAllocStat *sasp)
}
-#ifdef ERTS_SMP
-
-/* Local system block state */
-
-struct {
- int emergency;
- long emergency_timeout;
- erts_smp_cnd_t watchdog_cnd;
- erts_smp_tid_t watchdog_tid;
- int threads_to_block;
- int have_blocker;
- erts_smp_tid_t blocker_tid;
- int recursive_block;
- Uint32 allowed_activities;
- erts_smp_tsd_key_t blockable_key;
- erts_smp_mtx_t mtx;
- erts_smp_cnd_t cnd;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- int activity_changing;
- int checking;
-#endif
-} system_block_state;
-
-/* Global system block state */
-erts_system_block_state_t erts_system_block_state;
-
-
-static ERTS_INLINE int
-is_blockable_thread(void)
-{
- return erts_smp_tsd_get(system_block_state.blockable_key) != NULL;
-}
-
-static ERTS_INLINE int
-is_blocker(void)
-{
- return (system_block_state.have_blocker
- && erts_smp_equal_tids(system_block_state.blocker_tid,
- erts_smp_thr_self()));
-}
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
-int
-erts_lc_is_blocking(void)
-{
- int res;
- erts_smp_mtx_lock(&system_block_state.mtx);
- res = erts_smp_pending_system_block() && is_blocker();
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return res;
-}
-#endif
-
-static ERTS_INLINE void
-block_me(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg,
- int mtx_locked,
- int want_to_block,
- int update_act_changing,
- profile_sched_msg_q *psmq)
-{
- if (prepare)
- (*prepare)(arg);
-
- /* Locks might be held... */
-
- if (!mtx_locked)
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- if (erts_smp_pending_system_block() && !is_blocker()) {
- int is_blockable = is_blockable_thread();
- ASSERT(is_blockable);
-
- if (is_blockable)
- system_block_state.threads_to_block--;
-
- if (erts_system_profile_flags.scheduler && psmq) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (esdp) {
- profile_sched_msg *msg = NULL;
-
- ASSERT(psmq->n < 2);
- msg = &((psmq->msg)[psmq->n]);
- msg->scheduler_id = esdp->no;
- get_now(&(msg->Ms), &(msg->s), &(msg->us));
- msg->no_schedulers = 0;
- msg->state = am_inactive;
- psmq->n++;
- }
- }
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (update_act_changing)
- system_block_state.activity_changing--;
-#endif
-
- erts_smp_cnd_broadcast(&system_block_state.cnd);
-
- do {
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- } while (erts_smp_pending_system_block()
- && !(want_to_block && !system_block_state.have_blocker));
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (update_act_changing)
- system_block_state.activity_changing++;
-#endif
- if (erts_system_profile_flags.scheduler && psmq) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (esdp) {
- profile_sched_msg *msg = NULL;
-
- ASSERT(psmq->n < 2);
- msg = &((psmq->msg)[psmq->n]);
- msg->scheduler_id = esdp->no;
- get_now(&(msg->Ms), &(msg->s), &(msg->us));
- msg->no_schedulers = 0;
- msg->state = am_active;
- psmq->n++;
- }
- }
-
- if (is_blockable)
- system_block_state.threads_to_block++;
- }
-
- if (!mtx_locked)
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (resume)
- (*resume)(arg);
-}
-
-void
-erts_block_me(void (*prepare)(void *),
- void (*resume)(void *),
- void *arg)
-{
- profile_sched_msg_q psmq;
- psmq.n = 0;
- if (prepare)
- (*prepare)(arg);
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-#endif
-
- block_me(NULL, NULL, NULL, 0, 0, 0, &psmq);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
-
- if (resume)
- (*resume)(arg);
-}
-
-void
-erts_register_blockable_thread(void)
-{
- profile_sched_msg_q psmq;
- psmq.n = 0;
- if (!is_blockable_thread()) {
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.threads_to_block++;
- erts_smp_tsd_set(system_block_state.blockable_key,
- (void *) &erts_system_block_state);
-
- /* Someone might be waiting for us to block... */
- if (erts_smp_pending_system_block())
- block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
- }
-}
-
-void
-erts_unregister_blockable_thread(void)
-{
- if (is_blockable_thread()) {
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.threads_to_block--;
- ASSERT(system_block_state.threads_to_block >= 0);
- erts_smp_tsd_set(system_block_state.blockable_key, NULL);
-
- /* Someone might be waiting for us to block... */
- if (erts_smp_pending_system_block())
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- erts_smp_mtx_unlock(&system_block_state.mtx);
- }
-}
-
-void
-erts_note_activity_begin(erts_activity_t activity)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- if (erts_smp_pending_system_block()) {
- Uint32 broadcast = 0;
- switch (activity) {
- case ERTS_ACTIVITY_GC:
- broadcast = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_GC);
- break;
- case ERTS_ACTIVITY_IO:
- broadcast = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_IO);
- break;
- case ERTS_ACTIVITY_WAIT:
- broadcast = 1;
- break;
- default:
- abort();
- break;
- }
- if (broadcast)
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- }
- erts_smp_mtx_unlock(&system_block_state.mtx);
-}
-
-void
-erts_check_block(erts_activity_t old_activity,
- erts_activity_t new_activity,
- int locked,
- void (*prepare)(void *),
- void (*resume)(void *),
- void *arg)
-{
- int do_block;
- profile_sched_msg_q psmq;
-
- psmq.n = 0;
- if (!locked && prepare)
- (*prepare)(arg);
-
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- /* First check if it is ok to block... */
- if (!locked)
- do_block = 1;
- else {
- switch (old_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- do_block = 0;
- break;
- case ERTS_ACTIVITY_GC:
- do_block = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_GC);
- break;
- case ERTS_ACTIVITY_IO:
- do_block = (system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_IO);
- break;
- case ERTS_ACTIVITY_WAIT:
- /* You are not allowed to leave activity waiting
- * without supplying the possibility to block
- * unlocked.
- */
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
- __FILE__, __LINE__);
- do_block = 0;
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
- __FILE__, __LINE__);
- do_block = 0;
- break;
- }
- }
-
- if (do_block) {
- /* ... then check if it is necessary to block... */
-
- switch (new_activity) {
- case ERTS_ACTIVITY_UNDEFINED:
- do_block = 1;
- break;
- case ERTS_ACTIVITY_GC:
- do_block = !(system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_GC);
- break;
- case ERTS_ACTIVITY_IO:
- do_block = !(system_block_state.allowed_activities
- & ERTS_BS_FLG_ALLOW_IO);
- break;
- case ERTS_ACTIVITY_WAIT:
- /* No need to block if we are going to wait */
- do_block = 0;
- break;
- default:
- erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
- __FILE__, __LINE__);
- break;
- }
- }
-
- if (do_block) {
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (!locked) {
- /* Only system_block_state.mtx should be held */
- erts_lc_check_exact(&system_block_state.mtx.lc, 1);
- }
-#endif
-
- block_me(NULL, NULL, NULL, 1, 0, 1, &psmq);
-
- }
-
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
-
- if (!locked && resume)
- (*resume)(arg);
-}
-
-
-
-void
-erts_set_activity_error(erts_activity_error_t error, char *file, int line)
-{
- switch (error) {
- case ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED:
- erl_exit(1, "%s:%d: Fatal error: Leaving activity waiting without "
- "supplying the possibility to block unlocked.",
- file, line);
- break;
- case ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY:
- erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
- file, line);
- break;
- case ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY:
- erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
- file, line);
- break;
- default:
- erl_exit(1, "%s:%d: Internal error in erts_smp_set_activity()",
- file, line);
- break;
- }
-
-}
-
-
-static ERTS_INLINE erts_aint32_t
-threads_not_under_control(void)
-{
- erts_aint32_t res = system_block_state.threads_to_block;
-
- /* Waiting is always an allowed activity... */
- res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.wait);
-
- if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
- res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.gc);
-
- if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
- res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.io);
-
- if (res < 0) {
- ASSERT(0);
- return 0;
- }
- return res;
-}
-
-/*
- * erts_block_system() blocks all threads registered as blockable.
- * It doesn't return until either all threads have blocked (0 is returned)
- * or it has timed out (ETIMEDOUT) is returned.
- *
- * If allowed activities == 0, blocked threads will release all locks
- * before blocking.
- *
- * If allowed_activities is != 0, erts_block_system() will allow blockable
- * threads to continue executing as long as they are doing an allowed
- * activity. When they are done with the allowed activity they will block,
- * *but* they will block holding locks. Therefore, the thread calling
- * erts_block_system() must *not* try to aquire any locks that might be
- * held by blocked threads holding locks from allowed activities.
- *
- * Currently allowed_activities are:
- * * ERTS_BS_FLG_ALLOW_GC Thread continues with garbage
- * collection and blocks with
- * main process lock on current
- * process locked.
- * * ERTS_BS_FLG_ALLOW_IO Thread continues with I/O
- */
-
-void
-erts_block_system(Uint32 allowed_activities)
-{
- int do_block;
- profile_sched_msg_q psmq;
-
- psmq.n = 0;
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-#endif
-
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- do_block = erts_smp_pending_system_block();
- if (do_block
- && system_block_state.have_blocker
- && erts_smp_equal_tids(system_block_state.blocker_tid,
- erts_smp_thr_self())) {
- ASSERT(system_block_state.recursive_block >= 0);
- system_block_state.recursive_block++;
-
- /* You are not allowed to restrict allowed activites
- in a recursive block! */
- ERTS_SMP_LC_ASSERT((system_block_state.allowed_activities
- & ~allowed_activities) == 0);
- }
- else {
-
- erts_smp_atomic32_inc(&erts_system_block_state.do_block);
-
- /* Someone else might be waiting for us to block... */
- if (do_block) {
- do_block_me:
- block_me(NULL, NULL, NULL, 1, 1, 0, &psmq);
- }
-
- ASSERT(!system_block_state.have_blocker);
- system_block_state.have_blocker = 1;
- system_block_state.blocker_tid = erts_smp_thr_self();
- system_block_state.allowed_activities = allowed_activities;
-
- if (is_blockable_thread())
- system_block_state.threads_to_block--;
-
- while (threads_not_under_control() && !system_block_state.emergency)
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
-
- if (system_block_state.emergency) {
- system_block_state.have_blocker = 0;
- goto do_block_me;
- }
- }
-
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0 )
- dispatch_profile_msg_q(&psmq);
-}
-
-/*
- * erts_emergency_block_system() should only be called when we are
- * about to write a crash dump...
- */
-
-int
-erts_emergency_block_system(long timeout, Uint32 allowed_activities)
-{
- int res = 0;
- long another_blocker;
-
- erts_smp_mtx_lock(&system_block_state.mtx);
-
- if (system_block_state.emergency) {
- /* Argh... */
- res = EINVAL;
- goto done;
- }
-
- another_blocker = erts_smp_pending_system_block();
- system_block_state.emergency = 1;
- erts_smp_atomic32_inc(&erts_system_block_state.do_block);
-
- if (another_blocker) {
- if (is_blocker()) {
- erts_smp_atomic32_dec(&erts_system_block_state.do_block);
- res = 0;
- goto done;
- }
- /* kick the other blocker */
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- while (system_block_state.have_blocker)
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- }
-
- ASSERT(!system_block_state.have_blocker);
- system_block_state.have_blocker = 1;
- system_block_state.blocker_tid = erts_smp_thr_self();
- system_block_state.allowed_activities = allowed_activities;
-
- if (is_blockable_thread())
- system_block_state.threads_to_block--;
-
- if (timeout < 0) {
- while (threads_not_under_control())
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- }
- else {
- system_block_state.emergency_timeout = timeout;
- erts_smp_cnd_signal(&system_block_state.watchdog_cnd);
-
- while (system_block_state.emergency_timeout >= 0
- && threads_not_under_control()) {
- erts_smp_cnd_wait(&system_block_state.cnd,
- &system_block_state.mtx);
- }
- }
- done:
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return res;
-}
-
-void
-erts_release_system(void)
-{
- long do_block;
- profile_sched_msg_q psmq;
-
- psmq.n = 0;
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-#endif
-
- erts_smp_mtx_lock(&system_block_state.mtx);
- ASSERT(is_blocker());
-
- ASSERT(system_block_state.recursive_block >= 0);
-
- if (system_block_state.recursive_block)
- system_block_state.recursive_block--;
- else {
- do_block = erts_smp_atomic32_dectest(&erts_system_block_state.do_block);
- system_block_state.have_blocker = 0;
- if (is_blockable_thread())
- system_block_state.threads_to_block++;
- else
- do_block = 0;
-
- /* Someone else might be waiting for us to block... */
- if (do_block)
- block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
- else
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- }
-
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_system_profile_flags.scheduler && psmq.n > 0)
- dispatch_profile_msg_q(&psmq);
-}
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
-
-void
-erts_lc_activity_change_begin(void)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.activity_changing++;
- erts_smp_mtx_unlock(&system_block_state.mtx);
-}
-
-void
-erts_lc_activity_change_end(void)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.activity_changing--;
- if (system_block_state.checking && !system_block_state.activity_changing)
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- erts_smp_mtx_unlock(&system_block_state.mtx);
-}
-
-#endif
-
-int
-erts_is_system_blocked(erts_activity_t allowed_activities)
-{
- int blkd;
-
- erts_smp_mtx_lock(&system_block_state.mtx);
- blkd = (erts_smp_pending_system_block()
- && system_block_state.have_blocker
- && erts_smp_equal_tids(system_block_state.blocker_tid,
- erts_smp_thr_self())
- && !(system_block_state.allowed_activities & ~allowed_activities));
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (blkd) {
- system_block_state.checking = 1;
- while (system_block_state.activity_changing)
- erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
- system_block_state.checking = 0;
- blkd = !threads_not_under_control();
- }
-#endif
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return blkd;
-}
-
-static void *
-emergency_watchdog(void *unused)
-{
- erts_smp_mtx_lock(&system_block_state.mtx);
- while (1) {
- long timeout;
- while (system_block_state.emergency_timeout < 0)
- erts_smp_cnd_wait(&system_block_state.watchdog_cnd, &system_block_state.mtx);
- timeout = system_block_state.emergency_timeout;
- erts_smp_mtx_unlock(&system_block_state.mtx);
-
- if (erts_disable_tolerant_timeofday)
- erts_milli_sleep(timeout);
- else {
- SysTimeval to;
- erts_get_timeval(&to);
- to.tv_sec += timeout / 1000;
- to.tv_usec += timeout % 1000;
-
- while (1) {
- SysTimeval curr;
- erts_milli_sleep(timeout);
- erts_get_timeval(&curr);
- if (curr.tv_sec > to.tv_sec
- || (curr.tv_sec == to.tv_sec && curr.tv_usec >= to.tv_usec)) {
- break;
- }
- timeout = (to.tv_sec - curr.tv_sec)*1000;
- timeout += (to.tv_usec - curr.tv_usec)/1000;
- }
- }
-
- erts_smp_mtx_lock(&system_block_state.mtx);
- system_block_state.emergency_timeout = -1;
- erts_smp_cnd_broadcast(&system_block_state.cnd);
- }
- erts_smp_mtx_unlock(&system_block_state.mtx);
- return NULL;
-}
-
-void
-erts_system_block_init(void)
-{
- erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
- /* Local state... */
- system_block_state.emergency = 0;
- system_block_state.emergency_timeout = -1;
- erts_smp_cnd_init(&system_block_state.watchdog_cnd);
- system_block_state.threads_to_block = 0;
- system_block_state.have_blocker = 0;
- /* system_block_state.block_tid */
- system_block_state.recursive_block = 0;
- system_block_state.allowed_activities = 0;
- erts_smp_tsd_key_create(&system_block_state.blockable_key);
- erts_smp_mtx_init(&system_block_state.mtx, "system_block");
- erts_smp_cnd_init(&system_block_state.cnd);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- system_block_state.activity_changing = 0;
- system_block_state.checking = 0;
-#endif
-
- thr_opts.suggested_stack_size = 8;
- erts_smp_thr_create(&system_block_state.watchdog_tid,
- emergency_watchdog,
- NULL,
- &thr_opts);
-
- /* Global state... */
-
- erts_smp_atomic32_init(&erts_system_block_state.do_block, 0);
- erts_smp_atomic32_init(&erts_system_block_state.in_activity.wait, 0);
- erts_smp_atomic32_init(&erts_system_block_state.in_activity.gc, 0);
- erts_smp_atomic32_init(&erts_system_block_state.in_activity.io, 0);
-
- /* Make sure blockable threads unregister when exiting... */
- erts_smp_install_exit_handler(erts_unregister_blockable_thread);
-}
-
-
-#endif /* #ifdef ERTS_SMP */
-
char *
erts_read_env(char *key)
{
@@ -4077,11 +3463,9 @@ void erts_silence_warn_unused_result(long unused)
* Handy functions when using a debugger - don't use in the code!
*/
-void upp(buf,sz)
-byte* buf;
-int sz;
+void upp(byte *buf, size_t sz)
{
- bin_write(ERTS_PRINT_STDERR,NULL,buf,sz);
+ bin_write(ERTS_PRINT_STDERR, NULL, buf, sz);
}
void pat(Eterm atom)
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index f0ff3f54c5..603d1d47b6 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -55,6 +55,7 @@
#define FILE_READ_LINE 29
#define FILE_FDATASYNC 30
#define FILE_FADVISE 31
+#define FILE_SENDFILE 32
/* Return codes */
@@ -69,6 +70,7 @@
#define FILE_RESP_EOF 8
#define FILE_RESP_FNAME 9
#define FILE_RESP_ALL_DATA 10
+#define FILE_RESP_LFNAME 11
/* Options */
@@ -97,12 +99,20 @@
# include "config.h"
#endif
#include <stdlib.h>
+
+/* Need (NON)BLOCKING macros for sendfile */
+#ifndef WANT_NONBLOCKING
+#define WANT_NONBLOCKING
+#endif
+
#include "sys.h"
+
#include "erl_driver.h"
#include "erl_efile.h"
#include "erl_threads.h"
#include "zlib.h"
#include "gzio.h"
+#include "dtrace-wrapper.h"
#include <ctype.h>
#include <sys/types.h>
@@ -110,6 +120,39 @@ void erl_exit(int n, char *fmt, ...);
static ErlDrvSysInfo sys_info;
+/* For explanation of this var, see comment for same var in erl_async.c */
+static unsigned gcc_optimizer_hack = 0;
+
+#ifdef USE_VM_PROBES
+
+#define DTRACE_EFILE_BUFSIZ 128
+
+#define DTRACE_INVOKE_SETUP(op) \
+ do { DTRACE3(efile_drv_int_entry, d->sched_i1, d->sched_i2, op); } while (0)
+#define DTRACE_INVOKE_SETUP_BY_NAME(op) \
+ struct t_data *d = (struct t_data *) data ; \
+ DTRACE_INVOKE_SETUP(op)
+#define DTRACE_INVOKE_RETURN(op) \
+ do { DTRACE3(efile_drv_int_return, d->sched_i1, d->sched_i2, \
+ op); } while (0) ; gcc_optimizer_hack++ ;
+
+/* Assign human-friendlier id numbers to scheduler & I/O worker threads */
+int dt_driver_idnum = 0;
+int dt_driver_io_worker_base = 5000;
+erts_mtx_t dt_driver_mutex;
+pthread_key_t dt_driver_key;
+
+typedef struct {
+ int thread_num;
+ Uint64 tag;
+} dt_private;
+
+dt_private *get_dt_private(int);
+#else /* USE_VM_PROBES */
+#define DTRACE_INVOKE_SETUP(op) do {} while (0)
+#define DTRACE_INVOKE_SETUP_BY_NAME(op) do {} while (0)
+#define DTRACE_INVOKE_RETURN(op) do {} while (0)
+#endif /* USE_VM_PROBES */
/* #define TRACE 1 */
#ifdef TRACE
@@ -139,6 +182,22 @@ static ErlDrvSysInfo sys_info;
#define MUTEX_UNLOCK(m)
#endif
+
+/**
+ * On DARWIN sendfile can deadlock with close if called in
+ * different threads. Until Apple fixes sendfile so that it is no
+ * longer buggy, we disable use of the async pool on
+ * DARWIN. The testcase t_sendfile_crashduring reproduces
+ * this error when using +A 10.
+ */
+#if defined(__APPLE__) && defined(__MACH__)
+#define USE_THRDS_FOR_SENDFILE 0
+#else
+#define USE_THRDS_FOR_SENDFILE (sys_info.async_threads > 0)
+#endif /* defined(__APPLE__) && defined(__MACH__) */
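
A sketch of how a driver command might branch on this macro: dispatch the sendfile job to the async thread pool only when the platform allows it, otherwise run it inline. Here invoke_sendfile, d and desc are hypothetical names; only free_sendfile is declared by the patch.

    /* Hypothetical dispatch (not from the patch). */
    if (USE_THRDS_FOR_SENDFILE)
        driver_async(desc->port, NULL, invoke_sendfile, d, free_sendfile);
    else
        invoke_sendfile(d);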
+
+
+
#if 0
/* Experimental, for forcing all file operations to use the same thread. */
static unsigned file_fixed_key = 1;
@@ -149,6 +208,9 @@ static ErlDrvSysInfo sys_info;
#ifdef FILENAMES_16BIT
+#ifdef USE_VM_PROBES
+#error 16-bit characters in filenames combined with dtrace are not supported.
+#endif
# define FILENAME_BYTELEN(Str) filename_len_16bit(Str)
# define FILENAME_COPY(To,From) filename_cpy_16bit((To),(From))
# define FILENAME_CHARSIZE 2
@@ -184,6 +246,7 @@ static ErlDrvSysInfo sys_info;
# define RESBUFSIZE BUFSIZ
#endif
+#define READDIR_CHUNKS (5)
@@ -215,17 +278,26 @@ typedef unsigned char uchar;
static ErlDrvData file_start(ErlDrvPort port, char* command);
static int file_init(void);
static void file_stop(ErlDrvData);
-static void file_output(ErlDrvData, char* buf, int len);
-static int file_control(ErlDrvData, unsigned int command,
- char* buf, int len, char **rbuf, int rlen);
+static void file_output(ErlDrvData, char* buf, ErlDrvSizeT len);
+static ErlDrvSSizeT file_control(ErlDrvData, unsigned int command,
+ char* buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen);
static void file_timeout(ErlDrvData);
static void file_outputv(ErlDrvData, ErlIOVec*);
static void file_async_ready(ErlDrvData, ErlDrvThreadData);
static void file_flush(ErlDrvData);
+#ifdef HAVE_SENDFILE
+static void file_ready_output(ErlDrvData data, ErlDrvEvent event);
+static void file_stop_select(ErlDrvEvent event, void* _);
+#endif /* HAVE_SENDFILE */
enum e_timer {timer_idle, timer_again, timer_write};
+#ifdef HAVE_SENDFILE
+enum e_sendfile {sending, not_sending};
+static void free_sendfile(void *data);
+#endif /* HAVE_SENDFILE */
struct t_data;
@@ -240,6 +312,9 @@ typedef struct {
struct t_data *cq_head; /* Queue of incoming commands */
struct t_data *cq_tail; /* -""- */
enum e_timer timer_state;
+#ifdef HAVE_SENDFILE
+ enum e_sendfile sendfile_state;
+#endif /* HAVE_SENDFILE */
size_t read_bufsize;
ErlDrvBinary *read_binp;
size_t read_offset;
@@ -251,6 +326,10 @@ typedef struct {
ErlDrvPDL q_mtx; /* Mutex for the driver queue, known by the emulator. Also used for
mutual exclusion when accessing field(s) below. */
size_t write_buffered;
+#ifdef USE_VM_PROBES
+ int idnum; /* Unique ID # for this driver thread/desc */
+ char port_str[DTRACE_TERM_BUF_SIZE];
+#endif
} file_descriptor;
@@ -262,7 +341,11 @@ struct erl_drv_entry efile_driver_entry = {
file_stop,
file_output,
NULL,
+#ifdef HAVE_SENDFILE
+ file_ready_output,
+#else
NULL,
+#endif /* HAVE_SENDFILE */
"efile",
NULL,
NULL,
@@ -277,7 +360,13 @@ struct erl_drv_entry efile_driver_entry = {
ERL_DRV_EXTENDED_MAJOR_VERSION,
ERL_DRV_EXTENDED_MINOR_VERSION,
ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL,
+ NULL,
+#ifdef HAVE_SENDFILE
+ file_stop_select
+#else
NULL
+#endif /* HAVE_SENDFILE */
};
@@ -317,15 +406,16 @@ struct t_preadv {
Sint64 offsets[1];
};
-#define READDIR_BUFSIZE (8*1024)
-#if READDIR_BUFSIZE < (FILENAME_CHARSIZE*2*(MAXPATHLEN+1))
+#define READDIR_BUFSIZE (8*1024)*READDIR_CHUNKS
+#if READDIR_BUFSIZE < (1 + (2 + MAXPATHLEN)*FILENAME_CHARSIZE*READDIR_CHUNKS)
# undef READDIR_BUFSIZE
-# define READDIR_BUFSIZE (FILENAME_CHARSIZE*2*(MAXPATHLEN+1))
+# define READDIR_BUFSIZE (1 + (2 + MAXPATHLEN)*FILENAME_CHARSIZE*READDIR_CHUNKS)
#endif
struct t_readdir_buf {
- struct t_readdir_buf *next;
- char buf[READDIR_BUFSIZE];
+ struct t_readdir_buf *next;
+ size_t n;
+ char buf[READDIR_BUFSIZE];
};
struct t_data
@@ -337,6 +427,11 @@ struct t_data
void (*free)(void *);
int again;
int reply;
+#ifdef USE_VM_PROBES
+ int sched_i1;
+ Uint64 sched_i2;
+ char sched_utag[DTRACE_EFILE_BUFSIZ+1];
+#endif
int result_ok;
Efile_error errInfo;
int flags;
@@ -395,12 +490,20 @@ struct t_data
Sint64 length;
int advise;
} fadvise;
+#ifdef HAVE_SENDFILE
+ struct {
+ ErlDrvPort port;
+ ErlDrvPDL q_mtx;
+ int out_fd;
+ off_t offset;
+ Uint64 nbytes;
+ Uint64 written;
+ } sendfile;
+#endif /* HAVE_SENDFILE */
} c;
char b[1];
};
-
-
#define EF_ALLOC(S) driver_alloc((S))
#define EF_REALLOC(P, S) driver_realloc((P), (S))
#define EF_SAFE_ALLOC(S) ef_safe_alloc((S))
@@ -429,7 +532,7 @@ static void *ef_safe_realloc(void *op, Uint s)
* ErlIOVec manipulation functions.
*/
-/* char EV_CHAR(ErlIOVec *ev, int p, int q) */
+/* char EV_CHAR_P(ErlIOVec *ev, int p, int q) */
#define EV_CHAR_P(ev, p, q) \
(((char *)(ev)->iov[(q)].iov_base) + (p))
@@ -482,7 +585,6 @@ static void *ef_safe_realloc(void *op, Uint s)
: 0)
-
#if 0
static void ev_clear(ErlIOVec *ev) {
@@ -610,7 +712,6 @@ static struct t_data *cq_deq(file_descriptor *desc) {
}
-
/*********************************************************************
* Driver entry point -> init
*/
@@ -625,6 +726,12 @@ file_init(void)
? atoi(buf)
: 0);
driver_system_info(&sys_info, sizeof(ErlDrvSysInfo));
+
+#ifdef USE_VM_PROBES
+ erts_mtx_init(&dt_driver_mutex, "efile_drv dtrace mutex");
+ pthread_key_create(&dt_driver_key, NULL);
+#endif /* USE_VM_PROBES */
+
return 0;
}
@@ -652,6 +759,9 @@ file_start(ErlDrvPort port, char* command)
desc->cq_head = NULL;
desc->cq_tail = NULL;
desc->timer_state = timer_idle;
+#ifdef HAVE_SENDFILE
+ desc->sendfile_state = not_sending;
+#endif
desc->read_bufsize = 0;
desc->read_binp = NULL;
desc->read_offset = 0;
@@ -661,6 +771,10 @@ file_start(ErlDrvPort port, char* command)
desc->write_error = 0;
MUTEX_INIT(desc->q_mtx, port); /* Refc is one, referenced by emulator now */
desc->write_buffered = 0;
+#ifdef USE_VM_PROBES
+ dtrace_drvport_str(port, desc->port_str);
+ get_dt_private(0); /* throw away return value */
+#endif /* USE_VM_PROBES */
return (ErlDrvData) desc;
}
@@ -680,8 +794,10 @@ static void do_close(int flags, SWord fd) {
static void invoke_close(void *data)
{
struct t_data *d = (struct t_data *) data;
+ DTRACE_INVOKE_SETUP(FILE_CLOSE);
d->again = 0;
do_close(d->flags, d->fd);
+ DTRACE_INVOKE_RETURN(FILE_CLOSE);
}
/*********************************************************************
@@ -759,7 +875,16 @@ static void reply_Uint_posix_error(file_descriptor *desc, Uint num,
driver_output2(desc->port, response, t-response, NULL, 0);
}
+static void reply_string_error(file_descriptor *desc, char* str) {
+ char response[256]; /* Response buffer. */
+ char* s;
+ char* t;
+ response[0] = FILE_RESP_ERROR;
+ for (s = str, t = response+1; *s; s++, t++)
+ *t = tolower(*s);
+ driver_output2(desc->port, response, t-response, NULL, 0);
+}
static int reply_error(file_descriptor *desc,
Efile_error *errInfo) /* The error codes. */
@@ -890,8 +1015,6 @@ static int reply_eof(file_descriptor *desc) {
driver_output2(desc->port, &c, 1, NULL, 0);
return 0;
}
-
-
static void invoke_name(void *data, int (*f)(Efile_error *, char *))
{
@@ -904,49 +1027,63 @@ static void invoke_name(void *data, int (*f)(Efile_error *, char *))
static void invoke_mkdir(void *data)
{
+ DTRACE_INVOKE_SETUP_BY_NAME(FILE_MKDIR);
invoke_name(data, efile_mkdir);
+ DTRACE_INVOKE_RETURN(FILE_MKDIR);
}
static void invoke_rmdir(void *data)
{
+ DTRACE_INVOKE_SETUP_BY_NAME(FILE_RMDIR);
invoke_name(data, efile_rmdir);
+ DTRACE_INVOKE_RETURN(FILE_RMDIR);
}
static void invoke_delete_file(void *data)
{
+ DTRACE_INVOKE_SETUP_BY_NAME(FILE_DELETE);
invoke_name(data, efile_delete_file);
+ DTRACE_INVOKE_RETURN(FILE_DELETE);
}
static void invoke_chdir(void *data)
{
+ DTRACE_INVOKE_SETUP_BY_NAME(FILE_CHDIR);
invoke_name(data, efile_chdir);
+ DTRACE_INVOKE_RETURN(FILE_CHDIR);
}
static void invoke_fdatasync(void *data)
{
struct t_data *d = (struct t_data *) data;
int fd = (int) d->fd;
+ DTRACE_INVOKE_SETUP(FILE_FDATASYNC);
d->again = 0;
d->result_ok = efile_fdatasync(&d->errInfo, fd);
+ DTRACE_INVOKE_RETURN(FILE_FDATASYNC);
}
static void invoke_fsync(void *data)
{
struct t_data *d = (struct t_data *) data;
int fd = (int) d->fd;
+ DTRACE_INVOKE_SETUP(FILE_FSYNC);
d->again = 0;
d->result_ok = efile_fsync(&d->errInfo, fd);
+ DTRACE_INVOKE_RETURN(FILE_FSYNC);
}
static void invoke_truncate(void *data)
{
struct t_data *d = (struct t_data *) data;
int fd = (int) d->fd;
+ DTRACE_INVOKE_SETUP(FILE_TRUNCATE);
d->again = 0;
d->result_ok = efile_truncate_file(&d->errInfo, &fd, d->flags);
+ DTRACE_INVOKE_RETURN(FILE_TRUNCATE);
}
static void invoke_read(void *data)
@@ -954,6 +1091,7 @@ static void invoke_read(void *data)
struct t_data *d = (struct t_data *) data;
int status, segment;
size_t size, read_size;
+ DTRACE_INVOKE_SETUP(FILE_READ);
segment = d->again && d->c.read.bin_size >= 2*FILE_SEGMENT_READ;
if (segment) {
@@ -988,6 +1126,7 @@ static void invoke_read(void *data)
} else {
d->again = 0;
}
+ DTRACE_INVOKE_RETURN(FILE_READ);
}
static void free_read(void *data)
@@ -1004,13 +1143,14 @@ static void invoke_read_line(void *data)
int status;
size_t read_size;
int local_loop = (d->again == 0);
+ DTRACE_INVOKE_SETUP(FILE_READ_LINE);
do {
size_t size = (d->c.read_line.binp)->orig_size -
d->c.read_line.read_offset - d->c.read_line.read_size;
if (size == 0) {
/* Need more place */
- size_t need = (d->c.read_line.read_size >= DEFAULT_LINEBUF_SIZE) ?
+ ErlDrvSizeT need = (d->c.read_line.read_size >= DEFAULT_LINEBUF_SIZE) ?
d->c.read_line.read_size + DEFAULT_LINEBUF_SIZE : DEFAULT_LINEBUF_SIZE;
ErlDrvBinary *newbin = driver_alloc_binary(need);
if (newbin == NULL) {
@@ -1095,6 +1235,7 @@ static void invoke_read_line(void *data)
break;
}
} while (local_loop);
+ DTRACE_INVOKE_RETURN(FILE_READ_LINE);
}
static void free_read_line(void *data)
@@ -1110,6 +1251,7 @@ static void invoke_read_file(void *data)
struct t_data *d = (struct t_data *) data;
size_t read_size;
int chop;
+ DTRACE_INVOKE_SETUP(FILE_READ_FILE);
if (! d->c.read_file.binp) { /* First invocation only */
int fd;
@@ -1146,12 +1288,14 @@ static void invoke_read_file(void *data)
&read_size);
if (d->result_ok) {
d->c.read_file.offset += read_size;
- if (chop) return; /* again */
+ if (chop) goto chop_done; /* again */
}
close:
efile_closefile((int) d->fd);
done:
d->again = 0;
+ chop_done:
+ DTRACE_INVOKE_RETURN(FILE_READ_FILE);
}
static void free_read_file(void *data)
@@ -1171,6 +1315,7 @@ static void invoke_preadv(void *data)
ErlIOVec *ev = &c->eiov;
size_t bytes_read_so_far = 0;
unsigned char *p = (unsigned char *)ev->iov[0].iov_base + 4+4+8*c->cnt;
+ DTRACE_INVOKE_SETUP(FILE_PREADV);
while (c->cnt < c->n) {
size_t read_size = ev->iov[1 + c->cnt].iov_len - c->size;
@@ -1192,7 +1337,7 @@ static void invoke_preadv(void *data)
bytes_read_so_far += bytes_read;
if (chop && bytes_read == read_size) {
c->size += bytes_read;
- return;
+ goto done;
}
ASSERT(bytes_read <= read_size);
ev->iov[1 + c->cnt].iov_len = bytes_read + c->size;
@@ -1203,7 +1348,7 @@ static void invoke_preadv(void *data)
if (d->again
&& bytes_read_so_far >= FILE_SEGMENT_READ
&& c->cnt < c->n) {
- return;
+ goto done;
}
} else {
/* In case of a read error, ev->size will not be correct,
@@ -1214,6 +1359,8 @@ static void invoke_preadv(void *data)
}
}
d->again = 0;
+ done:
+ DTRACE_INVOKE_RETURN(FILE_PREADV);
}
static void free_preadv(void *data) {
@@ -1235,6 +1382,7 @@ static void invoke_ipread(void *data)
size_t bytes_read = 0;
char buf[2*sizeof(Uint32)];
Uint32 offset, size;
+ DTRACE_INVOKE_SETUP(FILE_IPREAD);
/* Read indirection header */
if (! efile_pread(&d->errInfo, (int) d->fd, c->offsets[0],
@@ -1273,14 +1421,17 @@ static void invoke_ipread(void *data)
/* Read data block */
d->invoke = invoke_preadv;
invoke_preadv(data);
+ DTRACE_INVOKE_RETURN(FILE_IPREAD);
return;
error:
d->result_ok = 0;
d->again = 0;
+ DTRACE_INVOKE_RETURN(FILE_IPREAD);
return;
done:
d->result_ok = !0;
d->again = 0;
+ DTRACE_INVOKE_RETURN(FILE_IPREAD);
}
/* invoke_writev and invoke_pwritev are the only thread functions that
@@ -1303,6 +1454,7 @@ static void invoke_writev(void *data) {
size_t size;
size_t p;
int segment;
+ DTRACE_INVOKE_SETUP(FILE_WRITE);
segment = d->again && d->c.writev.size >= 2*FILE_SEGMENT_WRITE;
if (segment) {
@@ -1311,7 +1463,11 @@ static void invoke_writev(void *data) {
size = d->c.writev.size;
}
- /* Copy the io vector to avoid locking the port que while writing */
+ /* Copy the io vector to avoid locking the port que while writing,
+ * also, both we and efile_writev might/will change the SysIOVec
+ * when segmenting or due to partial write and we do not want to
+ * tamper with the actual queue that we get from driver_peekq
+ */
MUTEX_LOCK(d->c.writev.q_mtx); /* Lock before accessing the port queue */
iov0 = driver_peekq(d->c.writev.port, &iovlen);
@@ -1350,7 +1506,7 @@ static void invoke_writev(void *data) {
} else {
d->result_ok = efile_writev(&d->errInfo,
d->flags, (int) d->fd,
- iov, iovcnt, size);
+ iov, iovcnt);
}
} else if (iovlen == 0) {
d->result_ok = 1;
@@ -1372,6 +1528,7 @@ static void invoke_writev(void *data) {
TRACE_F(("w%lu", (unsigned long)size));
}
+ DTRACE_INVOKE_RETURN(FILE_WRITE);
}
static void free_writev(void *data) {
@@ -1385,34 +1542,40 @@ static void free_writev(void *data) {
static void invoke_pwd(void *data)
{
struct t_data *d = (struct t_data *) data;
+ DTRACE_INVOKE_SETUP(FILE_PWD);
d->again = 0;
d->result_ok = efile_getdcwd(&d->errInfo,d->drive, d->b+1,
RESBUFSIZE-1);
+ DTRACE_INVOKE_RETURN(FILE_PWD);
}
static void invoke_readlink(void *data)
{
struct t_data *d = (struct t_data *) data;
char resbuf[RESBUFSIZE]; /* Result buffer. */
+ DTRACE_INVOKE_SETUP(FILE_READLINK);
d->again = 0;
d->result_ok = efile_readlink(&d->errInfo, d->b, resbuf+1,
RESBUFSIZE-1);
if (d->result_ok != 0)
FILENAME_COPY((char *) d->b + 1, resbuf+1);
+ DTRACE_INVOKE_RETURN(FILE_READLINK);
}
static void invoke_altname(void *data)
{
struct t_data *d = (struct t_data *) data;
char resbuf[RESBUFSIZE]; /* Result buffer. */
+ DTRACE_INVOKE_SETUP(FILE_ALTNAME);
d->again = 0;
d->result_ok = efile_altname(&d->errInfo, d->b, resbuf+1,
RESBUFSIZE-1);
if (d->result_ok != 0)
FILENAME_COPY((char *) d->b + 1, resbuf+1);
+ DTRACE_INVOKE_RETURN(FILE_ALTNAME);
}
static void invoke_pwritev(void *data) {
@@ -1425,6 +1588,7 @@ static void invoke_pwritev(void *data) {
size_t p;
int segment;
size_t size, write_size;
+ DTRACE_INVOKE_SETUP(FILE_PWRITEV);
segment = d->again && c->size >= 2*FILE_SEGMENT_WRITE;
if (segment) {
@@ -1504,6 +1668,7 @@ static void invoke_pwritev(void *data) {
}
done:
EF_FREE(iov); /* Free our copy of the vector, nothing to restore */
+ DTRACE_INVOKE_RETURN(FILE_PWRITEV);
}
static void free_pwritev(void *data) {
@@ -1519,9 +1684,14 @@ static void invoke_flstat(void *data)
{
struct t_data *d = (struct t_data *) data;
+ DTRACE3(efile_drv_int_entry, d->sched_i1, d->sched_i2,
+ d->command == FILE_LSTAT ? FILE_LSTAT : FILE_FSTAT);
d->again = 0;
d->result_ok = efile_fileinfo(&d->errInfo, &d->info,
d->b, d->command == FILE_LSTAT);
+ DTRACE3(efile_drv_int_entry, d->sched_i1, d->sched_i2,
+ d->command == FILE_LSTAT ? FILE_LSTAT : FILE_FSTAT);
+ gcc_optimizer_hack++;
}
static void invoke_link(void *data)
@@ -1529,10 +1699,12 @@ static void invoke_link(void *data)
struct t_data *d = (struct t_data *) data;
char *name = d->b;
char *new_name;
+ DTRACE_INVOKE_SETUP(FILE_LINK);
d->again = 0;
new_name = name+FILENAME_BYTELEN(name)+FILENAME_CHARSIZE;
d->result_ok = efile_link(&d->errInfo, name, new_name);
+ DTRACE_INVOKE_RETURN(FILE_LINK);
}
static void invoke_symlink(void *data)
@@ -1540,10 +1712,12 @@ static void invoke_symlink(void *data)
struct t_data *d = (struct t_data *) data;
char *name = d->b;
char *new_name;
+ DTRACE_INVOKE_SETUP(FILE_SYMLINK);
d->again = 0;
new_name = name+FILENAME_BYTELEN(name)+FILENAME_CHARSIZE;
d->result_ok = efile_symlink(&d->errInfo, name, new_name);
+ DTRACE_INVOKE_RETURN(FILE_SYMLINK);
}
static void invoke_rename(void *data)
@@ -1551,24 +1725,29 @@ static void invoke_rename(void *data)
struct t_data *d = (struct t_data *) data;
char *name = d->b;
char *new_name;
+ DTRACE_INVOKE_SETUP(FILE_RENAME);
d->again = 0;
new_name = name+FILENAME_BYTELEN(name)+FILENAME_CHARSIZE;
d->result_ok = efile_rename(&d->errInfo, name, new_name);
+ DTRACE_INVOKE_RETURN(FILE_RENAME);
}
static void invoke_write_info(void *data)
{
struct t_data *d = (struct t_data *) data;
+ DTRACE_INVOKE_SETUP(FILE_WRITE_INFO);
d->again = 0;
d->result_ok = efile_write_info(&d->errInfo, &d->info, d->b);
+ DTRACE_INVOKE_RETURN(FILE_WRITE_INFO);
}
static void invoke_lseek(void *data)
{
struct t_data *d = (struct t_data *) data;
int status;
+ DTRACE_INVOKE_SETUP(FILE_LSEEK);
d->again = 0;
if (d->flags & EFILE_COMPRESSED) {
@@ -1593,66 +1772,61 @@ static void invoke_lseek(void *data)
&d->c.lseek.location);
}
d->result_ok = status;
+ DTRACE_INVOKE_RETURN(FILE_LSEEK);
}
static void invoke_readdir(void *data)
{
struct t_data *d = (struct t_data *) data;
- int s;
char *p = NULL;
- int buf_sz = 0;
- size_t tmp_bs;
+ size_t file_bs;
+ size_t n = 0, total = 0;
+ struct t_readdir_buf *b = NULL;
+ int res = 0;
+ DTRACE_INVOKE_SETUP(FILE_READDIR);
d->again = 0;
d->errInfo.posix_errno = 0;
- while (1) {
- char *str;
- if (buf_sz < (4 /* sz */ + 1 /* cmd */ +
- FILENAME_CHARSIZE*(MAXPATHLEN + 1))) {
- struct t_readdir_buf *b;
- if (p) {
- put_int32(0, p); /* EOB */
- }
- b = EF_SAFE_ALLOC(sizeof(struct t_readdir_buf));
- b->next = NULL;
- if (d->c.read_dir.last_buf)
- d->c.read_dir.last_buf->next = b;
- else
- d->c.read_dir.first_buf = b;
- d->c.read_dir.last_buf = b;
- p = &b->buf[0];
- buf_sz = READDIR_BUFSIZE - 4/* EOB */;
- }
-
- p[4] = FILE_RESP_FNAME;
- buf_sz -= 4 + 1;
- str = p + 4 + 1;
- ASSERT(buf_sz >= MAXPATHLEN + 1);
- tmp_bs = buf_sz;
- s = efile_readdir(&d->errInfo, d->b, &d->dir_handle, str, &tmp_bs);
-
- if (s) {
- put_int32(tmp_bs + 1 /* 1 byte for opcode */, p);
- p += 4 + tmp_bs + 1;
- ASSERT(p == (str + tmp_bs));
- buf_sz -= tmp_bs;
- }
- else {
- put_int32(1, p);
- p += 4 + 1;
- put_int32(0, p); /* EOB */
- d->result_ok = (d->errInfo.posix_errno == 0);
- break;
+ do {
+ total = READDIR_BUFSIZE;
+ n = 1;
+ b = EF_SAFE_ALLOC(sizeof(struct t_readdir_buf));
+ b->next = NULL;
+
+ if (d->c.read_dir.last_buf) {
+ d->c.read_dir.last_buf->next = b;
+ } else {
+ d->c.read_dir.first_buf = b;
}
- }
+ d->c.read_dir.last_buf = b;
+
+ p = &b->buf[0];
+ p[0] = FILE_RESP_LFNAME;
+ file_bs = READDIR_BUFSIZE - n;
+
+ do {
+ res = efile_readdir(&d->errInfo, d->b, &d->dir_handle, p + n + 2, &file_bs);
+
+ if (res) {
+ put_int16((Uint16)file_bs, p + n);
+ n += 2 + file_bs;
+ file_bs = READDIR_BUFSIZE - n;
+ }
+ } while( res && ((total - n - 2) >= MAXPATHLEN*FILENAME_CHARSIZE));
+
+ b->n = n;
+ } while(res);
+
+ d->result_ok = (d->errInfo.posix_errno == 0);
+ DTRACE_INVOKE_RETURN(FILE_READDIR);
}
static void invoke_open(void *data)
{
struct t_data *d = (struct t_data *) data;
-
int status = 1; /* Status of open call. */
+ DTRACE_INVOKE_SETUP(FILE_OPEN);
d->again = 0;
if ((d->flags & EFILE_COMPRESSED) == 0) {
@@ -1685,6 +1859,7 @@ static void invoke_open(void *data)
}
d->result_ok = status;
+ DTRACE_INVOKE_RETURN(FILE_OPEN);
}
static void invoke_fadvise(void *data)
@@ -1694,15 +1869,92 @@ static void invoke_fadvise(void *data)
off_t offset = (off_t) d->c.fadvise.offset;
off_t length = (off_t) d->c.fadvise.length;
int advise = (int) d->c.fadvise.advise;
+ DTRACE_INVOKE_SETUP(FILE_FADVISE);
d->again = 0;
d->result_ok = efile_fadvise(&d->errInfo, fd, offset, length, advise);
+ DTRACE_INVOKE_RETURN(FILE_FADVISE);
+}
+
+#ifdef HAVE_SENDFILE
+static void invoke_sendfile(void *data)
+{
+ struct t_data *d = (struct t_data *)data;
+ int fd = d->fd;
+ int out_fd = (int)d->c.sendfile.out_fd;
+ Uint64 nbytes = d->c.sendfile.nbytes;
+ int result = 0;
+ d->again = 0;
+
+ result = efile_sendfile(&d->errInfo, fd, out_fd, &d->c.sendfile.offset, &nbytes, NULL);
+
+ d->c.sendfile.written += nbytes;
+
+ if (result == 1 || (result == 0 && USE_THRDS_FOR_SENDFILE)) {
+ d->result_ok = 0;
+ } else if (result == 0 && (d->errInfo.posix_errno == EAGAIN
+ || d->errInfo.posix_errno == EINTR)) {
+ if ((d->c.sendfile.nbytes - nbytes) != 0) {
+ d->result_ok = 1;
+ if (d->c.sendfile.nbytes != 0)
+ d->c.sendfile.nbytes -= nbytes;
+ } else
+ d->result_ok = 0;
+ } else {
+ d->result_ok = -1;
+ }
+}
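invoke_sendfile folds the outcome of efile_sendfile into three result_ok values: 0 when everything was written (or when the threaded sendfile path is used), 1 when EAGAIN/EINTR left bytes unsent and the transfer must be resumed from the ready_output callback, and -1 for a hard error. The classification in isolation, with invented helper names and a small driver-free main:

    #include <errno.h>
    #include <stdio.h>

    enum sf_state { SF_DONE = 0, SF_RETRY = 1, SF_ERROR = -1 };

    /* Illustrative reconstruction of the result_ok mapping above. */
    static enum sf_state classify(int res, int posix_errno,
                                  unsigned long long requested,
                                  unsigned long long sent,
                                  int threaded)   /* USE_THRDS_FOR_SENDFILE */
    {
        if (res == 1 || (res == 0 && threaded))
            return SF_DONE;          /* nothing left, reply with the byte count */
        if (res == 0 && (posix_errno == EAGAIN || posix_errno == EINTR))
            /* partial write: resume via driver_select()/file_ready_output() */
            return (requested - sent) != 0 ? SF_RETRY : SF_DONE;
        return SF_ERROR;             /* reported as "closed" or a posix error */
    }

    int main(void)
    {
        printf("%d\n", classify(0, EAGAIN, 1 << 20, 4096, 0));   /* SF_RETRY */
        printf("%d\n", classify(1, 0, 1 << 20, 1 << 20, 0));     /* SF_DONE  */
        printf("%d\n", classify(0, EPIPE, 1 << 20, 0, 0));       /* SF_ERROR */
        return 0;
    }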
+
+static void free_sendfile(void *data) {
+ struct t_data *d = (struct t_data *)data;
+ if (USE_THRDS_FOR_SENDFILE) {
+ SET_NONBLOCKING(d->c.sendfile.out_fd);
+ } else {
+ MUTEX_LOCK(d->c.sendfile.q_mtx);
+ driver_deq(d->c.sendfile.port,1);
+ MUTEX_UNLOCK(d->c.sendfile.q_mtx);
+ driver_select(d->c.sendfile.port, (ErlDrvEvent)(long)d->c.sendfile.out_fd, ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 0);
+ }
+ EF_FREE(data);
+}
+
+static void file_ready_output(ErlDrvData data, ErlDrvEvent event)
+{
+ file_descriptor* fd = (file_descriptor*) data;
+
+ switch (fd->d->command) {
+ case FILE_SENDFILE:
+ driver_select(fd->d->c.sendfile.port, event,
+ (int)ERL_DRV_WRITE,(int) 0);
+ invoke_sendfile((void *)fd->d);
+ file_async_ready(data, (ErlDrvThreadData)fd->d);
+ break;
+ default:
+ break;
+ }
+}
+
+static void file_stop_select(ErlDrvEvent event, void* _)
+{
+
+}
+
+static int flush_sendfile(file_descriptor *desc,void *_) {
+ if (desc->sendfile_state == sending) {
+ desc->d->result_ok = -1;
+ desc->d->errInfo.posix_errno = ECONNABORTED;
+ file_async_ready((ErlDrvData)desc,(ErlDrvThreadData)desc->d);
+ }
+ return 1;
}
+#endif /* HAVE_SENDFILE */
+
static void free_readdir(void *data)
{
struct t_data *d = (struct t_data *) data;
struct t_readdir_buf *b1 = d->c.read_dir.first_buf;
+
while (b1) {
struct t_readdir_buf *b2 = b1;
b1 = b1->next;
@@ -1760,6 +2012,10 @@ static void cq_execute(file_descriptor *desc) {
register void *void_ptr; /* Soft cast variable */
if (desc->timer_state == timer_again)
return;
+#ifdef HAVE_SENDFILE
+ if (desc->sendfile_state == sending)
+ return;
+#endif
if (! (d = cq_deq(desc)))
return;
TRACE_F(("x%i", (int) d->command));
@@ -1767,12 +2023,16 @@ static void cq_execute(file_descriptor *desc) {
DRIVER_ASYNC(d->level, desc, d->invoke, void_ptr=d, d->free);
}
-static int async_write(file_descriptor *desc, int *errp,
- int reply, Uint32 reply_size) {
+static struct t_data *async_write(file_descriptor *desc, int *errp,
+ int reply, Uint32 reply_size
+#ifdef USE_VM_PROBES
+ ,Sint64 *dt_i1, Sint64 *dt_i2, Sint64 *dt_i3
+#endif
+) {
struct t_data *d;
if (! (d = EF_ALLOC(sizeof(struct t_data) - 1))) {
if (errp) *errp = ENOMEM;
- return -1;
+ return NULL;
}
TRACE_F(("w%lu", (unsigned long)desc->write_buffered));
d->command = FILE_WRITE;
@@ -1781,6 +2041,13 @@ static int async_write(file_descriptor *desc, int *errp,
d->c.writev.port = desc->port;
d->c.writev.q_mtx = desc->q_mtx;
d->c.writev.size = desc->write_buffered;
+#ifdef USE_VM_PROBES
+ if (dt_i1 != NULL) {
+ *dt_i1 = d->fd;
+ *dt_i2 = d->flags;
+ *dt_i3 = d->c.writev.size;
+ }
+#endif
d->reply = reply;
d->c.writev.free_size = 0;
d->c.writev.reply_size = reply_size;
@@ -1789,18 +2056,49 @@ static int async_write(file_descriptor *desc, int *errp,
d->level = 1;
cq_enq(desc, d);
desc->write_buffered = 0;
- return 0;
+ return d;
}
-static int flush_write(file_descriptor *desc, int *errp) {
- int result;
+static int flush_write(file_descriptor *desc, int *errp
+#ifdef USE_VM_PROBES
+ , dt_private *dt_priv, char *dt_utag
+#endif
+) {
+ int result = 0;
+#ifdef USE_VM_PROBES
+ Sint64 dt_i1 = 0, dt_i2 = 0, dt_i3 = 0;
+#endif
+ struct t_data *d = NULL;
+
MUTEX_LOCK(desc->q_mtx);
if (desc->write_buffered > 0) {
- result = async_write(desc, errp, 0, 0);
- } else {
- result = 0;
+ if ((d = async_write(desc, errp, 0, 0
+#ifdef USE_VM_PROBES
+ ,&dt_i1, &dt_i2, &dt_i3
+#endif
+ )) == NULL) {
+ result = -1;
+ }
}
MUTEX_UNLOCK(desc->q_mtx);
+#ifdef USE_VM_PROBES
+ if (d != NULL) {
+ d->sched_i1 = dt_priv->thread_num;
+ d->sched_i2 = dt_priv->tag;
+ d->sched_utag[0] = '\0';
+ if (dt_utag != NULL) {
+ if (dt_utag[0] == '\0') {
+ dt_utag = NULL;
+ } else {
+ strncpy(d->sched_utag, dt_utag, sizeof(d->sched_utag) - 1);
+ d->sched_utag[sizeof(d->sched_utag) - 1] = '\0';
+ }
+ }
+ DTRACE11(efile_drv_entry, dt_priv->thread_num, dt_priv->tag++,
+ dt_utag, FILE_WRITE,
+ NULL, NULL, dt_i1, dt_i2, dt_i3, 0, desc->port_str);
+ }
+#endif /* USE_VM_PROBES */
return result;
}
@@ -1813,9 +2111,17 @@ static int check_write_error(file_descriptor *desc, int *errp) {
return 0;
}
-static int flush_write_check_error(file_descriptor *desc, int *errp) {
+static int flush_write_check_error(file_descriptor *desc, int *errp
+#ifdef USE_VM_PROBES
+ , dt_private *dt_priv, char *dt_utag
+#endif
+ ) {
int r;
- if ( (r = flush_write(desc, errp)) != 0) {
+ if ( (r = flush_write(desc, errp
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ )) != 0) {
check_write_error(desc, NULL);
return r;
} else {
@@ -1823,12 +2129,16 @@ static int flush_write_check_error(file_descriptor *desc, int *errp) {
}
}
-static int async_lseek(file_descriptor *desc, int *errp, int reply,
- Sint64 offset, int origin) {
+static struct t_data *async_lseek(file_descriptor *desc, int *errp, int reply,
+ Sint64 offset, int origin
+#ifdef USE_VM_PROBES
+ , Sint64 *dt_i1, Sint64 *dt_i2, Sint64 *dt_i3
+#endif
+ ) {
struct t_data *d;
if (! (d = EF_ALLOC(sizeof(struct t_data)))) {
*errp = ENOMEM;
- return -1;
+ return NULL;
}
d->flags = desc->flags;
d->fd = desc->fd;
@@ -1836,11 +2146,18 @@ static int async_lseek(file_descriptor *desc, int *errp, int reply,
d->reply = reply;
d->c.lseek.offset = offset;
d->c.lseek.origin = origin;
+#ifdef USE_VM_PROBES
+ if (dt_i1 != NULL) {
+ *dt_i1 = d->fd;
+ *dt_i2 = d->c.lseek.offset;
+ *dt_i3 = d->c.lseek.origin;
+ }
+#endif
d->invoke = invoke_lseek;
d->free = free_data;
d->level = 1;
cq_enq(desc, d);
- return 0;
+ return d;
}
static void flush_read(file_descriptor *desc) {
@@ -1852,18 +2169,45 @@ static void flush_read(file_descriptor *desc) {
}
}
-static int lseek_flush_read(file_descriptor *desc, int *errp) {
+static int lseek_flush_read(file_descriptor *desc, int *errp
+#ifdef USE_VM_PROBES
+ ,dt_private *dt_priv, char *dt_utag
+#endif
+ ) {
int r = 0;
size_t read_size = desc->read_size;
+#ifdef USE_VM_PROBES
+ Sint64 dt_i1 = 0, dt_i2 = 0, dt_i3 = 0;
+#endif
+ struct t_data *d;
+
+ flush_read(desc);
if (read_size != 0) {
- flush_read(desc);
- if ((r = async_lseek(desc, errp, 0,
- -((ssize_t)read_size), EFILE_SEEK_CUR))
- < 0) {
- return r;
- }
- } else {
- flush_read(desc);
+ if ((d = async_lseek(desc, errp, 0,
+ -((ssize_t)read_size), EFILE_SEEK_CUR
+#ifdef USE_VM_PROBES
+ , &dt_i1, &dt_i2, &dt_i3
+#endif
+ )) == NULL) {
+ r = -1;
+ } else {
+#ifdef USE_VM_PROBES
+ d->sched_i1 = dt_priv->thread_num;
+ d->sched_i2 = dt_priv->tag;
+ d->sched_utag[0] = '\0';
+ if (dt_utag != NULL) {
+ if (dt_utag[0] == '\0') {
+ dt_utag = NULL;
+ } else {
+ strncpy(d->sched_utag, dt_utag, sizeof(d->sched_utag) - 1);
+ d->sched_utag[sizeof(d->sched_utag) - 1] = '\0';
+ }
+ }
+ DTRACE11(efile_drv_entry, dt_priv->thread_num, dt_priv->tag++,
+ dt_utag, FILE_LSEEK,
+ NULL, NULL, dt_i1, dt_i2, dt_i3, 0, desc->port_str);
+#endif /* USE_VM_PROBES */
+ }
}
return r;
}
@@ -1880,11 +2224,23 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
struct t_data *d = (struct t_data *) data;
char header[5]; /* result code + count */
char resbuf[RESBUFSIZE]; /* Result buffer. */
-
+#ifdef USE_VM_PROBES
+ int sched_i1 = d->sched_i1, sched_i2 = d->sched_i2, command = d->command,
+ result_ok = d->result_ok,
+ posix_errno = d->result_ok ? 0 : d->errInfo.posix_errno;
+ DTRACE_CHARBUF(sched_utag, DTRACE_EFILE_BUFSIZ+1);
+
+ sched_utag[0] = '\0';
+ if (DTRACE_ENABLED(efile_drv_return)) {
+ strncpy(sched_utag, d->sched_utag, DTRACE_EFILE_BUFSIZ);
+ sched_utag[DTRACE_EFILE_BUFSIZ] = '\0';
+ }
+#endif /* USE_VM_PROBES */
TRACE_C('r');
if (try_again(desc, d)) {
+ /* DTRACE TODO: what kind of probe makes sense here? */
return;
}
@@ -2026,24 +2382,25 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
if (d->result_ok) {
resbuf[0] = FILE_RESP_INFO;
- put_int32(d->info.size_high, &resbuf[1 + (0 * 4)]);
- put_int32(d->info.size_low, &resbuf[1 + (1 * 4)]);
- put_int32(d->info.type, &resbuf[1 + (2 * 4)]);
-
- PUT_TIME(d->info.accessTime, resbuf + 1 + 3*4);
- PUT_TIME(d->info.modifyTime, resbuf + 1 + 9*4);
- PUT_TIME(d->info.cTime, resbuf + 1 + 15*4);
-
- put_int32(d->info.mode, &resbuf[1 + (21 * 4)]);
- put_int32(d->info.links, &resbuf[1 + (22 * 4)]);
- put_int32(d->info.major_device, &resbuf[1 + (23 * 4)]);
- put_int32(d->info.minor_device, &resbuf[1 + (24 * 4)]);
- put_int32(d->info.inode, &resbuf[1 + (25 * 4)]);
- put_int32(d->info.uid, &resbuf[1 + (26 * 4)]);
- put_int32(d->info.gid, &resbuf[1 + (27 * 4)]);
- put_int32(d->info.access, &resbuf[1 + (28 * 4)]);
-
-#define RESULT_SIZE (1 + (29 * 4))
+ put_int32(d->info.size_high, &resbuf[1 + ( 0 * 4)]);
+ put_int32(d->info.size_low, &resbuf[1 + ( 1 * 4)]);
+ put_int32(d->info.type, &resbuf[1 + ( 2 * 4)]);
+
+ /* Note: the time fields are stored as 64-bit values in resbuf from here on */
+ put_int64(d->info.accessTime, &resbuf[1 + ( 3 * 4)]);
+ put_int64(d->info.modifyTime, &resbuf[1 + ( 5 * 4)]);
+ put_int64(d->info.cTime, &resbuf[1 + ( 7 * 4)]);
+
+ put_int32(d->info.mode, &resbuf[1 + ( 9 * 4)]);
+ put_int32(d->info.links, &resbuf[1 + (10 * 4)]);
+ put_int32(d->info.major_device, &resbuf[1 + (11 * 4)]);
+ put_int32(d->info.minor_device, &resbuf[1 + (12 * 4)]);
+ put_int32(d->info.inode, &resbuf[1 + (13 * 4)]);
+ put_int32(d->info.uid, &resbuf[1 + (14 * 4)]);
+ put_int32(d->info.gid, &resbuf[1 + (15 * 4)]);
+ put_int32(d->info.access, &resbuf[1 + (16 * 4)]);
+
+#define RESULT_SIZE (1 + (17 * 4))
TRACE_C('R');
driver_output2(desc->port, resbuf, RESULT_SIZE, NULL, 0);
#undef RESULT_SIZE
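With the move to 64-bit timestamps the FILE_RESP_INFO payload shrinks from 29 to 17 four-byte slots: size_high/size_low/type, three 64-bit time_t values, then mode, links, major_device, minor_device, inode, uid, gid and access as 32-bit words. A standalone decoder for that 69-byte message, assuming the usual big-endian put_int32/put_int64 encoding; the struct and helper names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_u32(const unsigned char *p)
    { return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] << 8)  |  (uint32_t)p[3]; }

    static uint64_t get_u64(const unsigned char *p)
    { return ((uint64_t)get_u32(p) << 32) | get_u32(p + 4); }

    struct file_info_demo {
        uint64_t size;                               /* size_high:size_low */
        uint32_t type;
        int64_t  access_time, modify_time, c_time;   /* 64-bit time_t values */
        uint32_t mode, links, major_dev, minor_dev, inode, uid, gid, access;
    };

    static void decode_info(const unsigned char *resbuf, struct file_info_demo *fi)
    {
        const unsigned char *p = resbuf + 1;         /* skip FILE_RESP_INFO */
        fi->size        = ((uint64_t)get_u32(p + 0*4) << 32) | get_u32(p + 1*4);
        fi->type        = get_u32(p +  2*4);
        fi->access_time = (int64_t)get_u64(p +  3*4);
        fi->modify_time = (int64_t)get_u64(p +  5*4);
        fi->c_time      = (int64_t)get_u64(p +  7*4);
        fi->mode        = get_u32(p +  9*4);
        fi->links       = get_u32(p + 10*4);
        fi->major_dev   = get_u32(p + 11*4);
        fi->minor_dev   = get_u32(p + 12*4);
        fi->inode       = get_u32(p + 13*4);
        fi->uid         = get_u32(p + 14*4);
        fi->gid         = get_u32(p + 15*4);
        fi->access      = get_u32(p + 16*4);
    }

    int main(void)
    {
        unsigned char resbuf[1 + 17*4] = {0};
        struct file_info_demo fi;
        decode_info(resbuf, &fi);
        printf("type=%u mtime=%lld\n", fi.type, (long long)fi.modify_time);
        return 0;
    }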
@@ -2053,30 +2410,24 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
free_data(data);
break;
case FILE_READDIR:
- if (!d->result_ok)
+ if (!d->result_ok) {
reply_error(desc, &d->errInfo);
- else {
+ } else {
struct t_readdir_buf *b1 = d->c.read_dir.first_buf;
+ char op = FILE_RESP_LFNAME;
+
TRACE_C('R');
ASSERT(b1);
+
while (b1) {
struct t_readdir_buf *b2 = b1;
char *p = &b1->buf[0];
- int sz = get_int32(p);
- while (sz) { /* 0 == EOB */
- p += 4;
- if (sz - 1 > 0) {
- driver_output2(desc->port, p, 1, p+1, sz-1);
- } else {
- driver_output2(desc->port, p, 1, NULL, 0);
- }
- p += sz;
- sz = get_int32(p);
- }
+ driver_output2(desc->port, p, 1, p + 1, b1->n - 1);
b1 = b1->next;
EF_FREE(b2);
}
-
+ driver_output2(desc->port, &op, 1, NULL, 0);
+
d->c.read_dir.first_buf = NULL;
d->c.read_dir.last_buf = NULL;
}
@@ -2087,6 +2438,9 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
if (d->reply) {
TRACE_C('K');
reply_ok(desc);
+#ifdef USE_VM_PROBES
+ result_ok = 1;
+#endif
}
free_data(data);
break;
@@ -2116,9 +2470,34 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
}
free_preadv(data);
break;
+#ifdef HAVE_SENDFILE
+ case FILE_SENDFILE:
+ if (d->result_ok == -1) {
+ if (d->errInfo.posix_errno == ECONNRESET ||
+ d->errInfo.posix_errno == ENOTCONN ||
+ d->errInfo.posix_errno == EPIPE)
+ reply_string_error(desc,"closed");
+ else
+ reply_error(desc, &d->errInfo);
+ desc->sendfile_state = not_sending;
+ free_sendfile(data);
+ } else if (d->result_ok == 0) {
+ reply_Sint64(desc, d->c.sendfile.written);
+ desc->sendfile_state = not_sending;
+ free_sendfile(data);
+ } else if (d->result_ok == 1) { /* partial write; use select to send the rest of the data */
+ desc->sendfile_state = sending;
+ desc->d = d;
+ driver_select(desc->port, (ErlDrvEvent)(long)d->c.sendfile.out_fd,
+ ERL_DRV_USE_NO_CALLBACK|ERL_DRV_WRITE, 1);
+ }
+ break;
+#endif
default:
abort();
}
+ DTRACE6(efile_drv_return, sched_i1, sched_i2, sched_utag,
+ command, result_ok, posix_errno);
if (desc->write_buffered != 0 && desc->timer_state == timer_idle) {
desc->timer_state = timer_write;
driver_set_timer(desc->port, desc->write_delay);
@@ -2126,11 +2505,12 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
cq_execute(desc);
}
+
/*********************************************************************
* Driver entry point -> output
*/
static void
-file_output(ErlDrvData e, char* buf, int count)
+file_output(ErlDrvData e, char* buf, ErlDrvSizeT count)
{
file_descriptor* desc = (file_descriptor*)e;
Efile_error errInfo; /* The error codes for the last operation. */
@@ -2140,7 +2520,15 @@ file_output(ErlDrvData e, char* buf, int count)
char* name; /* Points to the filename in buf. */
int command;
struct t_data *d = NULL;
-
+#ifdef USE_VM_PROBES
+ char *dt_utag = NULL;
+ char *dt_s1 = NULL, *dt_s2 = NULL;
+ Sint64 dt_i1 = 0;
+ Sint64 dt_i2 = 0;
+ Sint64 dt_i3 = 0;
+ Sint64 dt_i4 = 0;
+ dt_private *dt_priv = get_dt_private(0);
+#endif /* USE_VM_PROBES */
TRACE_C('o');
@@ -2155,6 +2543,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_mkdir;
d->free = free_data;
@@ -2166,6 +2558,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_rmdir;
d->free = free_data;
@@ -2177,6 +2573,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_delete_file;
d->free = free_data;
@@ -2194,6 +2594,11 @@ file_output(ErlDrvData e, char* buf, int count)
FILENAME_COPY(d->b, name);
FILENAME_COPY(d->b + namelen, new_name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_s2 = d->b + namelen;
+ dt_utag = buf + namelen + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
d->flags = desc->flags;
d->fd = fd;
d->command = command;
@@ -2207,6 +2612,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_chdir;
d->free = free_data;
@@ -2218,6 +2627,9 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + RESBUFSIZE + 1);
d->drive = *(uchar*)buf;
+#ifdef USE_VM_PROBES
+ dt_utag = buf + 1;
+#endif
d->command = command;
d->invoke = invoke_pwd;
d->free = free_data;
@@ -2233,6 +2645,10 @@ file_output(ErlDrvData e, char* buf, int count)
FILENAME_CHARSIZE);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
d->dir_handle = NULL;
d->command = command;
d->invoke = invoke_readdir;
@@ -2246,23 +2662,65 @@ file_output(ErlDrvData e, char* buf, int count)
#endif
{
size_t resbufsize;
- char resbuf[RESBUFSIZE+1];
+ size_t n = 0, total = 0;
+ int res = 0;
+ char resbuf[READDIR_BUFSIZE];
+
EFILE_DIR_HANDLE dir_handle; /* Handle to open directory. */
+ total = READDIR_BUFSIZE;
errInfo.posix_errno = 0;
- dir_handle = NULL;
- resbuf[0] = FILE_RESP_FNAME;
- resbufsize = RESBUFSIZE;
-
- while (efile_readdir(&errInfo, name, &dir_handle,
- resbuf+1, &resbufsize)) {
- driver_output2(desc->port, resbuf, 1, resbuf+1, resbufsize);
- resbufsize = RESBUFSIZE;
- }
+ dir_handle = NULL;
+ resbuf[0] = FILE_RESP_LFNAME;
+
+#ifdef USE_VM_PROBES
+ dt_s1 = name;
+ dt_utag = name + FILENAME_BYTELEN(name) + FILENAME_CHARSIZE;
+#endif
+ /* Fill the buffer with multiple directory listings before sending it to the
+  * receiving process. READDIR_CHUNKS is the minimum number of files sent to
+  * the receiver.
+  * Format for each driver_output2 (a standalone packing sketch follows this
+  * hunk):
+  * ------------------------------------
+  * | Type   | Len     | Filename  | ...
+  * | 1 byte | 2 bytes | Len bytes | ...
+  * ------------------------------------
+  */
+
+ do {
+ n = 1;
+ resbufsize = READDIR_BUFSIZE - n;
+
+ do {
+ res = efile_readdir(&errInfo, name, &dir_handle, resbuf + n + 2, &resbufsize);
+
+ if (res) {
+ put_int16((Uint16)resbufsize, resbuf + n);
+ n += 2 + resbufsize;
+ resbufsize = READDIR_BUFSIZE - n;
+ }
+ } while( res && ((total - n - 2) >= MAXPATHLEN*FILENAME_CHARSIZE));
+
+ if (n > 1) {
+ driver_output2(desc->port, resbuf, 1, resbuf + 1, n - 1);
+ }
+ } while(res);
+
if (errInfo.posix_errno != 0) {
reply_error(desc, &errInfo);
return;
}
+#ifdef USE_VM_PROBES
+ if (dt_utag != NULL && dt_utag[0] == '\0') {
+ dt_utag = NULL;
+ }
+
+ DTRACE11(efile_drv_entry, dt_priv->thread_num, dt_priv->tag,
+ dt_utag, command, name, dt_s2,
+ dt_i1, dt_i2, dt_i3, dt_i4, desc->port_str);
+ DTRACE6(efile_drv_return, dt_priv->thread_num, dt_priv->tag++,
+ dt_utag, command, 1, 0);
+#endif
TRACE_C('R');
driver_output2(desc->port, resbuf, 1, NULL, 0);
return;
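A standalone sketch of the length-prefixed packing described in the comment above. The buffer starts with one FILE_RESP_LFNAME byte and then carries | Len(2) | Name(Len) | records back to back; a message containing only the opcode byte terminates the listing. The opcode value, buffer size and helper names below are illustrative, and the 16-bit length is written big-endian as the driver's put_int16 does:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define FILE_RESP_LFNAME 29     /* opcode value is an assumption for the demo */
    #define DEMO_BUFSIZE     64     /* stands in for READDIR_BUFSIZE */

    static void put_u16(uint16_t v, unsigned char *p)
    { p[0] = (unsigned char)(v >> 8); p[1] = (unsigned char)(v & 0xff); }

    /* Pack as many names as fit: | Type(1) | Len(2) | Name(Len) | Len(2) | ... */
    static size_t pack_names(unsigned char *buf, size_t bufsz,
                             const char **names, size_t count)
    {
        size_t n = 1;
        buf[0] = FILE_RESP_LFNAME;
        for (size_t i = 0; i < count; i++) {
            size_t len = strlen(names[i]);
            if (n + 2 + len > bufsz)
                break;               /* the driver would flush and start over here */
            put_u16((uint16_t)len, buf + n);
            memcpy(buf + n + 2, names[i], len);
            n += 2 + len;
        }
        return n;                    /* what the driver hands to driver_output2 */
    }

    int main(void)
    {
        const char *names[] = { "Makefile", "src", "ebin" };
        unsigned char buf[DEMO_BUFSIZE];
        printf("packed %zu bytes\n", pack_names(buf, sizeof(buf), names, 3));
        return 0;
    }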
@@ -2275,6 +2733,11 @@ file_output(ErlDrvData e, char* buf, int count)
d->flags = get_int32((uchar*)buf);
name = buf+4;
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_i1 = d->flags;
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(d->b) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_open;
d->free = free_data;
@@ -2287,6 +2750,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data));
d->fd = fd;
+#ifdef USE_VM_PROBES
+ dt_utag = name;
+ dt_i1 = fd;
+#endif
d->command = command;
d->invoke = invoke_fdatasync;
d->free = free_data;
@@ -2299,6 +2766,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data));
d->fd = fd;
+#ifdef USE_VM_PROBES
+ dt_utag = name;
+ dt_i1 = fd;
+#endif
d->command = command;
d->invoke = invoke_fsync;
d->free = free_data;
@@ -2315,6 +2786,14 @@ file_output(ErlDrvData e, char* buf, int count)
FILENAME_COPY(d->b, name);
d->fd = fd;
+#ifdef USE_VM_PROBES
+ dt_utag = name + FILENAME_BYTELEN(d->b) + FILENAME_CHARSIZE;
+ if (command == FILE_LSTAT) {
+ dt_s1 = d->b;
+ } else {
+ dt_i1 = fd;
+ }
+#endif
d->command = command;
d->invoke = invoke_flstat;
d->free = free_data;
@@ -2328,6 +2807,11 @@ file_output(ErlDrvData e, char* buf, int count)
d->flags = desc->flags;
d->fd = fd;
+#ifdef USE_VM_PROBES
+ dt_utag = name;
+ dt_i1 = fd;
+ dt_i2 = d->flags;
+#endif
d->command = command;
d->invoke = invoke_truncate;
d->free = free_data;
@@ -2338,15 +2822,23 @@ file_output(ErlDrvData e, char* buf, int count)
case FILE_WRITE_INFO:
{
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1
- + FILENAME_BYTELEN(buf+21*4) + FILENAME_CHARSIZE);
+ + FILENAME_BYTELEN(buf + 9*4) + FILENAME_CHARSIZE);
- d->info.mode = get_int32(buf + 0 * 4);
- d->info.uid = get_int32(buf + 1 * 4);
- d->info.gid = get_int32(buf + 2 * 4);
- GET_TIME(d->info.accessTime, buf + 3 * 4);
- GET_TIME(d->info.modifyTime, buf + 9 * 4);
- GET_TIME(d->info.cTime, buf + 15 * 4);
- FILENAME_COPY(d->b, buf+21*4);
+ d->info.mode = get_int32(buf + 0 * 4);
+ d->info.uid = get_int32(buf + 1 * 4);
+ d->info.gid = get_int32(buf + 2 * 4);
+ d->info.accessTime = (time_t)((Sint64)get_int64(buf + 3 * 4));
+ d->info.modifyTime = (time_t)((Sint64)get_int64(buf + 5 * 4));
+ d->info.cTime = (time_t)((Sint64)get_int64(buf + 7 * 4));
+
+ FILENAME_COPY(d->b, buf + 9*4);
+#ifdef USE_VM_PROBES
+ dt_i1 = d->info.mode;
+ dt_i2 = d->info.uid;
+ dt_i3 = d->info.gid;
+ dt_s1 = d->b;
+ dt_utag = buf + 9 * 4 + FILENAME_BYTELEN(d->b) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_write_info;
d->free = free_data;
@@ -2359,6 +2851,10 @@ file_output(ErlDrvData e, char* buf, int count)
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + RESBUFSIZE + 1);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(d->b) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_readlink;
d->free = free_data;
@@ -2370,6 +2866,10 @@ file_output(ErlDrvData e, char* buf, int count)
{
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + RESBUFSIZE + 1);
FILENAME_COPY(d->b, name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_utag = name + FILENAME_BYTELEN(d->b) + FILENAME_CHARSIZE;
+#endif
d->command = command;
d->invoke = invoke_altname;
d->free = free_data;
@@ -2390,6 +2890,11 @@ file_output(ErlDrvData e, char* buf, int count)
FILENAME_COPY(d->b, name);
FILENAME_COPY(d->b + namelen, new_name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_s2 = d->b + namelen;
+ dt_utag = buf + namelen + FILENAME_BYTELEN(dt_s2) + FILENAME_CHARSIZE;
+#endif
d->flags = desc->flags;
d->fd = fd;
d->command = command;
@@ -2411,6 +2916,11 @@ file_output(ErlDrvData e, char* buf, int count)
FILENAME_COPY(d->b, name);
FILENAME_COPY(d->b + namelen, new_name);
+#ifdef USE_VM_PROBES
+ dt_s1 = d->b;
+ dt_s2 = d->b + namelen;
+ dt_utag = buf + namelen + FILENAME_BYTELEN(dt_s2) + FILENAME_CHARSIZE;
+#endif
d->flags = desc->flags;
d->fd = fd;
d->command = command;
@@ -2432,6 +2942,13 @@ file_output(ErlDrvData e, char* buf, int count)
d->c.fadvise.offset = get_int64((uchar*) buf);
d->c.fadvise.length = get_int64(((uchar*) buf) + sizeof(Sint64));
d->c.fadvise.advise = get_int32(((uchar*) buf) + 2 * sizeof(Sint64));
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->c.fadvise.offset;
+ dt_i3 = d->c.fadvise.length;
+ dt_i4 = d->c.fadvise.advise;
+ dt_utag = buf + 3 * sizeof(Sint64);
+#endif
goto done;
}
@@ -2445,6 +2962,22 @@ file_output(ErlDrvData e, char* buf, int count)
done:
if (d) {
+#ifdef USE_VM_PROBES
+ d->sched_i1 = dt_priv->thread_num;
+ d->sched_i2 = dt_priv->tag;
+ d->sched_utag[0] = '\0';
+ if (dt_utag != NULL) {
+ if (dt_utag[0] == '\0') {
+ dt_utag = NULL;
+ } else {
+ strncpy(d->sched_utag, dt_utag, sizeof(d->sched_utag) - 1);
+ d->sched_utag[sizeof(d->sched_utag) - 1] = '\0';
+ }
+ }
+ DTRACE11(efile_drv_entry, dt_priv->thread_num, dt_priv->tag++,
+ dt_utag, command, dt_s1, dt_s2,
+ dt_i1, dt_i2, dt_i3, dt_i4, desc->port_str);
+#endif
cq_enq(desc, d);
}
}
@@ -2455,16 +2988,33 @@ file_output(ErlDrvData e, char* buf, int count)
static void
file_flush(ErlDrvData e) {
file_descriptor *desc = (file_descriptor *)e;
+#ifdef DEBUG
int r;
+#endif
+#ifdef USE_VM_PROBES
+ dt_private *dt_priv = get_dt_private(dt_driver_io_worker_base);
+#endif
TRACE_C('f');
- r = flush_write(desc, NULL);
+#ifdef HAVE_SENDFILE
+ flush_sendfile(desc, NULL);
+#endif
+
+#ifdef DEBUG
+ r =
+#endif
+ flush_write(desc, NULL
+#ifdef USE_VM_PROBES
+ , dt_priv, (desc->d == NULL) ? NULL : desc->d->sched_utag
+#endif
+ );
/* Only possible reason for bad return value is ENOMEM, and
* there is nobody to tell...
*/
+#ifdef DEBUG
ASSERT(r == 0);
- r = 0; /* Avoiding warning */
+#endif
cq_execute(desc);
}
@@ -2473,16 +3023,23 @@ file_flush(ErlDrvData e) {
/*********************************************************************
* Driver entry point -> control
*/
-static int
+static ErlDrvSSizeT
file_control(ErlDrvData e, unsigned int command,
- char* buf, int len, char **rbuf, int rlen) {
+ char* buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen) {
+ /*
+ * warning: variable ‘desc’ set but not used
+ * [-Wunused-but-set-variable]
+ * ... no kidding ...
+ *
+ *
file_descriptor *desc = (file_descriptor *)e;
switch (command) {
default:
return 0;
- } /* switch (command) */
+ }
ASSERT(0);
- desc = NULL; /* XXX Avoid warning while empty switch */
+ desc = NULL;
+ */
return 0;
}
@@ -2493,6 +3050,9 @@ static void
file_timeout(ErlDrvData e) {
file_descriptor *desc = (file_descriptor *)e;
enum e_timer timer_state = desc->timer_state;
+#ifdef USE_VM_PROBES
+ dt_private *dt_priv = get_dt_private(dt_driver_io_worker_base);
+#endif
TRACE_C('t');
@@ -2507,12 +3067,18 @@ file_timeout(ErlDrvData e) {
driver_async(desc->port, KEY(desc), desc->invoke, desc->d, desc->free);
break;
case timer_write: {
- int r = flush_write(desc, NULL);
+#ifdef DEBUG
+ int r =
+#endif
+ flush_write(desc, NULL
+#ifdef USE_VM_PROBES
+ , dt_priv, (desc->d == NULL) ? NULL : desc->d->sched_utag
+#endif
+ );
/* Only possible reason for bad return value is ENOMEM, and
* there is nobody to tell...
*/
ASSERT(r == 0);
- r = 0; /* Avoiding warning */
cq_execute(desc);
} break;
} /* case */
@@ -2529,6 +3095,14 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
char command;
int p, q;
int err;
+ struct t_data *d = NULL;
+#ifdef USE_VM_PROBES
+ Sint64 dt_i1 = 0, dt_i2 = 0, dt_i3 = 0;
+ Sint64 dt_i4 = 0;
+ char *dt_utag = NULL;
+ char *dt_s1 = NULL;
+ dt_private *dt_priv = get_dt_private(dt_driver_io_worker_base);
+#endif
TRACE_C('v');
@@ -2548,18 +3122,19 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
switch (command) {
case FILE_CLOSE: {
+#ifdef USE_VM_PROBES
+ dt_utag = EV_CHAR_P(ev, p, q);
+#endif
flush_read(desc);
- if (flush_write_check_error(desc, &err) < 0) {
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
- if (ev->size != 1) {
- /* Wrong command length */
- reply_posix_error(desc, EINVAL);
- goto done;
- }
if (desc->fd != FILE_FD_INVALID) {
- struct t_data *d;
if (! (d = EF_ALLOC(sizeof(struct t_data)))) {
reply_posix_error(desc, ENOMEM);
} else {
@@ -2567,6 +3142,10 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->reply = !0;
d->fd = desc->fd;
d->flags = desc->flags;
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->flags;
+#endif
d->invoke = invoke_close;
d->free = free_data;
d->level = 2;
@@ -2582,8 +3161,21 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
case FILE_READ: {
Uint32 sizeH, sizeL;
size_t size, alloc_size;
- struct t_data *d;
- if (flush_write_check_error(desc, &err) < 0) {
+
+ if (!EV_GET_UINT32(ev, &sizeH, &p, &q)
+ || !EV_GET_UINT32(ev, &sizeL, &p, &q)) {
+ /* Wrong buffer length to contain the read count */
+ reply_posix_error(desc, EINVAL);
+ goto done;
+ }
+#ifdef USE_VM_PROBES
+ dt_utag = EV_CHAR_P(ev, p, q);
+#endif
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
@@ -2591,19 +3183,16 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
if (desc->read_bufsize == 0 && desc->read_binp != NULL && desc->read_size > 0) {
/* We have allocated a buffer for line mode but should not really have a
read-ahead buffer... */
- if (lseek_flush_read(desc, &err) < 0) {
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
}
#endif
- if (ev->size != 1+8
- || !EV_GET_UINT32(ev, &sizeH, &p, &q)
- || !EV_GET_UINT32(ev, &sizeL, &p, &q)) {
- /* Wrong buffer length to contain the read count */
- reply_posix_error(desc, EINVAL);
- goto done;
- }
#if SIZEOF_SIZE_T == 4
if (sizeH != 0) {
reply_posix_error(desc, EINVAL);
@@ -2679,6 +3268,11 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->c.read.bin_offset = desc->read_offset + desc->read_size;
d->c.read.bin_size = desc->read_binp->orig_size - d->c.read.bin_offset;
d->c.read.size = size;
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->flags;
+ dt_i3 = d->c.read.size;
+#endif
driver_binary_inc_refc(d->c.read.binp);
d->invoke = invoke_read;
d->free = free_read;
@@ -2696,12 +3290,22 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
* allocated binary + dealing with offsets and lengths is done in file_async_ready
* for this OP.
*/
- struct t_data *d;
- if (flush_write_check_error(desc, &err) < 0) {
+#ifdef USE_VM_PROBES
+ dt_utag = EV_CHAR_P(ev, p, q);
+#endif
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
- if (ev->size != 1) {
+ if (ev->size != 1
+#ifdef USE_VM_PROBES
+ + FILENAME_BYTELEN(dt_utag) + FILENAME_CHARSIZE
+#endif
+ ) {
/* Wrong command length */
reply_posix_error(desc, EINVAL);
goto done;
@@ -2757,8 +3361,16 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->c.read_line.binp = desc->read_binp;
d->c.read_line.read_offset = desc->read_offset;
d->c.read_line.read_size = desc->read_size;
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->flags;
+ dt_i3 = d->c.read_line.read_offset;
+#endif
#if !ALWAYS_READ_LINE_AHEAD
d->c.read_line.read_ahead = (desc->read_bufsize > 0);
+#ifdef USE_VM_PROBES
+ dt_i4 = d->c.read_line.read_ahead;
+#endif
#endif
driver_binary_inc_refc(d->c.read.binp);
d->invoke = invoke_read_line;
@@ -2766,10 +3378,22 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->level = 1;
cq_enq(desc, d);
} goto done;
- case FILE_WRITE: {
- int skip = 1;
- int size = ev->size - skip;
- if (lseek_flush_read(desc, &err) < 0) {
+    case FILE_WRITE: { /* Dtrace: the dtrace user tag is not last in the message;
+                          it follows the message tag directly. This is handled
+                          specially in prim_file.erl */
+ ErlDrvSizeT skip = 1;
+ ErlDrvSizeT size = ev->size - skip;
+
+#ifdef USE_VM_PROBES
+ dt_utag = EV_CHAR_P(ev, p, q);
+ skip += FILENAME_BYTELEN(dt_utag) + FILENAME_CHARSIZE;
+ size = ev->size - skip;
+#endif
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
@@ -2777,7 +3401,7 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
reply_posix_error(desc, EBADF);
goto done;
}
- if (size <= 0) {
+ if (size == 0) {
reply_Uint(desc, size);
goto done;
}
@@ -2796,7 +3420,11 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
driver_set_timer(desc->port, desc->write_delay);
}
} else {
- if (async_write(desc, &err, !0, size) != 0) {
+ if ((d = async_write(desc, &err, !0, size
+#ifdef USE_VM_PROBES
+ , &dt_i1, &dt_i2, &dt_i3
+#endif
+ )) == NULL) {
MUTEX_UNLOCK(desc->q_mtx);
reply_posix_error(desc, err);
goto done;
@@ -2806,24 +3434,49 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
}
} goto done; /* case FILE_WRITE */
- case FILE_PWRITEV: {
+    case FILE_PWRITEV: { /* Dtrace: the dtrace user tag is not last in the message;
+                            it follows the message tag directly. This is handled
+                            specially in prim_file.erl */
Uint32 i, j, n;
size_t total;
- struct t_data *d;
- if (lseek_flush_read(desc, &err) < 0) {
- reply_Uint_posix_error(desc, 0, err);
- goto done;
- }
- if (flush_write_check_error(desc, &err) < 0) {
- reply_Uint_posix_error(desc, 0, err);
- goto done;
+#ifdef USE_VM_PROBES
+ char dt_tmp;
+ int dt_utag_bytes = 1;
+
+ dt_utag = EV_CHAR_P(ev, p, q);
+ /* This will work for UTF-8, but not for UTF-16 - extra reminder here */
+#ifdef FILENAMES_16BIT
+#error 16bit characters in filenames and dtrace in combination is not supported.
+#endif
+ while (EV_GET_CHAR(ev, &dt_tmp, &p, &q) && dt_tmp != '\0') {
+ dt_utag_bytes++;
}
+#endif
if (ev->size < 1+4
+#ifdef USE_VM_PROBES
+ + dt_utag_bytes
+#endif
|| !EV_GET_UINT32(ev, &n, &p, &q)) {
/* Buffer too short to contain even the number of pos/size specs */
reply_Uint_posix_error(desc, 0, EINVAL);
goto done;
}
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
+ reply_Uint_posix_error(desc, 0, err);
+ goto done;
+ }
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
+ reply_Uint_posix_error(desc, 0, err);
+ goto done;
+ }
if (n == 0) {
/* Trivial case - nothing to write */
if (ev->size != 1+4) {
@@ -2833,7 +3486,11 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
}
goto done;
}
- if (ev->size < 1+4+8*(2*n)) {
+ if (ev->size < 1+4+8*(2*n)
+#ifdef USE_VM_PROBES
+ + dt_utag_bytes
+#endif
+ ) {
/* Buffer too short to contain even the pos/size specs */
reply_Uint_posix_error(desc, 0, EINVAL);
goto done;
@@ -2848,6 +3505,10 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->reply = !0;
d->fd = desc->fd;
d->flags = desc->flags;
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->flags;
+#endif
d->c.pwritev.port = desc->port;
d->c.pwritev.q_mtx = desc->q_mtx;
d->c.pwritev.n = n;
@@ -2885,13 +3546,20 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
}
}
d->c.pwritev.size = total;
+#ifdef USE_VM_PROBES
+ dt_i3 = d->c.pwritev.size;
+#endif
d->c.pwritev.free_size = 0;
if (j == 0) {
/* Trivial case - nothing to write */
EF_FREE(d);
reply_Uint(desc, 0);
} else {
- size_t skip = 1 + 4 + 8*(2*n);
+ ErlDrvSizeT skip = 1 + 4 + 8 * (2*n)
+#ifdef USE_VM_PROBES
+ + dt_utag_bytes
+#endif
+ ;
if (skip + total != ev->size) {
/* Actual amount of data does not match
* total of all pos/size specs
@@ -2912,27 +3580,55 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
}
} goto done; /* case FILE_PWRITEV: */
- case FILE_PREADV: {
+    case FILE_PREADV: { /* Dtrace: the dtrace user tag is not last in the message;
+                           it follows the message tag directly. This is handled
+                           specially in prim_file.erl */
register void * void_ptr;
Uint32 i, n;
- struct t_data *d;
ErlIOVec *res_ev;
- if (lseek_flush_read(desc, &err) < 0) {
+#ifdef USE_VM_PROBES
+ char dt_tmp;
+ int dt_utag_bytes = 1;
+ /* This will work for UTF-8, but not for UTF-16 - extra reminder here */
+#ifdef FILENAMES_16BIT
+#error 16bit characters in filenames and dtrace in combination is not supported.
+#endif
+ dt_utag = EV_CHAR_P(ev, p, q);
+ while (EV_GET_CHAR(ev, &dt_tmp, &p, &q) && dt_tmp != '\0') {
+ dt_utag_bytes++;
+ }
+#endif
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
- if (flush_write_check_error(desc, &err) < 0) {
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
if (ev->size < 1+8
+#ifdef USE_VM_PROBES
+ + dt_utag_bytes
+#endif
|| !EV_GET_UINT32(ev, &n, &p, &q)
|| !EV_GET_UINT32(ev, &n, &p, &q)) {
/* Buffer too short to contain even the number of pos/size specs */
reply_posix_error(desc, EINVAL);
goto done;
}
- if (ev->size != 1+8+8*(2*n)) {
+ if (ev->size < 1+8+8*(2*n)
+#ifdef USE_VM_PROBES
+ + dt_utag_bytes
+#endif
+ ) {
/* Buffer wrong length to contain the pos/size specs */
reply_posix_error(desc, EINVAL);
goto done;
@@ -2952,6 +3648,10 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->reply = !0;
d->fd = desc->fd;
d->flags = desc->flags;
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->flags;
+#endif
d->c.preadv.n = n;
d->c.preadv.cnt = 0;
d->c.preadv.size = 0;
@@ -2979,6 +3679,9 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
#else
size = ((size_t)sizeH<<32) | sizeL;
#endif
+#ifdef USE_VM_PROBES
+ dt_i3 += size;
+#endif
if (! (res_ev->binv[i] = driver_alloc_binary(size))) {
reply_posix_error(desc, ENOMEM);
break;
@@ -3025,42 +3728,68 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
} goto done; /* case FILE_PREADV: */
case FILE_LSEEK: {
- Sint64 offset; /* Offset for seek */
+ Sint64 offset; /* Offset for seek */
Uint32 origin; /* Origin of seek. */
- if (lseek_flush_read(desc, &err) < 0) {
- reply_posix_error(desc, err);
+
+ if (ev->size < 1+8+4
+ || !EV_GET_UINT64(ev, &offset, &p, &q)
+ || !EV_GET_UINT32(ev, &origin, &p, &q)) {
+ /* Wrong length of buffer to contain offset and origin */
+ reply_posix_error(desc, EINVAL);
goto done;
}
- if (flush_write_check_error(desc, &err) < 0) {
+#ifdef USE_VM_PROBES
+ dt_utag = EV_CHAR_P(ev, p, q);
+#endif
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
- if (ev->size != 1+8+4
- || !EV_GET_UINT64(ev, &offset, &p, &q)
- || !EV_GET_UINT32(ev, &origin, &p, &q)) {
- /* Wrong length of buffer to contain offset and origin */
- reply_posix_error(desc, EINVAL);
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
+ reply_posix_error(desc, err);
goto done;
}
- if (async_lseek(desc, &err, !0, offset, origin) < 0) {
+ if ((d = async_lseek(desc, &err, !0, offset, origin
+#ifdef USE_VM_PROBES
+ , &dt_i1, &dt_i2, &dt_i3
+#endif
+ )) == NULL) {
reply_posix_error(desc, err);
goto done;
}
} goto done;
case FILE_READ_FILE: {
- struct t_data *d;
char *filename;
if (ev->size < 1+1) {
/* Buffer contains empty name */
reply_posix_error(desc, ENOENT);
goto done;
}
+#ifndef USE_VM_PROBES
+    /* In the dtrace case, the iov has an extra element, the dtrace utag;
+       we will need another test to see that the filename is in a single
+       buffer: */
if (ev->size-1 != ev->iov[q].iov_len-p) {
/* Name not in one single buffer */
reply_posix_error(desc, EINVAL);
goto done;
}
+#else
+ if (((byte *)ev->iov[q].iov_base)[ev->iov[q].iov_len-1] != '\0') {
+ /* Name not in one single buffer */
+ reply_posix_error(desc, EINVAL);
+ goto done;
+ }
+#endif
filename = EV_CHAR_P(ev, p, q);
d = EF_ALLOC(sizeof(struct t_data) -1 + FILENAME_BYTELEN(filename) + FILENAME_CHARSIZE);
if (! d) {
@@ -3071,6 +3800,20 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->reply = !0;
/* Copy name */
FILENAME_COPY(d->b, filename);
+#ifdef USE_VM_PROBES
+ {
+ char dt_tmp;
+
+ /* This will work for UTF-8, but not for UTF-16 - extra reminder here */
+#ifdef FILENAMES_16BIT
+#error 16bit characters in filenames and dtrace in combination is not supported.
+#endif
+ while (EV_GET_CHAR(ev, &dt_tmp, &p, &q) && dt_tmp != '\0')
+ ;
+ dt_s1 = d->b;
+ dt_utag = EV_CHAR_P(ev, p, q);
+ }
+#endif
d->c.read_file.binp = NULL;
d->invoke = invoke_read_file;
d->free = free_read_file;
@@ -3090,7 +3833,6 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
char mode;
Sint64 hdr_offset;
Uint32 max_size;
- struct t_data *d;
ErlIOVec *res_ev;
int vsize;
if (! EV_GET_CHAR(ev, &mode, &p, &q)) {
@@ -3102,14 +3844,6 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
reply_posix_error(desc, EINVAL);
goto done;
}
- if (lseek_flush_read(desc, &err) < 0) {
- reply_posix_error(desc, err);
- goto done;
- }
- if (flush_write_check_error(desc, &err) < 0) {
- reply_posix_error(desc, err);
- goto done;
- }
if (ev->size < 1+1+8+4
|| !EV_GET_UINT64(ev, &hdr_offset, &p, &q)
|| !EV_GET_UINT32(ev, &max_size, &p, &q)) {
@@ -3118,6 +3852,25 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
reply_posix_error(desc, EINVAL);
goto done;
}
+#ifdef USE_VM_PROBES
+ dt_utag = EV_CHAR_P(ev, p, q);
+#endif
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
+ reply_posix_error(desc, err);
+ goto done;
+ }
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
+ reply_posix_error(desc, err);
+ goto done;
+ }
/* Create the thread data structure with the contained ErlIOVec
* and corresponding binaries for the response
*/
@@ -3134,6 +3887,12 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->flags = desc->flags;
d->c.preadv.offsets[0] = hdr_offset;
d->c.preadv.size = max_size;
+#ifdef USE_VM_PROBES
+ dt_i1 = d->fd;
+ dt_i2 = d->flags;
+ dt_i3 = d->c.preadv.offsets[0];
+ dt_i4 = d->c.preadv.size;
+#endif
res_ev = &d->c.preadv.eiov;
/* XXX possible alignment problems here for weird machines */
res_ev->iov = void_ptr = d + 1;
@@ -3148,16 +3907,24 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
case FILE_SETOPT: {
char opt;
+
if (ev->size < 1+1
|| !EV_GET_CHAR(ev, &opt, &p, &q)) {
/* Buffer too short to contain even the option type */
reply_posix_error(desc, EINVAL);
goto done;
}
+#ifdef USE_VM_PROBES
+ dt_i1 = opt;
+ dt_utag = EV_CHAR_P(ev, p, q);
+#endif
switch (opt) {
case FILE_OPT_DELAYED_WRITE: {
Uint32 sizeH, sizeL, delayH, delayL;
if (ev->size != 1+1+4*sizeof(Uint32)
+#ifdef USE_VM_PROBES
+ + FILENAME_BYTELEN(dt_utag) + FILENAME_CHARSIZE
+#endif
|| !EV_GET_UINT32(ev, &sizeH, &p, &q)
|| !EV_GET_UINT32(ev, &sizeL, &p, &q)
|| !EV_GET_UINT32(ev, &delayH, &p, &q)
@@ -3184,12 +3951,18 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
#else
desc->write_delay = ((unsigned long)delayH << 32) | delayL;
#endif
+#ifdef USE_VM_PROBES
+ dt_i2 = desc->write_delay;
+#endif
TRACE_C('K');
reply_ok(desc);
} goto done;
case FILE_OPT_READ_AHEAD: {
Uint32 sizeH, sizeL;
if (ev->size != 1+1+2*sizeof(Uint32)
+#ifdef USE_VM_PROBES
+ + FILENAME_BYTELEN(dt_utag)+FILENAME_CHARSIZE
+#endif
|| !EV_GET_UINT32(ev, &sizeH, &p, &q)
|| !EV_GET_UINT32(ev, &sizeL, &p, &q)) {
/* Buffer has wrong length to contain the option values */
@@ -3205,6 +3978,9 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
#else
desc->read_bufsize = ((size_t)sizeH << 32) | sizeL;
#endif
+#ifdef USE_VM_PROBES
+ dt_i2 = desc->read_bufsize;
+#endif
TRACE_C('K');
reply_ok(desc);
} goto done;
@@ -3213,14 +3989,97 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
goto done;
} /* case FILE_OPT_DELAYED_WRITE: */
} ASSERT(0); goto done; /* case FILE_SETOPT: */
-
+
+ case FILE_SENDFILE: {
+
+#ifdef HAVE_SENDFILE
+ struct t_data *d;
+ Uint32 out_fd, offsetH, offsetL, hd_len, tl_len;
+ Uint64 nbytes;
+ char flags;
+
+ if (ev->size < 1 + 7 * sizeof(Uint32) + sizeof(char)
+ || !EV_GET_UINT32(ev, &out_fd, &p, &q)
+ || !EV_GET_CHAR(ev, &flags, &p, &q)
+ || !EV_GET_UINT32(ev, &offsetH, &p, &q)
+ || !EV_GET_UINT32(ev, &offsetL, &p, &q)
+ || !EV_GET_UINT64(ev, &nbytes, &p, &q)
+ || !EV_GET_UINT32(ev, &hd_len, &p, &q)
+ || !EV_GET_UINT32(ev, &tl_len, &p, &q)) {
+ /* Buffer has wrong length to contain all the needed values */
+ reply_posix_error(desc, EINVAL);
+ goto done;
+ }
+
+ if (hd_len != 0 || tl_len != 0 || flags != 0) {
+ /* Headers, trailers, and flags are not supported yet */
+ reply_posix_error(desc, EINVAL);
+ goto done;
+ }
+
+ d = EF_SAFE_ALLOC(sizeof(struct t_data));
+ d->fd = desc->fd;
+ d->command = command;
+ d->invoke = invoke_sendfile;
+ d->free = free_sendfile;
+ d->level = 2;
+
+ d->c.sendfile.out_fd = (int) out_fd;
+ d->c.sendfile.written = 0;
+ d->c.sendfile.port = desc->port;
+ d->c.sendfile.q_mtx = desc->q_mtx;
+
+ #if SIZEOF_OFF_T == 4
+ if (offsetH != 0) {
+ reply_posix_error(desc, EINVAL);
+ goto done;
+ }
+ d->c.sendfile.offset = (off_t) offsetL;
+ #else
+ d->c.sendfile.offset = ((off_t) offsetH << 32) | offsetL;
+ #endif
+
+ d->c.sendfile.nbytes = nbytes;
+
+ if (USE_THRDS_FOR_SENDFILE) {
+ SET_BLOCKING(d->c.sendfile.out_fd);
+ } else {
+ /**
+ * Write a placeholder to the queue in order to force file_flush
+ * to be called before the driver is closed.
+ */
+ char tmp[1] = "";
+ MUTEX_LOCK(d->c.sendfile.q_mtx);
+ if (driver_enq(d->c.sendfile.port, tmp, 1)) {
+ MUTEX_UNLOCK(d->c.sendfile.q_mtx);
+ reply_posix_error(desc, ENOMEM);
+ goto done;
+ }
+ MUTEX_UNLOCK(d->c.sendfile.q_mtx);
+ }
+
+ cq_enq(desc, d);
+#else
+ reply_posix_error(desc, ENOTSUP);
+#endif
+ goto done;
+ } /* case FILE_SENDFILE: */
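The EV_GET_* sequence above fixes the FILE_SENDFILE request layout at 30 bytes: the command byte, a 32-bit destination fd, one flag byte, the 64-bit offset split into two 32-bit words, a 64-bit byte count, and 32-bit header/trailer lengths, where flags, hd_len and tl_len must currently be zero. A hypothetical client-side packer mirroring that parse order; the opcode value and helper names are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define FILE_SENDFILE 32    /* opcode value is an assumption for the demo */

    static size_t put_u32(unsigned char *p, uint32_t v)
    { p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (unsigned char)v; return 4; }

    static size_t put_u64(unsigned char *p, uint64_t v)
    { put_u32(p, (uint32_t)(v >> 32)); put_u32(p + 4, (uint32_t)v); return 8; }

    static size_t pack_sendfile_cmd(unsigned char *buf, uint32_t out_fd,
                                    uint64_t offset, uint64_t nbytes)
    {
        size_t n = 0;
        buf[n++] = FILE_SENDFILE;
        n += put_u32(buf + n, out_fd);                    /* destination socket fd */
        buf[n++] = 0;                                     /* flags: must be 0      */
        n += put_u32(buf + n, (uint32_t)(offset >> 32));  /* offsetH               */
        n += put_u32(buf + n, (uint32_t)offset);          /* offsetL               */
        n += put_u64(buf + n, nbytes);                    /* byte count            */
        n += put_u32(buf + n, 0);                         /* hd_len: must be 0     */
        n += put_u32(buf + n, 0);                         /* tl_len: must be 0     */
        return n;                                         /* 30 bytes              */
    }

    int main(void)
    {
        unsigned char cmd[30];
        printf("%zu\n", pack_sendfile_cmd(cmd, 13, 0, 1 << 20));
        return 0;
    }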
+
} /* switch(command) */
-
- if (lseek_flush_read(desc, &err) < 0) {
+
+ if (lseek_flush_read(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
}
- if (flush_write_check_error(desc, &err) < 0) {
+ if (flush_write_check_error(desc, &err
+#ifdef USE_VM_PROBES
+ , dt_priv, dt_utag
+#endif
+ ) < 0) {
reply_posix_error(desc, err);
goto done;
} else {
@@ -3238,5 +4097,50 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
}
done:
+ if (d != NULL) {
+#ifdef USE_VM_PROBES
+ /*
+ * If d == NULL, then either:
+ * 1). There was an error of some sort, or
+ * 2). The command given to us is actually implemented
+ * by file_output() instead.
+ *
+ * Case #1 is probably a TODO item, perhaps?
+ * Case #2 we definitely don't want to activate a probe.
+ */
+ d->sched_i1 = dt_priv->thread_num;
+ d->sched_i2 = dt_priv->tag;
+ d->sched_utag[0] = '\0';
+ if (dt_utag != NULL) {
+ if (dt_utag[0] == '\0') {
+ dt_utag = NULL;
+ } else {
+ strncpy(d->sched_utag, dt_utag, sizeof(d->sched_utag) - 1);
+ d->sched_utag[sizeof(d->sched_utag) - 1] = '\0';
+ }
+ }
+ DTRACE11(efile_drv_entry, dt_priv->thread_num, dt_priv->tag++,
+ dt_utag, command, dt_s1, NULL, dt_i1, dt_i2, dt_i3, dt_i4,
+ desc->port_str);
+#endif
+ }
cq_execute(desc);
}
+
+#ifdef USE_VM_PROBES
+dt_private *
+get_dt_private(int base)
+{
+ dt_private *dt_priv = (dt_private *) pthread_getspecific(dt_driver_key);
+
+ if (dt_priv == NULL) {
+ dt_priv = EF_SAFE_ALLOC(sizeof(dt_private));
+ erts_mtx_lock(&dt_driver_mutex);
+ dt_priv->thread_num = (base + dt_driver_idnum++);
+ erts_mtx_unlock(&dt_driver_mutex);
+ dt_priv->tag = 0;
+ pthread_setspecific(dt_driver_key, dt_priv);
+ }
+ return dt_priv;
+}
+#endif /* USE_VM_PROBES */
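get_dt_private() gives every driver thread its own dt_private record through POSIX thread-specific data, allocating it on first use and handing out sequential thread numbers under a mutex. The same pattern in isolation (names are illustrative, malloc/free and a plain pthread mutex stand in for the driver's allocator and erts_mtx, and the real code also offsets the id by a caller-supplied base):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal per-thread private data, in the spirit of dt_private. */
    typedef struct { int thread_num; unsigned long tag; } demo_private;

    static pthread_key_t demo_key;
    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int demo_idnum = 0;

    static demo_private *get_demo_private(int base)
    {
        demo_private *priv = pthread_getspecific(demo_key);
        if (priv == NULL) {
            priv = malloc(sizeof(*priv));
            pthread_mutex_lock(&demo_mutex);
            priv->thread_num = base + demo_idnum++;   /* unique id per thread */
            pthread_mutex_unlock(&demo_mutex);
            priv->tag = 0;
            pthread_setspecific(demo_key, priv);      /* cached for later calls */
        }
        return priv;
    }

    static void *worker(void *arg)
    {
        (void)arg;
        printf("thread_num=%d\n", get_demo_private(100)->thread_num);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_key_create(&demo_key, free);          /* free() reclaims per-thread data */
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }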
diff --git a/erts/emulator/drivers/common/erl_efile.h b/erts/emulator/drivers/common/erl_efile.h
index 3097ded3f1..69ad02633c 100644
--- a/erts/emulator/drivers/common/erl_efile.h
+++ b/erts/emulator/drivers/common/erl_efile.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -67,6 +67,11 @@
#define FILENAMES_16BIT 1
#endif
+/* Use sendfilev() if it exists (Solaris) */
+#if !defined(HAVE_SENDFILE) && defined(HAVE_SENDFILEV)
+#define HAVE_SENDFILE
+#endif
+
/*
* An handle to an open directory. To be cast to the correct type
* in the system-dependent directory functions.
@@ -85,14 +90,15 @@ typedef struct _Efile_error {
/*
* This structure contains date and time.
*/
-typedef struct _Efile_time {
- unsigned year; /* (4 digits). */
- unsigned month; /* (1..12). */
- unsigned day; /* (1..31). */
- unsigned hour; /* (0..23). */
- unsigned minute; /* (0..59). */
- unsigned second; /* (0..59). */
-} Efile_time;
+
+//typedef struct _Efile_time {
+// unsigned year; /* (4 digits). */
+// unsigned month; /* (1..12). */
+// unsigned day; /* (1..31). */
+// unsigned hour; /* (0..23). */
+// unsigned minute; /* (0..59). */
+// unsigned second; /* (0..59). */
+//} Efile_time;
/*
@@ -111,13 +117,26 @@ typedef struct _Efile_info {
Uint32 inode; /* Inode number. */
Uint32 uid; /* User id of owner. */
Uint32 gid; /* Group id of owner. */
- Efile_time accessTime; /* Last time the file was accessed. */
- Efile_time modifyTime; /* Last time the file was modified. */
- Efile_time cTime; /* Creation time (Windows) or last
+ time_t accessTime; /* Last time the file was accessed. */
+ time_t modifyTime; /* Last time the file was modified. */
+ time_t cTime; /* Creation time (Windows) or last
* inode change (Unix).
*/
} Efile_info;
+
+#ifdef HAVE_SENDFILE
+/*
+ * Describes the structure of headers/trailers for sendfile
+ */
+struct t_sendfile_hdtl {
+ SysIOVec *headers;
+ int hdr_cnt;
+ SysIOVec *trailers;
+ int trl_cnt;
+};
+#endif /* HAVE_SENDFILE */
+
/*
* Functions.
*/
@@ -143,7 +162,7 @@ int efile_write_info(Efile_error* errInfo, Efile_info* pInfo, char *name);
int efile_write(Efile_error* errInfo, int flags, int fd,
char* buf, size_t count);
int efile_writev(Efile_error* errInfo, int flags, int fd,
- SysIOVec* iov, int iovcnt, size_t size);
+ SysIOVec* iov, int iovcnt);
int efile_read(Efile_error* errInfo, int flags, int fd,
char* buf, size_t count, size_t* pBytesRead);
int efile_seek(Efile_error* errInfo, int fd,
@@ -162,3 +181,7 @@ int efile_symlink(Efile_error* errInfo, char* old, char* new);
int efile_may_openfile(Efile_error* errInfo, char *name);
int efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length,
int advise);
+#ifdef HAVE_SENDFILE
+int efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd,
+ off_t *offset, Uint64 *nbytes, struct t_sendfile_hdtl *hdtl);
+#endif /* HAVE_SENDFILE */
diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c
index 741cb6ae20..a9303d55bc 100644
--- a/erts/emulator/drivers/common/gzio.c
+++ b/erts/emulator/drivers/common/gzio.c
@@ -27,7 +27,9 @@
#endif
#ifdef __WIN32__
+#ifndef HAVE_CONFLICTING_FREAD_DECLARATION
#define HAVE_CONFLICTING_FREAD_DECLARATION
+#endif
#define FILENAMES_16BIT 1
#endif
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 40c4a0df08..bf376f0494 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -80,6 +80,13 @@
#endif
#ifdef __WIN32__
+#define LLU "%I64u"
+#else
+#define LLU "%llu"
+#endif
+typedef unsigned long long llu_t;
+
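LLU and llu_t work around the Microsoft C runtime historically not understanding C99's %llu: on Windows the format piece is %I64u, elsewhere %llu, and call sites cast the value to llu_t so the vararg always matches the chosen format. Typical use (the variable is illustrative):

    #include <stdio.h>

    #ifdef __WIN32__
    #define LLU "%I64u"
    #else
    #define LLU "%llu"
    #endif
    typedef unsigned long long llu_t;

    int main(void)
    {
        unsigned long long sent = 123456789012ULL;
        /* String pasting builds the full format; the cast keeps the vararg honest. */
        printf("sent " LLU " bytes\n", (llu_t)sent);
        return 0;
    }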
+#ifdef __WIN32__
#define STRNCASECMP strncasecmp
#define INCL_WINSOCK_API_TYPEDEFS 1
@@ -110,6 +117,77 @@
#undef EWOULDBLOCK
#undef ETIMEDOUT
+#ifdef EINPROGRESS
+#undef EINPROGRESS
+#endif
+#ifdef EALREADY
+#undef EALREADY
+#endif
+#ifdef ENOTSOCK
+#undef ENOTSOCK
+#endif
+#ifdef EDESTADDRREQ
+#undef EDESTADDRREQ
+#endif
+#ifdef EMSGSIZE
+#undef EMSGSIZE
+#endif
+#ifdef EPROTOTYPE
+#undef EPROTOTYPE
+#endif
+#ifdef ENOPROTOOPT
+#undef ENOPROTOOPT
+#endif
+#ifdef EPROTONOSUPPORT
+#undef EPROTONOSUPPORT
+#endif
+#ifdef EOPNOTSUPP
+#undef EOPNOTSUPP
+#endif
+#ifdef EAFNOSUPPORT
+#undef EAFNOSUPPORT
+#endif
+#ifdef EADDRINUSE
+#undef EADDRINUSE
+#endif
+#ifdef EADDRNOTAVAIL
+#undef EADDRNOTAVAIL
+#endif
+#ifdef ENETDOWN
+#undef ENETDOWN
+#endif
+#ifdef ENETUNREACH
+#undef ENETUNREACH
+#endif
+#ifdef ENETRESET
+#undef ENETRESET
+#endif
+#ifdef ECONNABORTED
+#undef ECONNABORTED
+#endif
+#ifdef ECONNRESET
+#undef ECONNRESET
+#endif
+#ifdef ENOBUFS
+#undef ENOBUFS
+#endif
+#ifdef EISCONN
+#undef EISCONN
+#endif
+#ifdef ENOTCONN
+#undef ENOTCONN
+#endif
+#ifdef ECONNREFUSED
+#undef ECONNREFUSED
+#endif
+#ifdef ELOOP
+#undef ELOOP
+#endif
+#ifdef EHOSTUNREACH
+#undef EHOSTUNREACH
+#endif
+
+
#define HAVE_MULTICAST_SUPPORT
#define ERRNO_BLOCK WSAEWOULDBLOCK
@@ -280,6 +358,57 @@ static unsigned long one_value = 1;
# define SCTP_EOF MSG_EOF
#endif
+/* More Solaris 10 fixes: */
+#if ! HAVE_DECL_SCTP_CLOSED && HAVE_DECL_SCTPS_IDLE
+# define SCTP_CLOSED SCTPS_IDLE
+# undef HAVE_DECL_SCTP_CLOSED
+# define HAVE_DECL_SCTP_CLOSED 1
+#endif
+#if ! HAVE_DECL_SCTP_BOUND && HAVE_DECL_SCTPS_BOUND
+# define SCTP_BOUND SCTPS_BOUND
+# undef HAVE_DECL_SCTP_BOUND
+# define HAVE_DECL_SCTP_BOUND 1
+#endif
+#if ! HAVE_DECL_SCTP_LISTEN && HAVE_DECL_SCTPS_LISTEN
+# define SCTP_LISTEN SCTPS_LISTEN
+# undef HAVE_DECL_SCTP_LISTEN
+# define HAVE_DECL_SCTP_LISTEN 1
+#endif
+#if ! HAVE_DECL_SCTP_COOKIE_WAIT && HAVE_DECL_SCTPS_COOKIE_WAIT
+# define SCTP_COOKIE_WAIT SCTPS_COOKIE_WAIT
+# undef HAVE_DECL_SCTP_COOKIE_WAIT
+# define HAVE_DECL_SCTP_COOKIE_WAIT 1
+#endif
+#if ! HAVE_DECL_SCTP_COOKIE_ECHOED && HAVE_DECL_SCTPS_COOKIE_ECHOED
+# define SCTP_COOKIE_ECHOED SCTPS_COOKIE_ECHOED
+# undef HAVE_DECL_SCTP_COOKIE_ECHOED
+# define HAVE_DECL_SCTP_COOKIE_ECHOED 1
+#endif
+#if ! HAVE_DECL_SCTP_ESTABLISHED && HAVE_DECL_SCTPS_ESTABLISHED
+# define SCTP_ESTABLISHED SCTPS_ESTABLISHED
+# undef HAVE_DECL_SCTP_ESTABLISHED
+# define HAVE_DECL_SCTP_ESTABLISHED 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_PENDING && HAVE_DECL_SCTPS_SHUTDOWN_PENDING
+# define SCTP_SHUTDOWN_PENDING SCTPS_SHUTDOWN_PENDING
+# undef HAVE_DECL_SCTP_SHUTDOWN_PENDING
+# define HAVE_DECL_SCTP_SHUTDOWN_PENDING 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_SENT && HAVE_DECL_SCTPS_SHUTDOWN_SENT
+# define SCTP_SHUTDOWN_SENT SCTPS_SHUTDOWN_SENT
+# undef HAVE_DECL_SCTP_SHUTDOWN_SENT
+# define HAVE_DECL_SCTP_SHUTDOWN_SENT 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_RECEIVED && HAVE_DECL_SCTPS_SHUTDOWN_RECEIVED
+# define SCTP_SHUTDOWN_RECEIVED SCTPS_SHUTDOWN_RECEIVED
+# undef HAVE_DECL_SCTP_SHUTDOWN_RECEIVED
+# define HAVE_DECL_SCTP_SHUTDOWN_RECEIVED 1
+#endif
+#if ! HAVE_DECL_SCTP_SHUTDOWN_ACK_SENT && HAVE_DECL_SCTPS_SHUTDOWN_ACK_SENT
+# define SCTP_SHUTDOWN_ACK_SENT SCTPS_SHUTDOWN_ACK_SENT
+# undef HAVE_DECL_SCTP_SHUTDOWN_ACK_SENT
+# define HAVE_DECL_SCTP_SHUTDOWN_ACK_SENT 1
+#endif
/* New spelling in lksctp 2.6.22 or maybe even earlier:
* adaption -> adaptation
*/
@@ -294,12 +423,13 @@ static unsigned long one_value = 1;
# define sctp_adaptation_layer_event sctp_adaption_layer_event
#endif
-static void *h_libsctp = NULL;
#ifdef __GNUC__
static typeof(sctp_bindx) *p_sctp_bindx = NULL;
+static typeof(sctp_peeloff) *p_sctp_peeloff = NULL;
#else
static int (*p_sctp_bindx)(int sd, struct sockaddr *addrs,
int addrcnt, int flags) = NULL;
+static int (*p_sctp_peeloff)(int sd, sctp_assoc_t assoc_id) = NULL;
#endif
#endif /* SCTP supported */
@@ -393,6 +523,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
driver_select(port, e, mode | (on?ERL_DRV_USE:0), on)
#define sock_select(d, flags, onoff) do { \
+ ASSERT(!(d)->is_ignored); \
(d)->event_mask = (onoff) ? \
((d)->event_mask | (flags)) : \
((d)->event_mask & ~(flags)); \
@@ -415,6 +546,19 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
(((unsigned char*) (s))[1] << 8) | \
(((unsigned char*) (s))[0]))
+
+#ifdef VALGRIND
+# include <valgrind/memcheck.h>
+#else
+# define VALGRIND_MAKE_MEM_DEFINED(ptr,size)
+#endif
+
+/*
+ Magic errno value used locally for return of {error, system_limit}
+ - the emulator definition of SYSTEM_LIMIT is not available here.
+*/
+#define INET_ERRNO_SYSTEM_LIMIT (15 << 8)
+
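
On common systems real errno values are small positive integers, well below (15 << 8) = 3840, so a value with bits set above the low byte can serve as a driver-local pseudo-errno that never collides with a real one; the errno_str() helper added further down in this patch turns it into the "system_limit" string. A standalone sketch of that mapping, where demo_errno_id stands in for erl_errno_id (which lives elsewhere in ERTS and returns lowercase atom names rather than strerror() text):

    #include <stdio.h>
    #include <string.h>

    #define INET_ERRNO_SYSTEM_LIMIT (15 << 8)        /* 3840 */

    /* stand-in for erl_errno_id(); only for this sketch */
    static const char *demo_errno_id(int err) { return strerror(err); }

    static const char *demo_errno_str(int err)
    {
        switch (err) {
        case INET_ERRNO_SYSTEM_LIMIT: return "system_limit";
        default:                      return demo_errno_id(err);
        }
    }

    int main(void)
    {
        printf("%d -> %s\n", INET_ERRNO_SYSTEM_LIMIT,
               demo_errno_str(INET_ERRNO_SYSTEM_LIMIT));
        printf("%d -> %s\n", 2, demo_errno_str(2));  /* ENOENT on most systems */
        return 0;
    }
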
/*----------------------------------------------------------------------------
** Interface constants.
**
@@ -427,7 +571,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_AF_ANY 3 /* INADDR_ANY or IN6ADDR_ANY_INIT */
#define INET_AF_LOOPBACK 4 /* INADDR_LOOPBACK or IN6ADDR_LOOPBACK_INIT */
-/* INET_REQ_GETTYPE enumeration */
+/* open and INET_REQ_GETTYPE enumeration */
#define INET_TYPE_STREAM 1
#define INET_TYPE_DGRAM 2
#define INET_TYPE_SEQPACKET 3
@@ -484,16 +628,21 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_REQ_IFSET 23
#define INET_REQ_SUBSCRIBE 24
#define INET_REQ_GETIFADDRS 25
+#define INET_REQ_ACCEPT 26
+#define INET_REQ_LISTEN 27
+#define INET_REQ_IGNOREFD 28
+
/* TCP requests */
-#define TCP_REQ_ACCEPT 40
-#define TCP_REQ_LISTEN 41
+/* #define TCP_REQ_ACCEPT 40 MOVED */
+/* #define TCP_REQ_LISTEN 41 MERGED */
#define TCP_REQ_RECV 42
#define TCP_REQ_UNRECV 43
#define TCP_REQ_SHUTDOWN 44
/* UDP and SCTP requests */
#define PACKET_REQ_RECV 60 /* Common for UDP and SCTP */
-#define SCTP_REQ_LISTEN 61 /* Different from TCP; not for UDP */
+/* #define SCTP_REQ_LISTEN 61 MERGED Different from TCP; not for UDP */
#define SCTP_REQ_BINDX 62 /* Multi-home SCTP bind */
+#define SCTP_REQ_PEELOFF 63
/* INET_REQ_SUBSCRIBE sub-requests */
#define INET_SUBS_EMPTY_OUT_Q 1
@@ -507,7 +656,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
/* *_REQ_* replies */
#define INET_REP_ERROR 0
#define INET_REP_OK 1
-#define INET_REP_SCTP 2
+#define INET_REP 2
/* INET_REQ_SETOPTS and INET_REQ_GETOPTS options */
#define INET_OPT_REUSEADDR 0 /* enable/disable local address reuse */
@@ -628,10 +777,14 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
** End of interface constants.
**--------------------------------------------------------------------------*/
-#define INET_STATE_CLOSED 0
-#define INET_STATE_OPEN (INET_F_OPEN)
-#define INET_STATE_BOUND (INET_STATE_OPEN | INET_F_BOUND)
-#define INET_STATE_CONNECTED (INET_STATE_BOUND | INET_F_ACTIVE)
+#define INET_STATE_CLOSED (0)
+#define INET_STATE_OPEN (INET_F_OPEN)
+#define INET_STATE_BOUND (INET_STATE_OPEN | INET_F_BOUND)
+#define INET_STATE_CONNECTED (INET_STATE_BOUND | INET_F_ACTIVE)
+#define INET_STATE_LISTENING (INET_STATE_BOUND | INET_F_LISTEN)
+#define INET_STATE_CONNECTING (INET_STATE_BOUND | INET_F_CON)
+#define INET_STATE_ACCEPTING (INET_STATE_LISTENING | INET_F_ACC)
+#define INET_STATE_MULTI_ACCEPTING (INET_STATE_ACCEPTING | INET_F_MULTI_CLIENT)
#define IS_OPEN(d) \
(((d)->state & INET_F_OPEN) == INET_F_OPEN)
@@ -666,6 +819,11 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
/* Max interface name */
#define INET_IFNAMSIZ 16
+/* INET Ignore states */
+#define INET_IGNORE_NONE 0
+#define INET_IGNORE_READ 1
+#define INET_IGNORE_WRITE (1 << 1)
+
/* Max length of Erlang Term Buffer (for outputting structured terms): */
#ifdef HAVE_SCTP
#define PACKET_ERL_DRV_TERM_DATA_LEN 512
@@ -674,7 +832,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#endif
-#define BIN_REALLOC_LIMIT(x) (((x)*3)/4) /* 75% */
+#define BIN_REALLOC_MARGIN(x) ((x)/4) /* 25% */
/* The general purpose sockaddr */
typedef union {
@@ -805,20 +963,13 @@ typedef struct {
double send_avg; /* average packet size sent */
subs_list empty_out_q_subs; /* Empty out queue subscribers */
+    int is_ignored;            /* if the fd is ignored by inet_drv;
+                                  set when the fd is handed over for
+                                  use outside of inet_drv. */
} inet_descriptor;
-#define TCP_STATE_CLOSED INET_STATE_CLOSED
-#define TCP_STATE_OPEN (INET_F_OPEN)
-#define TCP_STATE_BOUND (TCP_STATE_OPEN | INET_F_BOUND)
-#define TCP_STATE_CONNECTED (TCP_STATE_BOUND | INET_F_ACTIVE)
-#define TCP_STATE_LISTEN (TCP_STATE_BOUND | INET_F_LISTEN)
-#define TCP_STATE_CONNECTING (TCP_STATE_BOUND | INET_F_CON)
-#define TCP_STATE_ACCEPTING (TCP_STATE_LISTEN | INET_F_ACC)
-#define TCP_STATE_MULTI_ACCEPTING (TCP_STATE_ACCEPTING | INET_F_MULTI_CLIENT)
-
-
#define TCP_MAX_PACKET_SIZE 0x4000000 /* 64 M */
#define MAX_VSIZE 16 /* Max number of entries allowed in an I/O
@@ -827,13 +978,14 @@ typedef struct {
static int tcp_inet_init(void);
static void tcp_inet_stop(ErlDrvData);
-static void tcp_inet_command(ErlDrvData, char*, int);
+static void tcp_inet_command(ErlDrvData, char*, ErlDrvSizeT);
static void tcp_inet_commandv(ErlDrvData, ErlIOVec*);
static void tcp_inet_flush(ErlDrvData drv_data);
static void tcp_inet_drv_input(ErlDrvData, ErlDrvEvent);
static void tcp_inet_drv_output(ErlDrvData data, ErlDrvEvent event);
static ErlDrvData tcp_inet_start(ErlDrvPort, char* command);
-static int tcp_inet_ctl(ErlDrvData, unsigned int, char*, int, char**, int);
+static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData, unsigned int,
+ char*, ErlDrvSizeT, char**, ErlDrvSizeT);
static void tcp_inet_timeout(ErlDrvData);
static void tcp_inet_process_exit(ErlDrvData, ErlDrvMonitor *);
static void inet_stop_select(ErlDrvEvent, void*);
@@ -874,25 +1026,19 @@ static struct erl_drv_entry tcp_inet_driver_entry =
inet_stop_select
};
-#define PACKET_STATE_CLOSED INET_STATE_CLOSED
-#define PACKET_STATE_OPEN (INET_F_OPEN)
-#define PACKET_STATE_BOUND (PACKET_STATE_OPEN | INET_F_BOUND)
-#define SCTP_STATE_LISTEN (PACKET_STATE_BOUND | INET_F_LISTEN)
-#define SCTP_STATE_CONNECTING (PACKET_STATE_BOUND | INET_F_CON)
-#define PACKET_STATE_CONNECTED (PACKET_STATE_BOUND | INET_F_ACTIVE)
static int packet_inet_init(void);
static void packet_inet_stop(ErlDrvData);
-static void packet_inet_command(ErlDrvData, char*, int);
+static void packet_inet_command(ErlDrvData, char*, ErlDrvSizeT);
static void packet_inet_drv_input(ErlDrvData data, ErlDrvEvent event);
static void packet_inet_drv_output(ErlDrvData data, ErlDrvEvent event);
static ErlDrvData udp_inet_start(ErlDrvPort, char* command);
#ifdef HAVE_SCTP
static ErlDrvData sctp_inet_start(ErlDrvPort, char* command);
#endif
-static int packet_inet_ctl(ErlDrvData, unsigned int, char*,
- int, char**, int);
+static ErlDrvSSizeT packet_inet_ctl(ErlDrvData, unsigned int, char*,
+ ErlDrvSizeT, char**, ErlDrvSizeT);
static void packet_inet_timeout(ErlDrvData);
#ifdef __WIN32__
static void packet_inet_event(ErlDrvData, ErlDrvEvent);
@@ -986,7 +1132,7 @@ typedef struct {
} tcp_descriptor;
/* send function */
-static int tcp_send(tcp_descriptor* desc, char* ptr, int len);
+static int tcp_send(tcp_descriptor* desc, char* ptr, ErlDrvSizeT len);
static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev);
static int tcp_recv(tcp_descriptor* desc, int request_len);
static int tcp_deliver(tcp_descriptor* desc, int len);
@@ -997,6 +1143,9 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event);
typedef struct {
inet_descriptor inet; /* common data structure (DON'T MOVE) */
int read_packets; /* Number of packets to read per invocation */
+ int i_bufsz; /* current input buffer size */
+ ErlDrvBinary* i_buf; /* current binary buffer */
+ char* i_ptr; /* current pos in buf */
} udp_descriptor;
@@ -1050,7 +1199,7 @@ static ErlDrvTermData am_tos;
static int inet_init(void);
-static int ctl_reply(int, char*, int, char**, int);
+static ErlDrvSSizeT ctl_reply(int, char*, ErlDrvSizeT, char**, ErlDrvSizeT);
struct erl_drv_entry inet_driver_entry =
{
@@ -1060,7 +1209,23 @@ struct erl_drv_entry inet_driver_entry =
NULL, /* output */
NULL, /* ready_input */
NULL, /* ready_output */
- "inet"
+ "inet",
+    NULL,			/* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+    0,				/* driver_flags */
+    NULL,			/* handle2 */
+    NULL,			/* process_exit */
+    NULL,			/* stop_select */
};
/* XXX: is this a driver interface function ??? */
@@ -1074,7 +1239,7 @@ void erl_exit(int n, char*, ...);
#ifdef FATAL_MALLOC
-static void *alloc_wrapper(size_t size){
+static void *alloc_wrapper(ErlDrvSizeT size){
void *ret = driver_alloc(size);
if(ret == NULL)
erl_exit(1,"Out of virtual memory in malloc (%s)", __FILE__);
@@ -1082,7 +1247,7 @@ static void *alloc_wrapper(size_t size){
}
#define ALLOC(X) alloc_wrapper(X)
-static void *realloc_wrapper(void *current, size_t size){
+static void *realloc_wrapper(void *current, ErlDrvSizeT size){
void *ret = driver_realloc(current,size);
if(ret == NULL)
erl_exit(1,"Out of virtual memory in realloc (%s)", __FILE__);
@@ -1311,11 +1476,11 @@ static InetDrvBufStk *get_bufstk(void)
return bs;
}
-static ErlDrvBinary* alloc_buffer(long minsz)
+static ErlDrvBinary* alloc_buffer(ErlDrvSizeT minsz)
{
InetDrvBufStk *bs = get_bufstk();
- DEBUGF(("alloc_buffer: %ld\r\n", minsz));
+ DEBUGF(("alloc_buffer: "LLU"\r\n", (llu_t)minsz));
if (bs && bs->buf.pos > 0) {
long size;
@@ -1391,7 +1556,7 @@ static void release_buffer(ErlDrvBinary* buf)
}
}
-static ErlDrvBinary* realloc_buffer(ErlDrvBinary* buf, long newsz)
+static ErlDrvBinary* realloc_buffer(ErlDrvBinary* buf, ErlDrvSizeT newsz)
{
return driver_realloc_binary(buf, newsz);
}
@@ -1420,8 +1585,9 @@ static ErlDrvData dummy_start(ErlDrvPort port, char* command)
return (ErlDrvData)port;
}
-static int dummy_ctl(ErlDrvData data, unsigned int cmd, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT dummy_ctl(ErlDrvData data, unsigned int cmd,
+ char* buf, ErlDrvSizeT len, char** rbuf,
+ ErlDrvSizeT rsize)
{
static char error[] = "no_winsock2";
@@ -1429,7 +1595,7 @@ static int dummy_ctl(ErlDrvData data, unsigned int cmd, char* buf, int len,
return ctl_reply(INET_REP_ERROR, error, sizeof(error), rbuf, rsize);
}
-static void dummy_command(ErlDrvData data, char* buf, int len)
+static void dummy_command(ErlDrvData data, char* buf, ErlDrvSizeT len)
{
}
@@ -1485,8 +1651,20 @@ static struct erl_drv_entry dummy_sctp_driver_entry =
#endif
+/* return lowercase string form of errno value */
+static char *errno_str(int err)
+{
+ switch (err) {
+ case INET_ERRNO_SYSTEM_LIMIT:
+ return "system_limit";
+ default:
+ return erl_errno_id(err);
+ }
+}
+
/* general control reply function */
-static int ctl_reply(int rep, char* buf, int len, char** rbuf, int rsize)
+static ErlDrvSSizeT ctl_reply(int rep, char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
char* ptr;
@@ -1502,18 +1680,14 @@ static int ctl_reply(int rep, char* buf, int len, char** rbuf, int rsize)
}
/* general control error reply function */
-static int ctl_error(int err, char** rbuf, int rsize)
+static ErlDrvSSizeT ctl_error(int err, char** rbuf, ErlDrvSizeT rsize)
{
- char response[256]; /* Response buffer. */
- char* s;
- char* t;
+ char* s = errno_str(err);
- for (s = erl_errno_id(err), t = response; *s; s++, t++)
- *t = tolower(*s);
- return ctl_reply(INET_REP_ERROR, response, t-response, rbuf, rsize);
+ return ctl_reply(INET_REP_ERROR, s, strlen(s), rbuf, rsize);
}
-static int ctl_xerror(char* xerr, char** rbuf, int rsize)
+static ErlDrvSSizeT ctl_xerror(char* xerr, char** rbuf, ErlDrvSizeT rsize)
{
int n = strlen(xerr);
return ctl_reply(INET_REP_ERROR, xerr, n, rbuf, rsize);
@@ -1522,14 +1696,7 @@ static int ctl_xerror(char* xerr, char** rbuf, int rsize)
static ErlDrvTermData error_atom(int err)
{
- char errstr[256];
- char* s;
- char* t;
-
- for (s = erl_errno_id(err), t = errstr; *s; s++, t++)
- *t = tolower(*s);
- *t = '\0';
- return driver_mk_atom(errstr);
+ return driver_mk_atom(errno_str(err));
}
@@ -1851,6 +2018,26 @@ static int inet_reply_ok(inet_descriptor* desc)
return driver_send_term(desc->port, caller, spec, i);
}
+#ifdef HAVE_SCTP
+static int inet_reply_ok_port(inet_descriptor* desc, ErlDrvTermData dport)
+{
+ ErlDrvTermData spec[2*LOAD_ATOM_CNT + 2*LOAD_PORT_CNT + 2*LOAD_TUPLE_CNT];
+ ErlDrvTermData caller = desc->caller;
+ int i = 0;
+
+ i = LOAD_ATOM(spec, i, am_inet_reply);
+ i = LOAD_PORT(spec, i, desc->dport);
+ i = LOAD_ATOM(spec, i, am_ok);
+ i = LOAD_PORT(spec, i, dport);
+ i = LOAD_TUPLE(spec, i, 2);
+ i = LOAD_TUPLE(spec, i, 3);
+ ASSERT(i == sizeof(spec)/sizeof(*spec));
+
+ desc->caller = 0;
+ return driver_send_term(desc->port, caller, spec, i);
+}
+#endif
+
/* send:
** {inet_reply, S, {error, Reason}}
*/
@@ -2389,14 +2576,19 @@ static ErlDrvTermData am_sctp_rtoinfo, /* Option names */
am_active, am_inactive,
/* For #sctp_status{}: */
- am_empty, am_closed,
+# if HAVE_DECL_SCTP_EMPTY
+ am_empty,
+# endif
+# if HAVE_DECL_SCTP_BOUND
+ am_bound,
+# endif
+# if HAVE_DECL_SCTP_LISTEN
+ am_listen,
+# endif
am_cookie_wait, am_cookie_echoed,
am_established, am_shutdown_pending,
am_shutdown_sent, am_shutdown_received,
am_shutdown_ack_sent;
- /* Not yet implemented in the Linux kernel:
- ** am_bound, am_listen;
- */
/*
** Parsing of "sctp_sndrcvinfo": ancillary data coming with received msgs.
@@ -2665,7 +2857,8 @@ static int sctp_parse_async_event
# ifdef HAVE_STRUCT_SCTP_REMOTE_ERROR_SRE_DATA
chunk = (char*) (&(sptr->sre_data));
# else
- chunk = ((char*)sptr) + sizeof(*sptr);
+ chunk = ((char*) &(sptr->sre_assoc_id))
+ + sizeof(sptr->sre_assoc_id);
# endif
chlen = sptr->sre_length - (chunk - (char *)sptr);
i = sctp_parse_error_chunk(spec, i, chunk, chlen);
@@ -2716,7 +2909,8 @@ static int sctp_parse_async_event
# ifdef HAVE_STRUCT_SCTP_SEND_FAILED_SSF_DATA
chunk = (char*) (&(sptr->ssf_data));
# else
- chunk = ((char*)sptr) + sizeof(*sptr);
+ chunk = ((char*) &(sptr->ssf_assoc_id))
+ + sizeof(sptr->ssf_assoc_id);
# endif
chlen = sptr->ssf_length - (chunk - (char*) sptr);
choff = chunk - bin->orig_bytes;
@@ -3390,8 +3584,15 @@ static void inet_init_sctp(void) {
INIT_ATOM(inactive);
/* For #sctp_status{}: */
+# if HAVE_DECL_SCTP_EMPTY
INIT_ATOM(empty);
- INIT_ATOM(closed);
+# endif
+# if HAVE_DECL_SCTP_BOUND
+ INIT_ATOM(bound);
+# endif
+# if HAVE_DECL_SCTP_LISTEN
+ INIT_ATOM(listen);
+# endif
INIT_ATOM(cookie_wait);
INIT_ATOM(cookie_echoed);
INIT_ATOM(established);
@@ -3399,10 +3600,6 @@ static void inet_init_sctp(void) {
INIT_ATOM(shutdown_sent);
INIT_ATOM(shutdown_received);
INIT_ATOM(shutdown_ack_sent);
- /* Not yet implemented in the Linux kernel:
- ** INIT_ATOM(bound);
- ** INIT_ATOM(listen);
- */
}
#endif /* HAVE_SCTP */
@@ -3453,17 +3650,32 @@ static int inet_init()
/* Check the size of SCTP AssocID -- currently both this driver and the
Erlang part require 32 bit: */
ASSERT(sizeof(sctp_assoc_t)==ASSOC_ID_LEN);
-# ifndef LIBSCTP
-# error LIBSCTP not defined
-# endif
- if (erts_sys_ddll_open_noext(STRINGIFY(LIBSCTP), &h_libsctp, NULL) == 0) {
- void *ptr;
- if (erts_sys_ddll_sym(h_libsctp, "sctp_bindx", &ptr) == 0) {
- p_sctp_bindx = ptr;
- inet_init_sctp();
- add_driver_entry(&sctp_inet_driver_entry);
+# if defined(HAVE_SCTP_BINDX) && defined (HAVE_SCTP_PEELOFF)
+ p_sctp_bindx = sctp_bindx;
+ p_sctp_peeloff = sctp_peeloff;
+ inet_init_sctp();
+ add_driver_entry(&sctp_inet_driver_entry);
+# else
+# ifndef LIBSCTP
+# error LIBSCTP not defined
+# endif
+ {
+ static void *h_libsctp = NULL;
+
+ if (erts_sys_ddll_open_noext(STRINGIFY(LIBSCTP), &h_libsctp, NULL)
+ == 0) {
+ void *ptr;
+ if (erts_sys_ddll_sym(h_libsctp, "sctp_bindx", &ptr) == 0) {
+ p_sctp_bindx = ptr;
+ inet_init_sctp();
+ add_driver_entry(&sctp_inet_driver_entry);
+ if (erts_sys_ddll_sym(h_libsctp, "sctp_peeloff", &ptr) == 0) {
+ p_sctp_peeloff = ptr;
+ }
+ }
}
}
+# endif
#endif
/* remove the dummy inet driver */
@@ -3485,7 +3697,8 @@ static int inet_init()
** and is set to actual length of dst on return
** return NULL on error and ptr after port address on success
*/
-static char* inet_set_address(int family, inet_address* dst, char* src, int* len)
+static char* inet_set_address(int family, inet_address* dst,
+ char* src, ErlDrvSizeT* len)
{
short port;
@@ -3521,7 +3734,7 @@ static char* inet_set_address(int family, inet_address* dst, char* src, int* len
** src = [TAG,P1,P0,X1,X2,...] when TAG = INET_AF_INET | INET_AF_INET6
*/
static char *inet_set_faddress(int family, inet_address* dst,
- char *src, int* len) {
+ char *src, ErlDrvSizeT* len) {
int tag;
if (*len < 1) return NULL;
@@ -3636,7 +3849,13 @@ static void desc_close(inet_descriptor* desc)
desc->forced_events = 0;
desc->send_would_block = 0;
#endif
- driver_select(desc->port, (ErlDrvEvent)(long)desc->event, ERL_DRV_USE, 0);
+    /* We should close the fd here, but the other driver might still
+     * be selecting on it. */
+ if (!desc->is_ignored)
+ driver_select(desc->port,(ErlDrvEvent)(long)desc->event,
+ ERL_DRV_USE, 0);
+ else
+ inet_stop_select((ErlDrvEvent)(long)desc->event,NULL);
desc->event = INVALID_EVENT; /* closed by stop_select callback */
desc->s = INVALID_SOCKET;
desc->event_mask = 0;
@@ -3679,8 +3898,8 @@ static int erl_inet_close(inet_descriptor* desc)
}
-static int inet_ctl_open(inet_descriptor* desc, int domain, int type,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type,
+ char** rbuf, ErlDrvSizeT rsize)
{
if (desc->state != INET_STATE_CLOSED)
return ctl_xerror(EXBADSEQ, rbuf, rsize);
@@ -3700,8 +3919,8 @@ static int inet_ctl_open(inet_descriptor* desc, int domain, int type,
/* as inet_open but pass in an open socket (MUST BE OF RIGHT TYPE) */
-static int inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
- SOCKET s, char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
+ SOCKET s, char** rbuf, ErlDrvSizeT rsize)
{
inet_address name;
unsigned int sz = sizeof(name);
@@ -3709,6 +3928,8 @@ static int inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
/* check that it is a socket and that the socket is bound */
if (IS_SOCKET_ERROR(sock_name(s, (struct sockaddr*) &name, &sz)))
return ctl_error(sock_errno(), rbuf, rsize);
+ if (name.sa.sa_family != domain)
+ return ctl_error(EINVAL, rbuf, rsize);
desc->s = s;
if ((desc->event = sock_create_event(desc)) == INVALID_EVENT)
return ctl_error(sock_errno(), rbuf, rsize);
@@ -3874,6 +4095,7 @@ static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr)
addr->sa_family = AF_INET;
return ptr + sizeof(struct in_addr);
}
+#if defined(HAVE_IN6) && defined(AF_INET6)
case INET_AF_INET6: {
struct in6_addr *p = &((struct sockaddr_in6*)addr)->sin6_addr;
buf_check(ptr,end,sizeof(struct in6_addr));
@@ -3881,6 +4103,7 @@ static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr)
addr->sa_family = AF_INET6;
return ptr + sizeof(struct in6_addr);
}
+#endif
}
error:
return NULL;
@@ -3903,14 +4126,15 @@ static char* buf_to_sockaddr(char* ptr, char* end, struct sockaddr* addr)
#if defined(__WIN32__) && defined(SIO_GET_INTERFACE_LIST)
-static int inet_ctl_getiflist(inet_descriptor* desc, char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_getiflist(inet_descriptor* desc,
+ char** rbuf, ErlDrvSizeT rsize)
{
char ifbuf[BUFSIZ];
char sbuf[BUFSIZ];
char* sptr;
INTERFACE_INFO* ifp;
DWORD len;
- int n;
+ ErlDrvSizeT n;
int err;
ifp = (INTERFACE_INFO*) ifbuf;
@@ -3940,8 +4164,8 @@ static int inet_ctl_getiflist(inet_descriptor* desc, char** rbuf, int rsize)
/* input is an ip-address in string format i.e A.B.C.D
** scan the INTERFACE_LIST to get the options
*/
-static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_ifget(inet_descriptor* desc, char* buf,
+ ErlDrvSizeT len, char** rbuf, ErlDrvSizeT rsize)
{
char ifbuf[BUFSIZ];
int n;
@@ -4042,8 +4266,9 @@ static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
}
/* not supported */
-static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_ifset(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
@@ -4086,12 +4311,13 @@ static void free_ifconf(struct ifconf *ifcp) {
FREE(ifcp->ifc_buf);
}
-static int inet_ctl_getiflist(inet_descriptor* desc, char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_getiflist(inet_descriptor* desc,
+ char** rbuf, ErlDrvSizeT rsize)
{
struct ifconf ifc;
struct ifreq *ifrp;
char *sbuf, *sp;
- int i;
+ ErlDrvSizeT i;
/* Courtesy of Per Bergqvist and W. Richard Stevens */
@@ -4103,7 +4329,7 @@ static int inet_ctl_getiflist(inet_descriptor* desc, char** rbuf, int rsize)
*sp++ = INET_REP_OK;
i = 0;
for (;;) {
- int n;
+ ErlDrvSizeT n;
ifrp = (struct ifreq *) VOIDP(ifc.ifc_buf + i);
n = sizeof(ifrp->ifr_name) + SIZEA(ifrp->ifr_addr);
@@ -4129,13 +4355,39 @@ static int inet_ctl_getiflist(inet_descriptor* desc, char** rbuf, int rsize)
return sp - sbuf;
}
+#ifdef HAVE_LIBDLPI_H
+#include <libdlpi.h>
+static int hwaddr_libdlpi_lookup(const char *ifnm,
+ uchar_t *addr, size_t *alen)
+{
+ dlpi_handle_t handle;
+ dlpi_info_t linkinfo;
+ int ret = -1;
+
+ if (dlpi_open(ifnm, &handle, 0) != DLPI_SUCCESS) {
+ return -1;
+ }
+
+ if (dlpi_get_physaddr(handle, DL_CURR_PHYS_ADDR,
+ addr, alen) == DLPI_SUCCESS &&
+ dlpi_info(handle, &linkinfo, 0) == DLPI_SUCCESS)
+ {
+ ret = 0;
+ }
+
+ dlpi_close(handle);
+ return ret;
+}
+#endif
+
/* FIXME: temporary hack */
#ifndef IFHWADDRLEN
#define IFHWADDRLEN 6
#endif
-static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_ifget(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
char sbuf[BUFSIZ];
char* sptr;
@@ -4164,7 +4416,24 @@ static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
break;
case INET_IFOPT_HWADDR: {
-#ifdef SIOCGIFHWADDR
+#ifdef HAVE_LIBDLPI_H
+        /*
+        ** OpenSolaris has SIOCGIFHWADDR, but no ifr_hwaddr member.
+        ** The proper way to get the MAC address is to use libdlpi.
+        */
+ uchar_t addr[DLPI_PHYSADDR_MAX];
+ size_t alen = sizeof(addr);
+
+ if (hwaddr_libdlpi_lookup(ifreq.ifr_name, addr, &alen) == 0) {
+ buf_check(sptr, s_end, 1+2+alen);
+ *sptr++ = INET_IFOPT_HWADDR;
+ put_int16(alen, sptr);
+ sptr += 2;
+ sys_memcpy(sptr, addr, alen);
+ sptr += alen;
+ }
+#elif defined(SIOCGIFHWADDR) && defined(HAVE_STRUCT_IFREQ_IFR_HWADDR)
if (ioctl(desc->s, SIOCGIFHWADDR, (char *)&ifreq) < 0)
break;
buf_check(sptr, s_end, 1+2+IFHWADDRLEN);
@@ -4173,7 +4442,7 @@ static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
/* raw memcpy (fix include autoconf later) */
sys_memcpy(sptr, (char*)(&ifreq.ifr_hwaddr.sa_data), IFHWADDRLEN);
sptr += IFHWADDRLEN;
-#elif defined(SIOCGENADDR)
+#elif defined(SIOCGENADDR) && defined(HAVE_STRUCT_IFREQ_IFR_ENADDR)
if (ioctl(desc->s, SIOCGENADDR, (char *)&ifreq) < 0)
break;
buf_check(sptr, s_end, 1+2+sizeof(ifreq.ifr_enaddr));
@@ -4312,8 +4581,9 @@ static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
}
-static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_ifset(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
struct ifreq ifreq;
int namlen;
@@ -4336,19 +4606,19 @@ static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
break;
case INET_IFOPT_HWADDR: {
- unsigned int len;
+ unsigned int hwalen;
buf_check(buf, b_end, 2);
- len = get_int16(buf); buf += 2;
- buf_check(buf, b_end, len);
+ hwalen = get_int16(buf); buf += 2;
+ buf_check(buf, b_end, hwalen);
#ifdef SIOCSIFHWADDR
/* raw memcpy (fix include autoconf later) */
sys_memset((char*)(&ifreq.ifr_hwaddr.sa_data),
'\0', sizeof(ifreq.ifr_hwaddr.sa_data));
- sys_memcpy((char*)(&ifreq.ifr_hwaddr.sa_data), buf, len);
+ sys_memcpy((char*)(&ifreq.ifr_hwaddr.sa_data), buf, hwalen);
(void) ioctl(desc->s, SIOCSIFHWADDR, (char *)&ifreq);
#endif
- buf += len;
+ buf += hwalen;
break;
}
@@ -4434,21 +4704,24 @@ static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
#else
-static int inet_ctl_getiflist(inet_descriptor* desc, char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_getiflist(inet_descriptor* desc,
+ char** rbuf, ErlDrvSizeT rsize)
{
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
-static int inet_ctl_ifget(inet_descriptor* desc, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_ifget(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
-static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl_ifset(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
@@ -4457,6 +4730,7 @@ static int inet_ctl_ifset(inet_descriptor* desc, char* buf, int len,
+#if defined(__WIN32__) || defined(HAVE_GETIFADDRS)
/* Latin-1 to utf8 */
static int utf8_len(const char *c, int m) {
@@ -4479,6 +4753,7 @@ static void utf8_encode(const char *c, int m, char *p) {
}
}
}
+#endif
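
The helpers guarded above convert Latin-1 strings (such as interface names) to UTF-8 before they are reported back to Erlang: code points below 0x80 stay one byte, anything above becomes the usual two-byte sequence. A standalone sketch of that conversion rule (the driver's own utf8_len/utf8_encode may differ in detail):

    #include <stdio.h>

    /* bytes needed for m Latin-1 chars once encoded as UTF-8 */
    static int latin1_utf8_len(const unsigned char *c, int m)
    {
        int i, l = 0;
        for (i = 0; i < m; i++)
            l += (c[i] < 0x80) ? 1 : 2;
        return l;
    }

    /* encode m Latin-1 chars into p; p must hold latin1_utf8_len() bytes */
    static void latin1_utf8_encode(const unsigned char *c, int m, unsigned char *p)
    {
        int i;
        for (i = 0; i < m; i++) {
            if (c[i] < 0x80) {
                *p++ = c[i];
            } else {
                *p++ = 0xC0 | (c[i] >> 6);
                *p++ = 0x80 | (c[i] & 0x3F);
            }
        }
    }

    int main(void)
    {
        unsigned char name[] = { 'e', 't', 'h', 0xE5 };   /* "ethå" in Latin-1 */
        unsigned char out[8];
        int i, n = latin1_utf8_len(name, 4);
        latin1_utf8_encode(name, 4, out);
        printf("encoded length: %d\n", n);                /* 5 */
        for (i = 0; i < n; i++) printf("%02X ", out[i]);  /* 65 74 68 C3 A5 */
        printf("\n");
        return 0;
    }
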
#if defined(__WIN32__)
@@ -4503,8 +4778,8 @@ int eq_masked_bytes(char *a, char *b, int pref_len) {
return !0;
}
-static int inet_ctl_getifaddrs(inet_descriptor* desc_p,
- char **rbuf_pp, int rsize)
+static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
+ char **rbuf_pp, ErlDrvSizeT rsize)
{
int i;
DWORD ret, n;
@@ -4514,7 +4789,7 @@ static int inet_ctl_getifaddrs(inet_descriptor* desc_p,
char *buf_p;
char *buf_alloc_p;
- int buf_size =512;
+ ErlDrvSizeT buf_size = 512;
# define BUF_ENSURE(Size) \
do { \
int NEED_, GOT_ = buf_p - buf_alloc_p; \
@@ -4888,12 +5163,12 @@ done:
#elif defined(HAVE_GETIFADDRS)
-static int inet_ctl_getifaddrs(inet_descriptor* desc_p,
- char **rbuf_pp, int rsize)
+static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
+ char **rbuf_pp, ErlDrvSizeT rsize)
{
struct ifaddrs *ifa_p, *ifa_free_p;
- int buf_size;
+ ErlDrvSizeT buf_size;
char *buf_p;
char *buf_alloc_p;
@@ -4996,8 +5271,8 @@ static int inet_ctl_getifaddrs(inet_descriptor* desc_p,
#else
-static int inet_ctl_getifaddrs(inet_descriptor* desc_p,
- char **rbuf_pp, int rsize)
+static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
+ char **rbuf_pp, ErlDrvSizeT rsize)
{
return ctl_error(ENOTSUP, rbuf_pp, rsize);
}
@@ -5798,7 +6073,7 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
case SCTP_OPT_PRIMARY_ADDR:
case SCTP_OPT_SET_PEER_PRIMARY_ADDR:
{
- int alen;
+ ErlDrvSizeT alen;
char *after;
CHKLEN(curr, ASSOC_ID_LEN);
@@ -5840,7 +6115,7 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
}
case SCTP_OPT_PEER_ADDR_PARAMS:
{
- int alen;
+ ErlDrvSizeT alen;
char *after;
# ifdef HAVE_STRUCT_SCTP_PADDRPARAMS_SPP_FLAGS
int eflags, cflags, hb_enable, hb_disable,
@@ -5927,6 +6202,7 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
type = SCTP_DEFAULT_SEND_PARAM;
arg_ptr = (char*) (&arg.sri);
arg_sz = sizeof ( arg.sri);
+ VALGRIND_MAKE_MEM_DEFINED(arg_ptr, arg_sz); /*suppress "uninitialised bytes"*/
break;
}
case SCTP_OPT_EVENTS:
@@ -5970,7 +6246,7 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
proto = IPPROTO_SCTP;
type = SCTP_DELAYED_ACK_TIME;
arg_ptr = (char*) (&arg.av);
- arg_sz = sizeof ( arg.es);
+ arg_sz = sizeof ( arg.av);
break;
}
# endif
@@ -6016,8 +6292,9 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
** ptr should point to a buffer with 9*len +1 to be safe!!
*/
-static int inet_fill_opts(inet_descriptor* desc,
- char* buf, int len, char** dest, int destlen)
+static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT len,
+ char** dest, ErlDrvSizeT destlen)
{
int type;
int proto;
@@ -6027,8 +6304,8 @@ static int inet_fill_opts(inet_descriptor* desc,
char* arg_ptr;
unsigned int arg_sz;
char *ptr = NULL;
- int dest_used = 0;
- int dest_allocated = destlen;
+ ErlDrvSizeT dest_used = 0;
+ ErlDrvSizeT dest_allocated = destlen;
char *orig_dest = *dest;
/* Ptr is a name parameter */
@@ -6043,7 +6320,7 @@ static int inet_fill_opts(inet_descriptor* desc,
#define PLACE_FOR(Size,Ptr) \
do { \
- int need = dest_used + (Size); \
+ ErlDrvSizeT need = dest_used + (Size); \
if (need > INET_MAX_OPT_BUFFER) { \
RETURN_ERROR(); \
} \
@@ -6064,7 +6341,7 @@ static int inet_fill_opts(inet_descriptor* desc,
/* Ptr is a name parameter */
#define TRUNCATE_TO(Size,Ptr) \
do { \
- int new_need = ((Ptr) - (*dest)) + (Size); \
+ ErlDrvSizeT new_need = ((Ptr) - (*dest)) + (Size); \
if (new_need > dest_used) { \
erl_exit(1,"Internal error in inet_drv, " \
"miscalculated buffer size"); \
@@ -6354,8 +6631,9 @@ static int load_paddrinfo (ErlDrvTermData * spec, int i,
/*
** "sctp_fill_opts": Returns {ok, Results}, or an error:
*/
-static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
- char** dest, int destlen)
+static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
+ char* buf, ErlDrvSizeT buflen,
+ char** dest, ErlDrvSizeT destlen)
{
/* In contrast to the generic "inet_fill_opts", the output here is
represented by tuples/records, which are formed in the "spec":
@@ -6537,7 +6815,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
struct linger lg;
unsigned int sz = sizeof(lg);
- if (sock_getopt(desc->s, IPPROTO_SCTP, SO_LINGER,
+ if (sock_getopt(desc->s, SOL_SOCKET, SO_LINGER,
&lg, &sz) < 0) continue;
/* Fill in the response: */
PLACE_FOR(spec, i,
@@ -6573,7 +6851,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
{
case INET_OPT_RCVBUF :
{
- proto = IPPROTO_SCTP;
+ proto = SOL_SOCKET;
type = SO_RCVBUF;
is_int = 1;
tag = am_recbuf;
@@ -6581,7 +6859,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
}
case INET_OPT_SNDBUF :
{
- proto = IPPROTO_SCTP;
+ proto = SOL_SOCKET;
type = SO_SNDBUF;
is_int = 1;
tag = am_sndbuf;
@@ -6734,7 +7012,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
2*LOAD_ATOM_CNT + LOAD_INT_CNT + 2*LOAD_TUPLE_CNT);
i = LOAD_ATOM (spec, i, am_sctp_adaptation_layer);
i = LOAD_ATOM (spec, i, am_sctp_setadaptation);
- i = LOAD_INT (spec, i, ad.ssb_adaptation_ind);
+ i = LOAD_INT (spec, i, sock_ntohl(ad.ssb_adaptation_ind));
i = LOAD_TUPLE (spec, i, 2);
i = LOAD_TUPLE (spec, i, 2);
break;
@@ -6745,7 +7023,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
unsigned int sz = sizeof(ap);
int n;
char *after;
- int alen;
+ ErlDrvSizeT alen;
if (buflen < ASSOC_ID_LEN) RETURN_ERROR(spec, -EINVAL);
ap.spp_assoc_id = GET_ASSOC_ID(buf);
@@ -6877,7 +7155,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
break;
}
/* The following option is not available in Solaris 10: */
-# ifdef SCTP_DELAYED_ACK_TIME
+# if HAVE_DECL_SCTP_DELAYED_ACK_TIME
case SCTP_OPT_DELAYED_ACK_TIME:
{
struct sctp_assoc_value av;
@@ -6924,7 +7202,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
switch(st.sstat_state)
{
/* SCTP_EMPTY is not supported on SOLARIS10: */
-# ifdef SCTP_EMPTY
+# if HAVE_DECL_SCTP_EMPTY
case SCTP_EMPTY:
i = LOAD_ATOM (spec, i, am_empty);
break;
@@ -6932,14 +7210,16 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
case SCTP_CLOSED:
i = LOAD_ATOM (spec, i, am_closed);
break;
- /* The following states are not supported by Linux Kernel SCTP yet:
+# if HAVE_DECL_SCTP_BOUND
case SCTP_BOUND:
i = LOAD_ATOM (spec, i, am_bound);
break;
+# endif
+# if HAVE_DECL_SCTP_LISTEN
case SCTP_LISTEN:
i = LOAD_ATOM (spec, i, am_listen);
break;
- */
+# endif
case SCTP_COOKIE_WAIT:
i = LOAD_ATOM (spec, i, am_cookie_wait);
break;
@@ -6983,7 +7263,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
struct sctp_paddrinfo pai;
unsigned int sz = sizeof(pai);
char *after;
- int alen;
+ ErlDrvSizeT alen;
if (buflen < ASSOC_ID_LEN) RETURN_ERROR(spec, -EINVAL);
pai.spinfo_assoc_id = GET_ASSOC_ID(buf);
@@ -7010,7 +7290,7 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
default:
RETURN_ERROR(spec, -EINVAL); /* No more valid options */
}
- /* If we get here one result has been succesfully loaded */
+ /* If we get here one result has been successfully loaded */
length ++;
}
if (buflen != 0) RETURN_ERROR(spec, -EINVAL); /* Optparam mismatch */
@@ -7027,11 +7307,10 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
i = LOAD_TUPLE(spec, i, 3);
/* Now, convert "spec" into the returnable term: */
- /* desc->caller = 0; What does it mean? */
- driver_output_term(desc->port, spec, i);
+ driver_send_term(desc->port, driver_caller(desc->port), spec, i);
FREE(spec);
- (*dest)[0] = INET_REP_SCTP;
+ (*dest)[0] = INET_REP;
return 1; /* Response length */
# undef PLACE_FOR
# undef RETURN_ERROR
@@ -7041,7 +7320,8 @@ static int sctp_fill_opts(inet_descriptor* desc, char* buf, int buflen,
/* fill statistics reply, op codes from src and result in dest
** dst area must be a least 5*len + 1 bytes
*/
-static int inet_fill_stat(inet_descriptor* desc, char* src, int len, char* dst)
+static ErlDrvSSizeT inet_fill_stat(inet_descriptor* desc,
+ char* src, ErlDrvSizeT len, char* dst)
{
unsigned long val;
int op;
@@ -7074,7 +7354,7 @@ static int inet_fill_stat(inet_descriptor* desc, char* src, int len, char* dst)
val = (unsigned long) desc->send_avg;
break;
case INET_STAT_SEND_PND:
- val = driver_sizeq(desc->port);
+ val = (unsigned long) driver_sizeq(desc->port);
break;
case INET_STAT_RECV_OCT:
put_int32(desc->recv_oct[1], dst); /* write high 32bit */
@@ -7119,7 +7399,8 @@ send_empty_out_q_msgs(inet_descriptor* desc)
/* subscribe and fill subscription reply, op codes from src and
** result in dest dst area must be a least 5*len + 1 bytes
*/
-static int inet_subscribe(inet_descriptor* desc, char* src, int len, char* dst)
+static ErlDrvSSizeT inet_subscribe(inet_descriptor* desc,
+ char* src, ErlDrvSizeT len, char* dst)
{
unsigned long val;
int op;
@@ -7206,6 +7487,8 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
sys_memzero((char *)&desc->remote,sizeof(desc->remote));
+ desc->is_ignored = 0;
+
return (ErlDrvData)desc;
}
@@ -7217,14 +7500,14 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
/*
** common TCP/UDP/SCTP control command
*/
-static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf,
+ ErlDrvSizeT len, char** rbuf, ErlDrvSizeT rsize)
{
switch (cmd) {
case INET_REQ_GETSTAT: {
char* dst;
- int i;
+ ErlDrvSizeT i;
int dstlen = 1; /* Reply code */
for (i = 0; i < len; i++) {
@@ -7264,7 +7547,7 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
}
case INET_REQ_GETOPTS: { /* get options */
- int replen;
+ ErlDrvSSizeT replen;
DEBUGF(("inet_ctl(%ld): GETOPTS\r\n", (long)desc->port));
#ifdef HAVE_SCTP
if (IS_SCTP(desc))
@@ -7488,6 +7771,33 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
}
+ case INET_REQ_IGNOREFD: {
+ DEBUGF(("inet_ctl(%ld): IGNOREFD, IGNORED = %d\r\n",
+ (long)desc->port,(int)*buf));
+
+	/*
+	 * The fd can only be ignored for connected TCP sockets for now;
+	 * UDP and SCTP support can be added if needed.
+	 */
+ if (!IS_CONNECTED(desc))
+ return ctl_error(ENOTCONN, rbuf, rsize);
+
+	if (desc->stype != SOCK_STREAM)
+ return ctl_error(EINVAL, rbuf, rsize);
+
+ if (*buf == 1 && !desc->is_ignored) {
+ sock_select(desc, (FD_READ|FD_WRITE|FD_CLOSE|ERL_DRV_USE_NO_CALLBACK), 0);
+ desc->is_ignored = INET_IGNORE_READ;
+ } else if (*buf == 0 && desc->is_ignored) {
+ int flags = (FD_READ|FD_CLOSE|((desc->is_ignored & INET_IGNORE_WRITE)?FD_WRITE:0));
+ desc->is_ignored = INET_IGNORE_NONE;
+ sock_select(desc, flags, 1);
+ } else
+ return ctl_error(EINVAL, rbuf, rsize);
+
+ return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
+ }
+
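
The handler above is a small hand-over protocol for the socket fd: a control call with the single byte 1 stops all selecting and records INET_IGNORE_READ; if a send is attempted while the fd is ignored, the data is queued and only INET_IGNORE_WRITE is OR'ed in; a control call with byte 0 re-selects FD_READ|FD_CLOSE, plus FD_WRITE when a send happened in between. A standalone sketch of those state transitions:

    #include <assert.h>
    #include <stdio.h>

    #define INET_IGNORE_NONE  0
    #define INET_IGNORE_READ  1
    #define INET_IGNORE_WRITE (1 << 1)

    int main(void)
    {
        int is_ignored = INET_IGNORE_NONE;

        /* INET_REQ_IGNOREFD with argument 1: stop selecting, hand the fd over */
        is_ignored = INET_IGNORE_READ;

        /* a send attempted while ignored is queued; only the wish for
         * FD_WRITE is remembered */
        is_ignored |= INET_IGNORE_WRITE;

        /* INET_REQ_IGNOREFD with argument 0: take the fd back; FD_WRITE is
         * re-selected only if a send happened in the meantime */
        {
            int restore_write = (is_ignored & INET_IGNORE_WRITE) != 0;
            is_ignored = INET_IGNORE_NONE;
            assert(restore_write);
            printf("re-select FD_WRITE: %s\n", restore_write ? "yes" : "no");
        }
        return 0;
    }
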
#ifndef VXWORKS
case INET_REQ_GETSERVBYNAME: { /* L1 Name-String L2 Proto-String */
@@ -7548,7 +7858,7 @@ static int inet_ctl(inet_descriptor* desc, int cmd, char* buf, int len,
}
/* update statistics on output packets */
-static void inet_output_count(inet_descriptor* desc, int len)
+static void inet_output_count(inet_descriptor* desc, ErlDrvSizeT len)
{
unsigned long n = desc->send_cnt + 1;
unsigned long t = desc->send_oct[0] + len;
@@ -7568,7 +7878,7 @@ static void inet_output_count(inet_descriptor* desc, int len)
}
/* update statistics on input packets */
-static void inet_input_count(inet_descriptor* desc, int len)
+static void inet_input_count(inet_descriptor* desc, ErlDrvSizeT len)
{
unsigned long n = desc->recv_cnt + 1;
unsigned long t = desc->recv_oct[0] + len;
@@ -7691,7 +8001,7 @@ static void tcp_clear_input(tcp_descriptor* desc)
static void tcp_clear_output(tcp_descriptor* desc)
{
ErlDrvPort ix = desc->inet.port;
- int qsz = driver_sizeq(ix);
+ ErlDrvSizeT qsz = driver_sizeq(ix);
driver_deq(ix, qsz);
send_empty_out_q_msgs(INETP(desc));
@@ -7749,7 +8059,7 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
/* Copy a descriptor, by creating a new port with same settings
* as the descriptor desc.
- * return NULL on error (ENFILE no ports avail)
+ * return NULL on error (SYSTEM_LIMIT no ports avail)
*/
static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
ErlDrvTermData owner, int* err)
@@ -7788,7 +8098,7 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
/* The new port will be linked and connected to the original caller */
port = driver_create_port(port, owner, "tcp_inet", (ErlDrvData) copy_desc);
if ((long)port == -1) {
- *err = ENFILE;
+ *err = INET_ERRNO_SYSTEM_LIMIT;
FREE(copy_desc);
return NULL;
}
@@ -7806,22 +8116,22 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
static void tcp_close_check(tcp_descriptor* desc)
{
/* XXX:PaN - multiple clients to handle! */
- if (desc->inet.state == TCP_STATE_ACCEPTING) {
+ if (desc->inet.state == INET_STATE_ACCEPTING) {
inet_async_op *this_op = desc->inet.opt;
sock_select(INETP(desc), FD_ACCEPT, 0);
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
if (this_op != NULL) {
driver_demonitor_process(desc->inet.port, &(this_op->monitor));
}
async_error_am(INETP(desc), am_closed);
}
- else if (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ else if (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
int id,req;
ErlDrvTermData caller;
ErlDrvMonitor monitor;
sock_select(INETP(desc), FD_ACCEPT, 0);
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
while (deq_multi_op(desc,&id,&req,&caller,NULL,&monitor) == 0) {
driver_demonitor_process(desc->inet.port, &monitor);
send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_closed);
@@ -7829,10 +8139,10 @@ static void tcp_close_check(tcp_descriptor* desc)
clean_multi_timers(&(desc->mtd), desc->inet.port);
}
- else if (desc->inet.state == TCP_STATE_CONNECTING) {
+ else if (desc->inet.state == INET_STATE_CONNECTING) {
async_error_am(INETP(desc), am_closed);
}
- else if (desc->inet.state == TCP_STATE_CONNECTED) {
+ else if (desc->inet.state == INET_STATE_CONNECTED) {
async_error_am_all(INETP(desc), am_closed);
}
}
@@ -7859,45 +8169,69 @@ static void tcp_inet_stop(ErlDrvData e)
/* TCP requests from Erlang */
-static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
+ char* buf, ErlDrvSizeT len,
+ char** rbuf, ErlDrvSizeT rsize)
{
tcp_descriptor* desc = (tcp_descriptor*)e;
+
switch(cmd) {
- case INET_REQ_OPEN: /* open socket and return internal index */
+ case INET_REQ_OPEN: { /* open socket and return internal index */
+ int domain;
DEBUGF(("tcp_inet_ctl(%ld): OPEN\r\n", (long)desc->inet.port));
- if ((len == 1) && (buf[0] == INET_AF_INET))
- return
- inet_ctl_open(INETP(desc), AF_INET, SOCK_STREAM, rbuf, rsize);
+ if (len != 2) return ctl_error(EINVAL, rbuf, rsize);
+ switch(buf[0]) {
+ case INET_AF_INET:
+ domain = AF_INET;
+ break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- else if ((len == 1) && (buf[0] == INET_AF_INET6))
- return
- inet_ctl_open(INETP(desc), AF_INET6, SOCK_STREAM, rbuf, rsize);
+ case INET_AF_INET6:
+ domain = AF_INET6;
+ break;
#else
- else if ((len == 1) && (buf[0] == INET_AF_INET6))
- return ctl_xerror("eafnosupport",rbuf,rsize);
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
#endif
- else
+ default:
return ctl_error(EINVAL, rbuf, rsize);
+ }
+ if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize);
+ return inet_ctl_open(INETP(desc), domain, SOCK_STREAM, rbuf, rsize);
+ break;
+ }
- case INET_REQ_FDOPEN: /* pass in an open socket */
- DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
- if ((len == 5) && (buf[0] == INET_AF_INET))
- return inet_ctl_fdopen(INETP(desc), AF_INET, SOCK_STREAM,
- (SOCKET) get_int32(buf+1), rbuf, rsize);
+ case INET_REQ_FDOPEN: { /* pass in an open socket */
+ int domain;
+ DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
+ if (len != 6) return ctl_error(EINVAL, rbuf, rsize);
+ switch(buf[0]) {
+ case INET_AF_INET:
+ domain = AF_INET;
+ break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- else if ((len == 5) && (buf[0] == INET_AF_INET6))
- return inet_ctl_fdopen(INETP(desc), AF_INET6, SOCK_STREAM,
- (SOCKET) get_int32(buf+1), rbuf, rsize);
+ case INET_AF_INET6:
+ domain = AF_INET6;
+ break;
+#else
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
#endif
- else
+ default:
return ctl_error(EINVAL, rbuf, rsize);
+ }
+ if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize);
+ return inet_ctl_fdopen(INETP(desc), domain, SOCK_STREAM,
+ (SOCKET) get_int32(buf+2), rbuf, rsize);
+ break;
+ }
- case TCP_REQ_LISTEN: { /* argument backlog */
+ case INET_REQ_LISTEN: { /* argument backlog */
int backlog;
DEBUGF(("tcp_inet_ctl(%ld): LISTEN\r\n", (long)desc->inet.port));
- if (desc->inet.state == TCP_STATE_CLOSED)
+ if (desc->inet.state == INET_STATE_CLOSED)
return ctl_xerror(EXBADPORT, rbuf, rsize);
if (!IS_OPEN(INETP(desc)))
return ctl_xerror(EXBADPORT, rbuf, rsize);
@@ -7908,7 +8242,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
backlog = get_int16(buf);
if (IS_SOCKET_ERROR(sock_listen(desc->inet.s, backlog)))
return ctl_error(sock_errno(), rbuf, rsize);
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
@@ -7944,13 +8278,13 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
((sock_errno() == ERRNO_BLOCK) || /* Winsock2 */
(sock_errno() == EINPROGRESS))) { /* Unix & OSE!! */
sock_select(INETP(desc), FD_CONNECT, 1);
- desc->inet.state = TCP_STATE_CONNECTING;
+ desc->inet.state = INET_STATE_CONNECTING;
if (timeout != INET_INFINITY)
driver_set_timer(desc->inet.port, timeout);
enq_async(INETP(desc), tbuf, INET_REQ_CONNECT);
}
else if (code == 0) { /* ok we are connected */
- desc->inet.state = TCP_STATE_CONNECTED;
+ desc->inet.state = INET_STATE_CONNECTED;
if (desc->inet.active)
sock_select(INETP(desc), (FD_READ|FD_CLOSE), 1);
enq_async(INETP(desc), tbuf, INET_REQ_CONNECT);
@@ -7962,7 +8296,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
}
- case TCP_REQ_ACCEPT: { /* do async accept */
+ case INET_REQ_ACCEPT: { /* do async accept */
char tbuf[2];
unsigned timeout;
inet_address remote;
@@ -7972,14 +8306,14 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
DEBUGF(("tcp_inet_ctl(%ld): ACCEPT\r\n", (long)desc->inet.port));
/* INPUT: Timeout(4) */
- if ((desc->inet.state != TCP_STATE_LISTEN && desc->inet.state != TCP_STATE_ACCEPTING &&
- desc->inet.state != TCP_STATE_MULTI_ACCEPTING) || len != 4) {
+ if ((desc->inet.state != INET_STATE_LISTENING && desc->inet.state != INET_STATE_ACCEPTING &&
+ desc->inet.state != INET_STATE_MULTI_ACCEPTING) || len != 4) {
return ctl_error(EINVAL, rbuf, rsize);
}
timeout = get_int32(buf);
- if (desc->inet.state == TCP_STATE_ACCEPTING) {
+ if (desc->inet.state == INET_STATE_ACCEPTING) {
unsigned long time_left = 0;
int oid = 0;
ErlDrvTermData ocaller = ERL_DRV_NIL;
@@ -8008,10 +8342,10 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
mtd = add_multi_timer(&(desc->mtd), desc->inet.port, caller,
timeout, &tcp_inet_multi_timeout);
}
- enq_multi_op(desc, tbuf, TCP_REQ_ACCEPT, caller, mtd, &monitor);
- desc->inet.state = TCP_STATE_MULTI_ACCEPTING;
+ enq_multi_op(desc, tbuf, INET_REQ_ACCEPT, caller, mtd, &monitor);
+ desc->inet.state = INET_STATE_MULTI_ACCEPTING;
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
- } else if (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ } else if (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
ErlDrvTermData caller = driver_caller(desc->inet.port);
MultiTimerData *mtd = NULL;
ErlDrvMonitor monitor;
@@ -8023,7 +8357,7 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
mtd = add_multi_timer(&(desc->mtd), desc->inet.port, caller,
timeout, &tcp_inet_multi_timeout);
}
- enq_multi_op(desc, tbuf, TCP_REQ_ACCEPT, caller, mtd, &monitor);
+ enq_multi_op(desc, tbuf, INET_REQ_ACCEPT, caller, mtd, &monitor);
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
} else {
n = sizeof(desc->inet.remote);
@@ -8035,8 +8369,8 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
&monitor) != 0) {
return ctl_xerror("noproc", rbuf, rsize);
}
- enq_async_w_tmo(INETP(desc), tbuf, TCP_REQ_ACCEPT, timeout, &monitor);
- desc->inet.state = TCP_STATE_ACCEPTING;
+ enq_async_w_tmo(INETP(desc), tbuf, INET_REQ_ACCEPT, timeout, &monitor);
+ desc->inet.state = INET_STATE_ACCEPTING;
sock_select(INETP(desc),FD_ACCEPT,1);
if (timeout != INET_INFINITY) {
driver_set_timer(desc->inet.port, timeout);
@@ -8063,8 +8397,8 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
driver_select(accept_desc->inet.port, accept_desc->inet.event,
ERL_DRV_READ, 1);
#endif
- accept_desc->inet.state = TCP_STATE_CONNECTED;
- enq_async(INETP(desc), tbuf, TCP_REQ_ACCEPT);
+ accept_desc->inet.state = INET_STATE_CONNECTED;
+ enq_async(INETP(desc), tbuf, INET_REQ_ACCEPT);
async_ok_port(INETP(desc), accept_desc->inet.dport);
}
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
@@ -8106,13 +8440,14 @@ static int tcp_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (enq_async(INETP(desc), tbuf, TCP_REQ_RECV) < 0)
return ctl_error(EALREADY, rbuf, rsize);
- if (tcp_recv(desc, n) == 0) {
+ if (INETP(desc)->is_ignored || tcp_recv(desc, n) == 0) {
if (timeout == 0)
async_error_am(INETP(desc), am_timeout);
else {
if (timeout != INET_INFINITY)
- driver_set_timer(desc->inet.port, timeout);
- sock_select(INETP(desc),(FD_READ|FD_CLOSE),1);
+ driver_set_timer(desc->inet.port, timeout);
+ if (!INETP(desc)->is_ignored)
+ sock_select(INETP(desc),(FD_READ|FD_CLOSE),1);
}
}
return ctl_reply(INET_REP_OK, tbuf, 2, rbuf, rsize);
@@ -8170,7 +8505,7 @@ static void tcp_inet_timeout(ErlDrvData e)
(long)desc->inet.port, desc->inet.s));
if ((state & INET_F_MULTI_CLIENT)) { /* Multi-client always means multi-timers */
fire_multi_timers(&(desc->mtd), desc->inet.port, e);
- } else if ((state & TCP_STATE_CONNECTED) == TCP_STATE_CONNECTED) {
+ } else if ((state & INET_STATE_CONNECTED) == INET_STATE_CONNECTED) {
if (desc->busy_on_send) {
ASSERT(IS_BUSY(INETP(desc)));
desc->inet.caller = desc->inet.busy_caller;
@@ -8190,20 +8525,20 @@ static void tcp_inet_timeout(ErlDrvData e)
async_error_am(INETP(desc), am_timeout);
}
}
- else if ((state & TCP_STATE_CONNECTING) == TCP_STATE_CONNECTING) {
+ else if ((state & INET_STATE_CONNECTING) == INET_STATE_CONNECTING) {
/* assume connect timeout */
/* close the socket since it's not usable (see man pages) */
erl_inet_close(INETP(desc));
async_error_am(INETP(desc), am_timeout);
}
- else if ((state & TCP_STATE_ACCEPTING) == TCP_STATE_ACCEPTING) {
+ else if ((state & INET_STATE_ACCEPTING) == INET_STATE_ACCEPTING) {
inet_async_op *this_op = desc->inet.opt;
/* timer is set on accept */
sock_select(INETP(desc), FD_ACCEPT, 0);
if (this_op != NULL) {
driver_demonitor_process(desc->inet.port, &(this_op->monitor));
}
- desc->inet.state = TCP_STATE_LISTEN;
+ desc->inet.state = INET_STATE_LISTENING;
async_error_am(INETP(desc), am_timeout);
}
DEBUGF(("tcp_inet_timeout(%ld) }\r\n", (long)desc->inet.port));
@@ -8221,7 +8556,7 @@ static void tcp_inet_multi_timeout(ErlDrvData e, ErlDrvTermData caller)
driver_demonitor_process(desc->inet.port, &monitor);
if (desc->multi_first == NULL) {
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_timeout);
}
@@ -8237,7 +8572,7 @@ static void tcp_inet_multi_timeout(ErlDrvData e, ErlDrvTermData caller)
** but distribution still uses the tcp_inet_command!!
*/
-static void tcp_inet_command(ErlDrvData e, char *buf, int len)
+static void tcp_inet_command(ErlDrvData e, char *buf, ErlDrvSizeT len)
{
tcp_descriptor* desc = (tcp_descriptor*)e;
desc->inet.caller = driver_caller(desc->inet.port);
@@ -8287,7 +8622,7 @@ static void tcp_inet_process_exit(ErlDrvData e, ErlDrvMonitor *monitorp)
ErlDrvTermData who = driver_get_monitored_process(desc->inet.port,monitorp);
int state = desc->inet.state;
- if ((state & TCP_STATE_MULTI_ACCEPTING) == TCP_STATE_MULTI_ACCEPTING) {
+ if ((state & INET_STATE_MULTI_ACCEPTING) == INET_STATE_MULTI_ACCEPTING) {
int id,req;
MultiTimerData *timeout;
if (remove_multi_op(desc, &id, &req, who, &timeout, NULL) != 0) {
@@ -8298,15 +8633,15 @@ static void tcp_inet_process_exit(ErlDrvData e, ErlDrvMonitor *monitorp)
}
if (desc->multi_first == NULL) {
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
- } else if ((state & TCP_STATE_ACCEPTING) == TCP_STATE_ACCEPTING) {
+ } else if ((state & INET_STATE_ACCEPTING) == INET_STATE_ACCEPTING) {
int did,drid;
ErlDrvTermData dcaller;
deq_async(INETP(desc), &did, &dcaller, &drid);
driver_cancel_timer(desc->inet.port);
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
}
@@ -8456,8 +8791,15 @@ static int tcp_remain(tcp_descriptor* desc, int* len)
else if (tlen == 0) { /* need unknown more */
*len = 0;
if (nsz == 0) {
- if (nfill == n)
- goto error;
+ if (nfill == n) {
+ if (desc->inet.psize != 0 && desc->inet.psize > nfill) {
+ if (tcp_expand_buffer(desc, desc->inet.psize) < 0)
+ return -1;
+ return desc->inet.psize;
+ }
+ else
+ goto error;
+ }
DEBUGF((" => restart more=%d\r\n", nfill - n));
return nfill - n;
}
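
The new branch above changes what tcp_remain() does when the input buffer is completely full but the packet header has not yet yielded a length: instead of failing outright, the buffer is grown to the configured packet size (psize) when that is larger than what has been read so far. A standalone sketch of just that decision, with hypothetical sizes:

    #include <stdio.h>

    /* returns the size to grow the buffer to, or -1 for the old error path */
    static int remain_when_full(int nfill, int psize)
    {
        if (psize != 0 && psize > nfill)
            return psize;      /* expand the buffer and keep reading */
        return -1;             /* buffer full, length still unknown: error */
    }

    int main(void)
    {
        printf("%d\n", remain_when_full(1460, 4096));  /* 4096: grow */
        printf("%d\n", remain_when_full(4096, 0));     /* -1: no psize configured */
        return 0;
    }
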
@@ -8496,32 +8838,29 @@ static int tcp_deliver(tcp_descriptor* desc, int len)
}
while (len > 0) {
- int code = 0;
+ int code;
inet_input_count(INETP(desc), len);
/* deliver binary? */
if (len*4 >= desc->i_buf->orig_size*3) { /* >=75% */
+ code = tcp_reply_binary_data(desc, desc->i_buf,
+ (desc->i_ptr_start -
+ desc->i_buf->orig_bytes),
+ len);
+ if (code < 0)
+ return code;
+
/* something after? */
if (desc->i_ptr_start + len == desc->i_ptr) { /* no */
- code = tcp_reply_binary_data(desc, desc->i_buf,
- (desc->i_ptr_start -
- desc->i_buf->orig_bytes),
- len);
tcp_clear_input(desc);
}
else { /* move trail to beginning of a new buffer */
- ErlDrvBinary* bin;
+ ErlDrvBinary* bin = alloc_buffer(desc->i_bufsz);
char* ptr_end = desc->i_ptr_start + len;
int sz = desc->i_ptr - ptr_end;
- bin = alloc_buffer(desc->i_bufsz);
memcpy(bin->orig_bytes, ptr_end, sz);
-
- code = tcp_reply_binary_data(desc, desc->i_buf,
- (desc->i_ptr_start-
- desc->i_buf->orig_bytes),
- len);
free_buffer(desc->i_buf);
desc->i_buf = bin;
desc->i_ptr_start = desc->i_buf->orig_bytes;
@@ -8533,17 +8872,15 @@ static int tcp_deliver(tcp_descriptor* desc, int len)
code = tcp_reply_data(desc, desc->i_ptr_start, len);
/* XXX The buffer gets thrown away on error (code < 0) */
/* Windows needs workaround for this in tcp_inet_event... */
+ if (code < 0)
+ return code;
desc->i_ptr_start += len;
if (desc->i_ptr_start == desc->i_ptr)
tcp_clear_input(desc);
else
desc->i_remain = 0;
-
}
- if (code < 0)
- return code;
-
count++;
len = 0;
@@ -8848,8 +9185,8 @@ static void tcp_inet_event(ErlDrvData e, ErlDrvEvent event)
/* socket has input:
-** 1. TCP_STATE_ACCEPTING => non block accept ?
-** 2. TCP_STATE_CONNECTED => read input
+** 1. INET_STATE_ACCEPTING => non block accept ?
+** 2. INET_STATE_CONNECTED => read input
*/
static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
{
@@ -8857,8 +9194,9 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
#ifdef DEBUG
long port = (long) desc->inet.port; /* Used after driver_exit() */
#endif
+ ASSERT(!INETP(desc)->is_ignored);
DEBUGF(("tcp_inet_input(%ld) {s=%d\r\n", port, desc->inet.s));
- if (desc->inet.state == TCP_STATE_ACCEPTING) {
+ if (desc->inet.state == INET_STATE_ACCEPTING) {
SOCKET s;
unsigned int len;
inet_address remote;
@@ -8873,7 +9211,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
}
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
if (this_op != NULL) {
driver_demonitor_process(desc->inet.port, &(this_op->monitor));
@@ -8913,11 +9251,11 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
driver_select(accept_desc->inet.port, accept_desc->inet.event,
ERL_DRV_READ, 1);
#endif
- accept_desc->inet.state = TCP_STATE_CONNECTED;
+ accept_desc->inet.state = INET_STATE_CONNECTED;
ret = async_ok_port(INETP(desc), accept_desc->inet.dport);
goto done;
}
- } else if (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ } else if (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
SOCKET s;
unsigned int len;
inet_address remote;
@@ -8929,7 +9267,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
int times = 0;
#endif
- while (desc->inet.state == TCP_STATE_MULTI_ACCEPTING) {
+ while (desc->inet.state == INET_STATE_MULTI_ACCEPTING) {
len = sizeof(desc->inet.remote);
s = sock_accept(desc->inet.s, (struct sockaddr*) &remote, &len);
@@ -8949,7 +9287,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
if (desc->multi_first == NULL) {
sock_select(INETP(desc),FD_ACCEPT,0);
- desc->inet.state = TCP_STATE_LISTEN; /* restore state */
+ desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
if (timeout != NULL) {
@@ -8980,7 +9318,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
driver_select(accept_desc->inet.port, accept_desc->inet.event,
ERL_DRV_READ, 1);
#endif
- accept_desc->inet.state = TCP_STATE_CONNECTED;
+ accept_desc->inet.state = INET_STATE_CONNECTED;
ret = send_async_ok_port(desc->inet.port, desc->inet.dport,
id, caller, accept_desc->inet.dport);
}
@@ -9063,12 +9401,12 @@ static int tcp_send_error(tcp_descriptor* desc, int err)
*/
static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
{
- int sz;
+ ErlDrvSizeT sz;
char buf[4];
- int h_len;
- int n;
+ ErlDrvSizeT h_len;
+ ssize_t n;
ErlDrvPort ix = desc->inet.port;
- int len = ev->size;
+ ErlDrvSizeT len = ev->size;
switch(desc->inet.htype) {
case TCP_PB_1:
@@ -9116,9 +9454,13 @@ static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
else {
int vsize = (ev->vsize > MAX_VSIZE) ? MAX_VSIZE : ev->vsize;
- DEBUGF(("tcp_sendv(%ld): s=%d, about to send %d,%d bytes\r\n",
- (long)desc->inet.port, desc->inet.s, h_len, len));
- if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
+ DEBUGF(("tcp_sendv(%ld): s=%d, about to send "LLU","LLU" bytes\r\n",
+ (long)desc->inet.port, desc->inet.s, (llu_t)h_len, (llu_t)len));
+
+ if (INETP(desc)->is_ignored) {
+ INETP(desc)->is_ignored |= INET_IGNORE_WRITE;
+ n = 0;
+ } else if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
n = 0;
} else if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s, ev->iov,
vsize, &n, 0))) {
@@ -9139,14 +9481,17 @@ static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
return 0;
}
else {
- DEBUGF(("tcp_sendv(%ld): s=%d, only sent %d/%d of %d/%d bytes/items\r\n",
- (long)desc->inet.port, desc->inet.s, n, vsize, ev->size, ev->vsize));
+ DEBUGF(("tcp_sendv(%ld): s=%d, only sent "
+ LLU"/%d of "LLU"/%d bytes/items\r\n",
+ (long)desc->inet.port, desc->inet.s,
+ (llu_t)n, vsize, (llu_t)ev->size, ev->vsize));
}
DEBUGF(("tcp_sendv(%ld): s=%d, Send failed, queuing\r\n",
(long)desc->inet.port, desc->inet.s));
driver_enqv(ix, ev, n);
- sock_select(INETP(desc),(FD_WRITE|FD_CLOSE), 1);
+ if (!INETP(desc)->is_ignored)
+ sock_select(INETP(desc),(FD_WRITE|FD_CLOSE), 1);
}
return 0;
}
@@ -9154,7 +9499,7 @@ static int tcp_sendv(tcp_descriptor* desc, ErlIOVec* ev)
/*
** Send non blocking data
*/
-static int tcp_send(tcp_descriptor* desc, char* ptr, int len)
+static int tcp_send(tcp_descriptor* desc, char* ptr, ErlDrvSizeT len)
{
int sz;
char buf[4];
@@ -9209,9 +9554,12 @@ static int tcp_send(tcp_descriptor* desc, char* ptr, int len)
iov[1].iov_base = ptr;
iov[1].iov_len = len;
- DEBUGF(("tcp_send(%ld): s=%d, about to send %d,%d bytes\r\n",
- (long)desc->inet.port, desc->inet.s, h_len, len));
- if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
+ DEBUGF(("tcp_send(%ld): s=%d, about to send "LLU","LLU" bytes\r\n",
+ (long)desc->inet.port, desc->inet.s, (llu_t)h_len, (llu_t)len));
+ if (INETP(desc)->is_ignored) {
+ INETP(desc)->is_ignored |= INET_IGNORE_WRITE;
+ n = 0;
+ } else if (desc->tcp_add_flags & TCP_ADDF_DELAY_SEND) {
sock_send(desc->inet.s, buf, 0, 0);
n = 0;
} else if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s,iov,2,&n,0))) {
@@ -9242,7 +9590,8 @@ static int tcp_send(tcp_descriptor* desc, char* ptr, int len)
n -= h_len;
driver_enq(ix, ptr+n, len-n);
}
- sock_select(INETP(desc),(FD_WRITE|FD_CLOSE), 1);
+ if (!INETP(desc)->is_ignored)
+ sock_select(INETP(desc),(FD_WRITE|FD_CLOSE), 1);
}
return 0;
}
@@ -9258,17 +9607,18 @@ static void tcp_inet_drv_input(ErlDrvData data, ErlDrvEvent event)
}
/* socket ready for output:
-** 1. TCP_STATE_CONNECTING => non block connect ?
-** 2. TCP_STATE_CONNECTED => write output
+** 1. INET_STATE_CONNECTING => non block connect ?
+** 2. INET_STATE_CONNECTED => write output
*/
static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
{
int ret = 0;
ErlDrvPort ix = desc->inet.port;
+ ASSERT(!INETP(desc)->is_ignored);
DEBUGF(("tcp_inet_output(%ld) {s=%d\r\n",
(long)desc->inet.port, desc->inet.s));
- if (desc->inet.state == TCP_STATE_CONNECTING) {
+ if (desc->inet.state == INET_STATE_CONNECTING) {
sock_select(INETP(desc),FD_CONNECT,0);
driver_cancel_timer(ix); /* possibly cancel a timer */
@@ -9288,7 +9638,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
(struct sockaddr*) &desc->inet.remote, &sz);
if (IS_SOCKET_ERROR(code)) {
- desc->inet.state = TCP_STATE_BOUND; /* restore state */
+ desc->inet.state = INET_STATE_BOUND; /* restore state */
ret = async_error(INETP(desc), sock_errno());
goto done;
}
@@ -9301,15 +9651,15 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
(void *)&error, &sz);
if ((code < 0) || error) {
- desc->inet.state = TCP_STATE_BOUND; /* restore state */
+ desc->inet.state = INET_STATE_BOUND; /* restore state */
ret = async_error(INETP(desc), error);
goto done;
}
}
-#endif /* SOCKOPT_CONNECT_STAT */
+#endif /* SO_ERROR */
#endif /* !__WIN32__ */
- desc->inet.state = TCP_STATE_CONNECTED;
+ desc->inet.state = INET_STATE_CONNECTED;
if (desc->inet.active)
sock_select(INETP(desc),(FD_READ|FD_CLOSE),1);
async_ok(INETP(desc));
@@ -9317,7 +9667,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event)
else if (IS_CONNECTED(INETP(desc))) {
for (;;) {
int vsize;
- int n;
+ ssize_t n;
SysIOVec* iov;
if ((iov = driver_peekq(ix, &vsize)) == NULL) {
@@ -9409,6 +9759,59 @@ static int should_use_so_bsdcompat(void)
#endif /* __linux__ */
#endif /* HAVE_SO_BSDCOMPAT */
+
+
+#ifdef HAVE_SCTP
+/* Copy a descriptor by creating a new port with the same settings
+ * as the descriptor desc.
+ * Returns NULL on error (e.g. ENFILE when no ports are available).
+ */
+static udp_descriptor* sctp_inet_copy(udp_descriptor* desc, SOCKET s, int* err)
+{
+ ErlDrvPort port = desc->inet.port;
+ udp_descriptor* copy_desc;
+
+ copy_desc = (udp_descriptor*) sctp_inet_start(port, NULL);
+
+ /* Setup event if needed */
+ if ((copy_desc->inet.s = s) != INVALID_SOCKET) {
+ if ((copy_desc->inet.event = sock_create_event(INETP(copy_desc))) ==
+ INVALID_EVENT) {
+ *err = sock_errno();
+ FREE(copy_desc);
+ return NULL;
+ }
+ }
+
+ /* Some flags must be inherited at this point */
+ copy_desc->inet.mode = desc->inet.mode;
+ copy_desc->inet.exitf = desc->inet.exitf;
+ copy_desc->inet.bit8f = desc->inet.bit8f;
+ copy_desc->inet.deliver = desc->inet.deliver;
+ copy_desc->inet.htype = desc->inet.htype;
+ copy_desc->inet.psize = desc->inet.psize;
+ copy_desc->inet.stype = desc->inet.stype;
+ copy_desc->inet.sfamily = desc->inet.sfamily;
+ copy_desc->inet.hsz = desc->inet.hsz;
+ copy_desc->inet.bufsz = desc->inet.bufsz;
+
+ /* The new port will be linked and connected to the caller */
+ port = driver_create_port(port, desc->inet.caller, "sctp_inet",
+ (ErlDrvData) copy_desc);
+ if ((long)port == -1) {
+ *err = ENFILE;
+ FREE(copy_desc);
+ return NULL;
+ }
+ copy_desc->inet.port = port;
+ copy_desc->inet.dport = driver_mk_port(port);
+ *err = 0;
+ return copy_desc;
+}
+#endif
+
+
+
static int packet_inet_init()
{
return 0;
@@ -9427,6 +9830,9 @@ static ErlDrvData packet_inet_start(ErlDrvPort port, char* args, int protocol)
return ERL_DRV_ERROR_ERRNO;
desc->read_packets = INET_PACKET_POLL;
+ desc->i_bufsz = 0;
+ desc->i_buf = NULL;
+ desc->i_ptr = NULL;
return drvd;
}
@@ -9451,6 +9857,10 @@ static void packet_inet_stop(ErlDrvData e)
*/
udp_descriptor * udesc = (udp_descriptor*) e;
inet_descriptor* descr = INETP(udesc);
+ if (udesc->i_buf != NULL) {
+ release_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
+ }
ASSERT(NO_SUBSCRIBERS(&(descr->empty_out_q_subs)));
inet_stop(descr);
@@ -9468,28 +9878,38 @@ static int packet_error(udp_descriptor* udesc, int err)
/*
** Various functions accessible via "port_control" on the Erlang side:
*/
-static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
- char** rbuf, int rsize)
+static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf,
+ ErlDrvSizeT len, char** rbuf, ErlDrvSizeT rsize)
{
- int replen;
+ ErlDrvSSizeT replen;
udp_descriptor * udesc = (udp_descriptor *) e;
inet_descriptor* desc = INETP(udesc);
int type = SOCK_DGRAM;
- int af;
-#ifdef HAVE_SCTP
- if (IS_SCTP(desc)) type = SOCK_SEQPACKET;
-#endif
+ int af = AF_INET;
switch(cmd) {
case INET_REQ_OPEN: /* open socket and return internal index */
DEBUGF(("packet_inet_ctl(%ld): OPEN\r\n", (long)desc->port));
- if (len != 1) {
+ if (len != 2) {
return ctl_error(EINVAL, rbuf, rsize);
}
switch (buf[0]) {
case INET_AF_INET: af = AF_INET; break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- case INET_AF_INET6: af = AF_INET6; break;
+ case INET_AF_INET6: af = AF_INET6; break;
+#else
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
+#endif
+ default:
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+ switch (buf[1]) {
+ case INET_TYPE_STREAM: type = SOCK_STREAM; break;
+ case INET_TYPE_DGRAM: type = SOCK_DGRAM; break;
+#ifdef HAVE_SCTP
+ case INET_TYPE_SEQPACKET: type = SOCK_SEQPACKET; break;
#endif
default:
return ctl_error(EINVAL, rbuf, rsize);
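/* Illustrative sketch, not part of the patch: with the change above,
 * INET_REQ_OPEN (and FDOPEN below) now carries two bytes -- an address-family
 * selector followed by a socket-type selector.  build_open_request is a
 * hypothetical helper; the INET_AF_xxx / INET_TYPE_xxx constants are the ones
 * already used by inet_drv.c. */
static void build_open_request(char req[2])
{
    req[0] = INET_AF_INET6;        /* address family, mapped to AF_INET6 above     */
    req[1] = INET_TYPE_SEQPACKET;  /* socket type, mapped to SOCK_SEQPACKET (SCTP) */
}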
@@ -9516,18 +9936,35 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return replen;
- case INET_REQ_FDOPEN: /* pass in an open (and bound) socket */
+ case INET_REQ_FDOPEN: { /* pass in an open (and bound) socket */
+ SOCKET s;
DEBUGF(("packet inet_ctl(%ld): FDOPEN\r\n", (long)desc->port));
- if ((len == 5) && (buf[0] == INET_AF_INET))
- replen = inet_ctl_fdopen(desc, AF_INET, SOCK_DGRAM,
- (SOCKET)get_int32(buf+1),rbuf,rsize);
+ if (len != 6) {
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+ switch (buf[0]) {
+ case INET_AF_INET: af = AF_INET; break;
#if defined(HAVE_IN6) && defined(AF_INET6)
- else if ((len == 5) && (buf[0] == INET_AF_INET6))
- replen = inet_ctl_fdopen(desc, AF_INET6, SOCK_DGRAM,
- (SOCKET)get_int32(buf+1),rbuf,rsize);
+ case INET_AF_INET6: af = AF_INET6; break;
+#else
+ case INET_AF_INET6:
+ return ctl_xerror("eafnosupport", rbuf, rsize);
+ break;
#endif
- else
+ default:
+ return ctl_error(EINVAL, rbuf, rsize);
+ }
+ switch (buf[1]) {
+ case INET_TYPE_STREAM: type = SOCK_STREAM; break;
+ case INET_TYPE_DGRAM: type = SOCK_DGRAM; break;
+#ifdef HAVE_SCTP
+ case INET_TYPE_SEQPACKET: type = SOCK_SEQPACKET; break;
+#endif
+ default:
return ctl_error(EINVAL, rbuf, rsize);
+ }
+ s = (SOCKET)get_int32(buf+2);
+ replen = inet_ctl_fdopen(desc, af, type, s, rbuf, rsize);
if ((*rbuf)[0] != INET_REP_ERROR) {
if (desc->active)
@@ -9547,13 +9984,13 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
#endif
}
return replen;
+ }
case INET_REQ_CLOSE:
DEBUGF(("packet_inet_ctl(%ld): CLOSE\r\n", (long)desc->port));
erl_inet_close(desc);
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
- return 0;
case INET_REQ_CONNECT: {
@@ -9566,8 +10003,9 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
*/
int code;
char tbuf[2];
+#ifdef HAVE_SCTP
unsigned timeout;
-
+#endif
DEBUGF(("packet_inet_ctl(%ld): CONNECT\r\n", (long)desc->port));
/* INPUT: [ Timeout(4), Port(2), Address(N) ] */
@@ -9599,14 +10037,14 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (IS_SOCKET_ERROR(code) && (sock_errno() == EINPROGRESS)) {
/* XXX: Unix only -- WinSock would have a different cond! */
- desc->state = SCTP_STATE_CONNECTING;
+ desc->state = INET_STATE_CONNECTING;
if (timeout != INET_INFINITY)
driver_set_timer(desc->port, timeout);
enq_async(desc, tbuf, INET_REQ_CONNECT);
}
else if (code == 0) { /* OK we are connected */
sock_select(desc, FD_CONNECT, 0);
- desc->state = PACKET_STATE_CONNECTED;
+ desc->state = INET_STATE_CONNECTED;
enq_async(desc, tbuf, INET_REQ_CONNECT);
async_ok(desc);
}
@@ -9628,7 +10066,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
else if (len < 6)
return ctl_error(EINVAL, rbuf, rsize);
else {
- timeout = get_int32(buf); /* IGNORED */
+ /* Ignore timeout */
buf += 4;
len -= 4;
if (inet_set_address(desc->sfamily,
@@ -9652,11 +10090,11 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
}
#ifdef HAVE_SCTP
- case SCTP_REQ_LISTEN:
+ case INET_REQ_LISTEN:
{ /* LISTEN is only for SCTP sockets, not UDP. This code is borrowed
from the TCP section. Returns: {ok,[]} on success.
*/
- int flag;
+ int backlog;
DEBUGF(("packet_inet_ctl(%ld): LISTEN\r\n", (long)desc->port));
if (!IS_SCTP(desc))
@@ -9666,15 +10104,14 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (!IS_BOUND(desc))
return ctl_xerror(EXBADSEQ, rbuf, rsize);
- /* The arg is a binary value: 1:enable, 0:disable */
- if (len != 1)
+ if (len != 2)
return ctl_error(EINVAL, rbuf, rsize);
- flag = get_int8(buf);
+ backlog = get_int16(buf);
- if (IS_SOCKET_ERROR(sock_listen(desc->s, flag)))
+ if (IS_SOCKET_ERROR(sock_listen(desc->s, backlog)))
return ctl_error(sock_errno(), rbuf, rsize);
- desc->state = SCTP_STATE_LISTEN; /* XXX: not used? */
+ desc->state = INET_STATE_LISTENING; /* XXX: not used? */
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
@@ -9699,7 +10136,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
/* List item format: Port(2), IP(4|16) -- compatible with
"inet_set_address": */
inet_address tmp;
- int alen = buf + len - curr;
+ ErlDrvSizeT alen = buf + len - curr;
curr = inet_set_address(desc->sfamily, &tmp, curr, &alen);
if (curr == NULL)
return ctl_error(EINVAL, rbuf, rsize);
@@ -9720,6 +10157,46 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
+
+ case SCTP_REQ_PEELOFF:
+ {
+ Uint32 assoc_id;
+ udp_descriptor* new_udesc;
+ int err;
+ SOCKET new_socket;
+
+ DEBUGF(("packet_inet_ctl(%ld): PEELOFF\r\n", (long)desc->port));
+ if (!IS_SCTP(desc))
+ return ctl_xerror(EXBADPORT, rbuf, rsize);
+ if (!IS_OPEN(desc))
+ return ctl_xerror(EXBADPORT, rbuf, rsize);
+ if (!IS_BOUND(desc))
+ return ctl_xerror(EXBADSEQ, rbuf, rsize);
+ if (! p_sctp_peeloff)
+ return ctl_error(ENOTSUP, rbuf, rsize);
+
+ if (len != 4)
+ return ctl_error(EINVAL, rbuf, rsize);
+ assoc_id = get_int32(buf);
+
+ new_socket = p_sctp_peeloff(desc->s, assoc_id);
+ if (IS_SOCKET_ERROR(new_socket)) {
+ return ctl_error(sock_errno(), rbuf, rsize);
+ }
+
+ desc->caller = driver_caller(desc->port);
+ if ((new_udesc = sctp_inet_copy(udesc, new_socket, &err)) == NULL) {
+ sock_close(new_socket);
+ desc->caller = 0;
+ return ctl_error(err, rbuf, rsize);
+ }
+ new_udesc->inet.state = INET_STATE_CONNECTED;
+ new_udesc->inet.stype = SOCK_STREAM;
+
+ inet_reply_ok_port(desc, new_udesc->inet.dport);
+ (*rbuf)[0] = INET_REP;
+ return 1;
+ }
#endif /* HAVE_SCTP */
case PACKET_REQ_RECV:
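/* Illustrative sketch, not part of the patch: the new SCTP_REQ_PEELOFF
 * request above carries a single 4-byte big-endian association id, matching
 * the get_int32(buf) call in its handler.  build_peeloff_request is a
 * hypothetical caller-side encoder. */
static void build_peeloff_request(unsigned char req[4], unsigned long assoc_id)
{
    req[0] = (unsigned char)(assoc_id >> 24);   /* most significant byte first */
    req[1] = (unsigned char)(assoc_id >> 16);
    req[2] = (unsigned char)(assoc_id >> 8);
    req[3] = (unsigned char)(assoc_id);
}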
@@ -9739,7 +10216,7 @@ static int packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, int len,
if (desc->active || (len != 8))
return ctl_error(EINVAL, rbuf, rsize);
timeout = get_int32(buf);
- /* The 2nd arg, Length(4), is ignored for both UDP ans SCTP protocols,
+ /* The 2nd arg, Length(4), is ignored for both UDP and SCTP protocols,
since they are msg-oriented. */
if (enq_async(desc, tbuf, PACKET_REQ_RECV) < 0)
@@ -9784,13 +10261,13 @@ static void packet_inet_timeout(ErlDrvData e)
** There is no destination address -- SCTYP send is performed over
** an existing association, using "sctp_sndrcvinfo" specified.
*/
-static void packet_inet_command(ErlDrvData e, char* buf, int len)
+static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len)
{
udp_descriptor * udesc= (udp_descriptor*) e;
inet_descriptor* desc = INETP(udesc);
char* ptr = buf;
char* qtr;
- int sz;
+ ErlDrvSizeT sz;
int code;
inet_address other;
@@ -9808,7 +10285,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, int len)
#ifdef HAVE_SCTP
if (IS_SCTP(desc))
{
- int data_len;
+ ErlDrvSizeT data_len;
struct iovec iov[1]; /* For real data */
struct msghdr mhdr; /* Message wrapper */
struct sctp_sndrcvinfo *sri; /* The actual ancillary data */
@@ -9844,6 +10321,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, int len)
mhdr.msg_iovlen = 1;
mhdr.msg_control = cmsg.ancd; /* For ancillary data */
mhdr.msg_controllen = cmsg.hdr.cmsg_len;
+ VALGRIND_MAKE_MEM_DEFINED(mhdr.msg_control, mhdr.msg_controllen); /*suppress "uninitialised bytes"*/
mhdr.msg_flags = 0; /* Not used with "sendmsg" */
/* Now do the actual sending. NB: "flags" in "sendmsg" itself are NOT
@@ -9918,12 +10396,8 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
{
inet_descriptor* desc = INETP(udesc);
int n;
- unsigned int len;
inet_address other;
char abuf[sizeof(inet_address)]; /* buffer address; enough??? */
- int sz;
- char* ptr;
- ErlDrvBinary* buf; /* binary */
int packet_count = udesc->read_packets;
int count = 0; /* number of packets delivered to owner */
#ifdef HAVE_SCTP
@@ -9934,23 +10408,39 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
#endif
while(packet_count--) {
- len = sizeof(other);
- sz = desc->bufsz;
- /* Allocate space for message and address. NB: "bufsz" is in "desc",
- but the "buf" itself is allocated separately:
- */
- if ((buf = alloc_buffer(sz+len)) == NULL)
- return packet_error(udesc, ENOMEM);
- ptr = buf->orig_bytes + len; /* pointer to message part */
+ unsigned int len = sizeof(other);
+
+ /* udesc->i_buf is only kept between SCTP fragments */
+ if (udesc->i_buf == NULL) {
+ udesc->i_bufsz = desc->bufsz + len;
+ if ((udesc->i_buf = alloc_buffer(udesc->i_bufsz)) == NULL)
+ return packet_error(udesc, ENOMEM);
+ /* pointer to message start */
+ udesc->i_ptr = udesc->i_buf->orig_bytes + len;
+ } else {
+ ErlDrvBinary* tmp;
+ int bufsz;
+ bufsz = desc->bufsz + (udesc->i_ptr - udesc->i_buf->orig_bytes);
+ if ((tmp = realloc_buffer(udesc->i_buf, bufsz)) == NULL) {
+ release_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
+ return packet_error(udesc, ENOMEM);
+ } else {
+ udesc->i_ptr =
+ tmp->orig_bytes + (udesc->i_ptr - udesc->i_buf->orig_bytes);
+ udesc->i_buf = tmp;
+ udesc->i_bufsz = bufsz;
+ }
+ }
/* Note: On Windows NT, recvfrom() fails if the socket is connected. */
#ifdef HAVE_SCTP
/* For SCTP we must use recvmsg() */
if (IS_SCTP(desc)) {
- iov->iov_base = ptr; /* Data will come here */
- iov->iov_len = sz; /* Remaining buffer space */
+ iov->iov_base = udesc->i_ptr; /* Data will come here */
+ iov->iov_len = desc->bufsz; /* Remaining buffer space */
- mhdr.msg_name = &other; /* Peer addr comes into "other" */
+ mhdr.msg_name = &other; /* Peer addr comes into "other" */
mhdr.msg_namelen = len;
mhdr.msg_iov = iov;
mhdr.msg_iovlen = 1;
@@ -9960,42 +10450,28 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
/* Do the actual SCTP receive: */
n = sock_recvmsg(desc->s, &mhdr, 0);
+ len = mhdr.msg_namelen;
goto check_result;
}
#endif
/* Use recv() instead on connected sockets. */
if ((desc->state & INET_F_ACTIVE)) {
- n = sock_recv(desc->s, ptr, sz, 0);
+ n = sock_recv(desc->s, udesc->i_ptr, desc->bufsz, 0);
other = desc->remote;
+ goto check_result;
}
- else
- n = sock_recvfrom(desc->s, ptr, sz, 0, &other.sa, &len);
-
-#ifdef HAVE_SCTP
+ n = sock_recvfrom(desc->s, udesc->i_ptr, desc->bufsz,
+ 0, &other.sa, &len);
check_result:
-#endif
/* Analyse the result: */
- if (IS_SOCKET_ERROR(n)
-#ifdef HAVE_SCTP
- || (short_recv = (IS_SCTP(desc) && !(mhdr.msg_flags & MSG_EOR)))
- /* NB: here we check for EOR not being set -- this is an error as
- well, we don't support partial msgs:
- */
-#endif
- ) {
+ if (IS_SOCKET_ERROR(n)) {
int err = sock_errno();
- release_buffer(buf);
if (err != ERRNO_BLOCK) {
+ /* real error */
+ release_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
if (!desc->active) {
-#ifdef HAVE_SCTP
- if (short_recv) {
- async_error_am(desc, am_short_recv);
- } else {
- async_error(desc, err);
- }
-#else
async_error(desc, err);
-#endif
driver_cancel_timer(desc->port);
sock_select(desc,FD_READ,0);
}
@@ -10003,46 +10479,72 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
/* This is for an active desc only: */
packet_error_message(udesc, err);
}
+ return count;
}
- else if (!desc->active)
+ /* would block error - try again */
+ if (!desc->active
+#ifdef HAVE_SCTP
+ || short_recv
+#endif
+ ) {
sock_select(desc,FD_READ,1);
+ }
return count; /* strange, not ready */
}
- else {
- int offs;
- int nsz;
+
+#ifdef HAVE_SCTP
+ if (IS_SCTP(desc) && (short_recv = !(mhdr.msg_flags & MSG_EOR))) {
+ /* SCTP non-final message fragment */
+ inet_input_count(desc, n);
+ udesc->i_ptr += n;
+ continue; /* wait for more fragments */
+ }
+#endif
+
+ {
+ /* message received */
int code;
- unsigned int alen = len;
void * extra = NULL;
+ char * ptr;
+ int nsz;
inet_input_count(desc, n);
- inet_get_address(desc->sfamily, abuf, &other, &alen);
- /* Copy formatted address to the buffer allocated; "alen" is the
- actual length which must be <= than the original reserved "len".
+ udesc->i_ptr += n;
+ inet_get_address(desc->sfamily, abuf, &other, &len);
+ /* Copy formatted address to the buffer allocated; "len" is the
+ actual length, which must be <= the originally reserved length.
This means that the addr + data in the buffer are contiguous,
- but they may start not at the "orig_bytes", but with some "offs"
- from them:
+ but they may not start at "orig_bytes"; instead they start at "ptr":
*/
- ASSERT (alen <= len);
- sys_memcpy(ptr - alen, abuf, alen);
- ptr -= alen;
- nsz = n + alen; /* nsz = data + address */
- offs = ptr - buf->orig_bytes; /* initial pointer offset */
+ ASSERT (len <= sizeof(other));
+ ptr = udesc->i_buf->orig_bytes + sizeof(other) - len;
+ sys_memcpy(ptr, abuf, len);
+
+ nsz = udesc->i_ptr - ptr;
/* Check if we need to reallocate binary */
- if ((desc->mode == INET_MODE_BINARY) &&
- (desc->hsz < n) && (nsz < BIN_REALLOC_LIMIT(sz))) {
+ if ((desc->mode == INET_MODE_BINARY)
+ && (desc->hsz < (nsz - len))
+ && (nsz + BIN_REALLOC_MARGIN(desc->bufsz) < udesc->i_bufsz)) {
ErlDrvBinary* tmp;
- if ((tmp = realloc_buffer(buf,nsz+offs)) != NULL)
- buf = tmp;
+ int bufsz;
+ bufsz = udesc->i_ptr - udesc->i_buf->orig_bytes;
+ if ((tmp = realloc_buffer(udesc->i_buf, bufsz)) != NULL) {
+ udesc->i_buf = tmp;
+ udesc->i_bufsz = bufsz;
+ udesc->i_ptr = NULL; /* not used from here */
+ }
}
#ifdef HAVE_SCTP
if (IS_SCTP(desc)) extra = &mhdr;
#endif
/* Actual parsing and return of the data received, occur here: */
- code = packet_reply_binary_data(desc, (unsigned int)alen,
- buf, offs, nsz, extra);
- free_buffer(buf);
+ code = packet_reply_binary_data(desc, len, udesc->i_buf,
+ (sizeof(other) - len),
+ nsz,
+ extra);
+ free_buffer(udesc->i_buf);
+ udesc->i_buf = NULL;
if (code < 0)
return count;
count++;
@@ -10052,7 +10554,17 @@ static int packet_inet_input(udp_descriptor* udesc, HANDLE event)
return count; /* passive mode (read one packet only) */
}
}
+ } /* while(packet_count--) { */
+
+ /* We ran out of tries (packet_count), either on an active socket
+ * that received that many messages, or on an SCTP socket that
+ * received that many message fragments without the final one.
+ */
+#ifdef HAVE_SCTP
+ if (short_recv) {
+ sock_select(desc, FD_READ, 1);
}
+#endif
return count;
}
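/* Illustrative sketch, not part of the patch: the fragment handling above
 * keeps udesc->i_buf alive across recvmsg() calls until MSG_EOR is seen,
 * growing the buffer for every partial read.  The same grow-and-append
 * pattern in isolation, with hypothetical names and plain malloc/realloc
 * instead of the driver's buffer allocator: */
#include <stdlib.h>
#include <string.h>

struct reasm {
    char  *buf;    /* accumulated message, like udesc->i_buf         */
    size_t used;   /* bytes received so far, like i_ptr - orig_bytes */
    size_t cap;    /* allocated size, like udesc->i_bufsz            */
};

/* Append one fragment; returns 0 on allocation failure. */
static int reasm_append(struct reasm *r, const char *frag, size_t n, size_t bufsz)
{
    if (r->buf == NULL || r->used + n > r->cap) {
        size_t cap = r->used + (n > bufsz ? n : bufsz);
        char  *tmp = realloc(r->buf, cap);
        if (tmp == NULL)
            return 0;
        r->buf = tmp;
        r->cap = cap;
    }
    memcpy(r->buf + r->used, frag, n);
    r->used += n;
    return 1;   /* the caller delivers r->buf once MSG_EOR is seen */
}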
@@ -10062,7 +10574,7 @@ static void packet_inet_drv_output(ErlDrvData e, ErlDrvEvent event)
}
/* UDP/SCTP socket ready for output:
-** This is a Back-End for Non-Block SCTP Connect (SCTP_STATE_CONNECTING)
+** This is a Back-End for Non-Block SCTP Connect (INET_STATE_CONNECTING)
*/
static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
{
@@ -10073,7 +10585,7 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
DEBUGF(("packet_inet_output(%ld) {s=%d\r\n",
(long)desc->port, desc->s));
- if (desc->state == SCTP_STATE_CONNECTING) {
+ if (desc->state == INET_STATE_CONNECTING) {
sock_select(desc, FD_CONNECT, 0);
driver_cancel_timer(ix); /* possibly cancel a timer */
@@ -10093,7 +10605,7 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
(struct sockaddr*) &desc->remote, &sz);
if (IS_SOCKET_ERROR(code)) {
- desc->state = PACKET_STATE_BOUND; /* restore state */
+ desc->state = INET_STATE_BOUND; /* restore state */
ret = async_error(desc, sock_errno());
goto done;
}
@@ -10106,15 +10618,15 @@ static int packet_inet_output(udp_descriptor* udesc, HANDLE event)
(void *)&error, &sz);
if ((code < 0) || error) {
- desc->state = PACKET_STATE_BOUND; /* restore state */
+ desc->state = INET_STATE_BOUND; /* restore state */
ret = async_error(desc, error);
goto done;
}
}
-#endif /* SOCKOPT_CONNECT_STAT */
+#endif /* SO_ERROR */
#endif /* !__WIN32__ */
- desc->state = PACKET_STATE_CONNECTED;
+ desc->state = INET_STATE_CONNECTED;
async_ok(desc);
}
else {
@@ -10521,7 +11033,7 @@ int erts_sock_connect(erts_sock_t socket, byte *ip_addr, int len, Uint16 port)
{
SOCKET s = (SOCKET) socket;
char buf[2 + 4];
- int blen = 6;
+ ErlDrvSizeT blen = 6;
inet_address addr;
if (socket == ERTS_SOCK_INVALID_SOCKET || len != 4)
diff --git a/erts/emulator/drivers/common/ram_file_drv.c b/erts/emulator/drivers/common/ram_file_drv.c
index abedcc933a..a109e40333 100644
--- a/erts/emulator/drivers/common/ram_file_drv.c
+++ b/erts/emulator/drivers/common/ram_file_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -98,7 +98,7 @@ typedef unsigned char uchar;
static ErlDrvData rfile_start(ErlDrvPort, char*);
static int rfile_init(void);
static void rfile_stop(ErlDrvData);
-static void rfile_command(ErlDrvData, char*, int);
+static void rfile_command(ErlDrvData, char*, ErlDrvSizeT);
struct erl_drv_entry ram_file_driver_entry = {
@@ -108,7 +108,23 @@ struct erl_drv_entry ram_file_driver_entry = {
rfile_command,
NULL,
NULL,
- "ram_file_drv"
+ "ram_file_drv",
+ NULL,
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
};
/* A File is represented as an array of bytes, this array is
@@ -121,9 +137,9 @@ typedef struct ram_file {
int flags; /* flags read/write */
ErlDrvBinary* bin; /* binary to hold binary file */
char* buf; /* buffer start (in binary) */
- int size; /* buffer size (allocated) */
- int cur; /* current position in buffer */
- int end; /* end position in buffer */
+ ErlDrvSSizeT size; /* buffer size (allocated) */
+ ErlDrvSSizeT cur; /* current position in buffer */
+ ErlDrvSSizeT end; /* end position in buffer */
} RamFile;
#ifdef LOADABLE
@@ -211,7 +227,7 @@ static int reply(RamFile *f, int ok, int err)
return 0;
}
-static int numeric_reply(RamFile *f, int result)
+static int numeric_reply(RamFile *f, ErlDrvSSizeT result)
{
char tmp[5];
@@ -231,7 +247,8 @@ static int numeric_reply(RamFile *f, int result)
/* install bin as the new binary and reset all pointers */
-static void ram_file_set(RamFile *f, ErlDrvBinary *bin, int bsize, int len)
+static void ram_file_set(RamFile *f, ErlDrvBinary *bin,
+ ErlDrvSSizeT bsize, ErlDrvSSizeT len)
{
f->size = bsize;
f->buf = bin->orig_bytes;
@@ -240,9 +257,9 @@ static void ram_file_set(RamFile *f, ErlDrvBinary *bin, int bsize, int len)
f->bin = bin;
}
-static int ram_file_init(RamFile *f, char *buf, int count, int *error)
+static int ram_file_init(RamFile *f, char *buf, ErlDrvSSizeT count, int *error)
{
- int bsize;
+ ErlDrvSSizeT bsize;
ErlDrvBinary* bin;
if (count < 0) {
@@ -268,9 +285,9 @@ static int ram_file_init(RamFile *f, char *buf, int count, int *error)
return count;
}
-static int ram_file_expand(RamFile *f, int size, int *error)
+static ErlDrvSSizeT ram_file_expand(RamFile *f, ErlDrvSSizeT size, int *error)
{
- int bsize;
+ ErlDrvSSizeT bsize;
ErlDrvBinary* bin;
if (size < 0) {
@@ -298,10 +315,10 @@ static int ram_file_expand(RamFile *f, int size, int *error)
}
-static int ram_file_write(RamFile *f, char *buf, int len,
- int *location, int *error)
+static ErlDrvSSizeT ram_file_write(RamFile *f, char *buf, ErlDrvSSizeT len,
+ ErlDrvSSizeT *location, int *error)
{
- int cur = f->cur;
+ ErlDrvSSizeT cur = f->cur;
if (!(f->flags & RAM_FILE_MODE_WRITE)) {
*error = EBADF;
@@ -322,11 +339,11 @@ static int ram_file_write(RamFile *f, char *buf, int len,
return len;
}
-static int ram_file_read(RamFile *f, int len, ErlDrvBinary **bp,
- int *location, int *error)
+static ErlDrvSSizeT ram_file_read(RamFile *f, ErlDrvSSizeT len, ErlDrvBinary **bp,
+ ErlDrvSSizeT *location, int *error)
{
ErlDrvBinary* bin;
- int cur = f->cur;
+ ErlDrvSSizeT cur = f->cur;
if (!(f->flags & RAM_FILE_MODE_READ)) {
*error = EBADF;
@@ -352,9 +369,10 @@ static int ram_file_read(RamFile *f, int len, ErlDrvBinary **bp,
return len;
}
-static int ram_file_seek(RamFile *f, int offset, int whence, int *error)
+static ErlDrvSSizeT ram_file_seek(RamFile *f, ErlDrvSSizeT offset, int whence,
+ int *error)
{
- int pos;
+ ErlDrvSSizeT pos;
if (f->flags == 0) {
*error = EBADF;
@@ -389,13 +407,13 @@ static int ram_file_seek(RamFile *f, int offset, int whence, int *error)
static int ram_file_uuencode(RamFile *f)
{
- int code_len = UULINE(UNIX_LINE);
- int len = f->end;
- int usize = 4*((len+2)/3) + 2*((len+code_len-1)/code_len) + 2;
+ ErlDrvSSizeT code_len = UULINE(UNIX_LINE);
+ ErlDrvSSizeT len = f->end;
+ ErlDrvSSizeT usize = 4*((len+2)/3) + 2*((len+code_len-1)/code_len) + 2;
ErlDrvBinary* bin;
uchar* inp;
uchar* outp;
- int count = 0;
+ ErlDrvSSizeT count = 0;
if ((bin = driver_alloc_binary(usize)) == NULL)
return error_reply(f, ENOMEM);
@@ -447,8 +465,8 @@ static int ram_file_uuencode(RamFile *f)
static int ram_file_uudecode(RamFile *f)
{
- int len = f->end;
- int usize = ( (len+3) / 4 ) * 3;
+ ErlDrvSSizeT len = f->end;
+ ErlDrvSSizeT usize = ( (len+3) / 4 ) * 3;
ErlDrvBinary* bin;
uchar* inp;
uchar* outp;
@@ -510,7 +528,7 @@ static int ram_file_uudecode(RamFile *f)
static int ram_file_compress(RamFile *f)
{
- int size = f->end;
+ ErlDrvSSizeT size = f->end;
ErlDrvBinary* bin;
if ((bin = erts_gzdeflate_buffer(f->buf, size)) == NULL) {
@@ -528,7 +546,7 @@ static int ram_file_compress(RamFile *f)
static int ram_file_uncompress(RamFile *f)
{
- int size = f->end;
+ ErlDrvSSizeT size = f->end;
ErlDrvBinary* bin;
if ((bin = erts_gzinflate_buffer(f->buf, size)) == NULL) {
@@ -541,15 +559,15 @@ static int ram_file_uncompress(RamFile *f)
}
-static void rfile_command(ErlDrvData e, char* buf, int count)
+static void rfile_command(ErlDrvData e, char* buf, ErlDrvSizeT count)
{
RamFile* f = (RamFile*)e;
int error = 0;
ErlDrvBinary* bin;
char header[5]; /* result code + count */
- int offset;
- int origin; /* Origin of seek. */
- int n;
+ ErlDrvSSizeT offset;
+ ErlDrvSSizeT origin; /* Origin of seek. */
+ ErlDrvSSizeT n;
count--;
switch(*(uchar*)buf++) {
diff --git a/erts/emulator/drivers/common/zlib_drv.c b/erts/emulator/drivers/common/zlib_drv.c
index f50899a730..60394b610b 100644
--- a/erts/emulator/drivers/common/zlib_drv.c
+++ b/erts/emulator/drivers/common/zlib_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -64,8 +64,9 @@
static int zlib_init(void);
static ErlDrvData zlib_start(ErlDrvPort port, char* buf);
static void zlib_stop(ErlDrvData e);
-static int zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen);
+static void zlib_flush(ErlDrvData e);
+static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
static void zlib_outputv(ErlDrvData drv_data, ErlIOVec *ev);
ErlDrvEntry zlib_driver_entry = {
@@ -82,7 +83,7 @@ ErlDrvEntry zlib_driver_entry = {
NULL, /* timeout */
zlib_outputv,
NULL, /* read_async */
- NULL, /* flush */
+ zlib_flush,
NULL, /* call */
NULL, /* event */
ERL_DRV_EXTENDED_MARKER,
@@ -162,12 +163,12 @@ static char* zlib_reason(int code, int* err)
}
-static int zlib_return(int code, char** rbuf, int rlen)
+static ErlDrvSSizeT zlib_return(int code, char** rbuf, ErlDrvSizeT rlen)
{
int msg_code = 0; /* 0=ok, 1=error */
char* dst = *rbuf;
char* src;
- int len = 0;
+ ErlDrvSizeT len = 0;
src = zlib_reason(code, &msg_code);
*dst++ = msg_code;
@@ -182,7 +183,8 @@ static int zlib_return(int code, char** rbuf, int rlen)
return len;
}
-static int zlib_value2(int msg_code, int value, char** rbuf, int rlen)
+static ErlDrvSSizeT zlib_value2(int msg_code, int value,
+ char** rbuf, ErlDrvSizeT rlen)
{
char* dst = *rbuf;
@@ -197,7 +199,7 @@ static int zlib_value2(int msg_code, int value, char** rbuf, int rlen)
return 5;
}
-static int zlib_value(int value, char** rbuf, int rlen)
+static ErlDrvSSizeT zlib_value(int value, char** rbuf, ErlDrvSizeT rlen)
{
return zlib_value2(2, value, rbuf, rlen);
}
@@ -409,8 +411,15 @@ static void zlib_stop(ErlDrvData e)
driver_free(d);
}
-static int zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen)
+static void zlib_flush(ErlDrvData drv_data)
+{
+ ZLibData* d = (ZLibData*) drv_data;
+
+ driver_deq(d->port, driver_sizeq(d->port));
+}
+
+static ErlDrvSSizeT zlib_ctl(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
{
ZLibData* d = (ZLibData*)drv_data;
int res;
diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c
index d782b044a9..b29f80a8ba 100644
--- a/erts/emulator/drivers/unix/ttsl_drv.c
+++ b/erts/emulator/drivers/unix/ttsl_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -106,7 +106,7 @@ static int lpos; /* The current "cursor position" in the line buf
/* Main interface functions. */
static void ttysl_stop(ErlDrvData);
-static void ttysl_from_erlang(ErlDrvData, char*, int);
+static void ttysl_from_erlang(ErlDrvData, char*, ErlDrvSizeT);
static void ttysl_from_tty(ErlDrvData, ErlDrvEvent);
static void ttysl_stop_select(ErlDrvEvent, void*);
static Sint16 get_sint16(char*);
@@ -141,7 +141,8 @@ static void update_cols(void);
static int tty_init(int,int,int,int);
static int tty_set(int);
static int tty_reset(int);
-static int ttysl_control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT ttysl_control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
#ifdef ERTS_NOT_USED
static RETSIGTYPE suspend(int);
#endif
@@ -242,7 +243,7 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
#ifndef HAVE_TERMCAP
return ERL_DRV_ERROR_GENERAL;
#else
- char *s, *t, c, *l;
+ char *s, *t, *l;
int canon, echo, sig; /* Terminal characteristics */
int flag;
extern int using_oldshell; /* set this to let the rest of erts know */
@@ -262,7 +263,6 @@ static ErlDrvData ttysl_start(ErlDrvPort port, char* buf)
s++;
/* Find end of this argument (start of next) and insert NUL. */
if ((t = strchr(s, ' '))) {
- c = *t;
*t = '\0';
}
if ((flag = ((*s == '+') ? 1 : ((*s == '-') ? -1 : 0)))) {
@@ -346,13 +346,13 @@ static void ttysl_get_window_size(Uint32 *width, Uint32 *height)
*height = DEF_HEIGHT;
}
-static int ttysl_control(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+static ErlDrvSSizeT ttysl_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
char resbuff[2*sizeof(Uint32)];
- int res_size;
+ ErlDrvSizeT res_size;
switch (command) {
case CTRL_OP_GET_WINSIZE:
{
@@ -634,14 +634,14 @@ static int check_buf_size(byte *s, int n)
}
-static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, int count)
+static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, ErlDrvSizeT count)
{
if (lpos > MAXSIZE)
put_chars((byte*)"\n", 1);
switch (buf[0]) {
case OP_PUTC:
- DEBUGLOG(("OP: Putc(%d)",count-1));
+ DEBUGLOG(("OP: Putc(%lu)",(unsigned long) count-1));
if (check_buf_size((byte*)buf+1, count-1) == 0)
return;
put_chars((byte*)buf+1, count-1);
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index 4b3934657c..ad112f7590 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -33,6 +33,9 @@
#include <sys/types.h>
#include <sys/uio.h>
#endif
+#if defined(HAVE_SENDFILE) && (defined(__linux__) || (defined(__sun) && defined(__SVR4)))
+#include <sys/sendfile.h>
+#endif
#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
#define DARWIN 1
@@ -813,7 +816,6 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
char* name, int info_for_link)
{
struct stat statbuf; /* Information about the file */
- struct tm *timep; /* Broken-apart filetime. */
int result;
#ifdef VXWORKS
@@ -880,40 +882,17 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
else
pInfo->type = FT_OTHER;
-#if defined(HAVE_LOCALTIME_R) || defined(VXWORKS)
- {
- /* Use the reentrant version of localtime() */
- static struct tm local_tm;
-#define localtime(a) (localtime_r((a), &local_tm), &local_tm)
-#endif
-
-
-#define GET_TIME(dst, src) \
- timep = localtime(&statbuf.src); \
- (dst).year = timep->tm_year+1900; \
- (dst).month = timep->tm_mon+1; \
- (dst).day = timep->tm_mday; \
- (dst).hour = timep->tm_hour; \
- (dst).minute = timep->tm_min; \
- (dst).second = timep->tm_sec
-
- GET_TIME(pInfo->accessTime, st_atime);
- GET_TIME(pInfo->modifyTime, st_mtime);
- GET_TIME(pInfo->cTime, st_ctime);
-
-#undef GET_TIME
+ pInfo->accessTime = statbuf.st_atime;
+ pInfo->modifyTime = statbuf.st_mtime;
+ pInfo->cTime = statbuf.st_ctime;
-#if defined(HAVE_LOCALTIME_R) || defined(VXWORKS)
- }
-#endif
-
- pInfo->mode = statbuf.st_mode;
- pInfo->links = statbuf.st_nlink;
+ pInfo->mode = statbuf.st_mode;
+ pInfo->links = statbuf.st_nlink;
pInfo->major_device = statbuf.st_dev;
pInfo->minor_device = statbuf.st_rdev;
- pInfo->inode = statbuf.st_ino;
- pInfo->uid = statbuf.st_uid;
- pInfo->gid = statbuf.st_gid;
+ pInfo->inode = statbuf.st_ino;
+ pInfo->uid = statbuf.st_uid;
+ pInfo->gid = statbuf.st_gid;
return 1;
}
@@ -921,6 +900,8 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
int
efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
{
+ struct utimbuf tval;
+
CHECK_PATHLEN(name, errInfo);
#ifdef VXWORKS
@@ -973,38 +954,18 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
#endif /* !VXWORKS */
- if (pInfo->accessTime.year != -1 && pInfo->modifyTime.year != -1) {
- struct utimbuf tval;
- struct tm timebuf;
-
-#define MKTIME(tb, ts) \
- timebuf.tm_year = ts.year-1900; \
- timebuf.tm_mon = ts.month-1; \
- timebuf.tm_mday = ts.day; \
- timebuf.tm_hour = ts.hour; \
- timebuf.tm_min = ts.minute; \
- timebuf.tm_sec = ts.second; \
- timebuf.tm_isdst = -1; \
- if ((tb = mktime(&timebuf)) == (time_t) -1) { \
- errno = EINVAL; \
- return check_error(-1, errInfo); \
- }
+ tval.actime = pInfo->accessTime;
+ tval.modtime = pInfo->modifyTime;
- MKTIME(tval.actime, pInfo->accessTime);
- MKTIME(tval.modtime, pInfo->modifyTime);
-#undef MKTIME
-
#ifdef VXWORKS
- /* VxWorks' utime doesn't work when the file is a nfs mounted
- * one, don't report error if utime fails.
- */
- utime(name, &tval);
- return 1;
+ /* VxWorks' utime doesn't work when the file is an NFS mounted
+ * one, so don't report an error if utime fails.
+ */
+ utime(name, &tval);
+ return 1;
#else
- return check_error(utime(name, &tval), errInfo);
+ return check_error(utime(name, &tval), errInfo);
#endif
- }
- return 1;
}
@@ -1043,13 +1004,11 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */
* opened */
int fd, /* File descriptor to write to */
SysIOVec* iov, /* Vector of buffer structs.
- * The structs are unchanged
- * after the call */
- int iovcnt, /* Number of structs in vector */
- size_t size) /* Number of bytes to write */
+ * The structs may be changed, e.g.
+ * due to incomplete writes */
+ int iovcnt) /* Number of structs in vector */
{
int cnt = 0; /* Buffers so far written */
- int p = 0; /* Position in next buffer */
ASSERT(iovcnt >= 0);
@@ -1060,66 +1019,47 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */
#endif
while (cnt < iovcnt) {
+ if ((! iov[cnt].iov_base) || (iov[cnt].iov_len <= 0)) {
+ /* Empty buffer - skip */
+ cnt++;
+ } else { /* Non-empty buffer */
+ ssize_t w; /* Bytes written in this call */
#ifdef HAVE_WRITEV
- int w; /* Bytes written in this call */
- int b = iovcnt - cnt; /* Buffers to write */
- if (b > MAXIOV)
- b = MAXIOV;
- if (iov[cnt].iov_base && iov[cnt].iov_len > 0) {
- if (b == 1) {
- /* Degenerated io vector */
- do {
- w = write(fd, iov[cnt].iov_base + p, iov[cnt].iov_len - p);
- } while (w < 0 && errno == EINTR);
- } else {
- /* Non-empty vector first.
- * Adjust pos in first buffer in case of
- * previous incomplete writev */
- iov[cnt].iov_base += p;
- iov[cnt].iov_len -= p;
+ int b = iovcnt - cnt; /* Buffers to write */
+ /* Use as many buffers as MAXIOV allows */
+ if (b > MAXIOV)
+ b = MAXIOV;
+ if (b > 1) {
do {
w = writev(fd, &iov[cnt], b);
} while (w < 0 && errno == EINTR);
- iov[cnt].iov_base -= p;
- iov[cnt].iov_len += p;
- }
- if (w < 0)
- return check_error(-1, errInfo);
- } else {
- /* Empty vector first - skip */
- cnt++;
- continue;
- }
- ASSERT(w >= 0);
- /* Move forward to next vector to write */
- for (; cnt < iovcnt; cnt++) {
- if (iov[cnt].iov_base && iov[cnt].iov_len > 0) {
- if (w < iov[cnt].iov_len)
- break;
- else
- w -= iov[cnt].iov_len;
- }
- }
- ASSERT(w >= 0);
- p = w > 0 ? w : 0; /* Skip p bytes next writev */
-#else /* #ifdef HAVE_WRITEV */
- if (iov[cnt].iov_base && iov[cnt].iov_len > 0) {
- /* Non-empty vector */
- int w; /* Bytes written in this call */
- while (p < iov[cnt].iov_len) {
- do {
- w = write(fd, iov[cnt].iov_base + p, iov[cnt].iov_len - p);
- } while (w < 0 && errno == EINTR);
- if (w < 0)
- return check_error(-1, errInfo);
- p += w;
+ } else
+ /* Degenerate io vector - use a regular write */
+#endif
+ {
+ do {
+ w = write(fd, iov[cnt].iov_base, iov[cnt].iov_len);
+ } while (w < 0 && errno == EINTR);
+ ASSERT(w <= iov[cnt].iov_len);
+ }
+ if (w < 0) return check_error(-1, errInfo);
+ /* Move forward to next buffer to write */
+ for (; cnt < iovcnt && w > 0; cnt++) {
+ if (iov[cnt].iov_base && iov[cnt].iov_len > 0) {
+ if (w < iov[cnt].iov_len) {
+ /* Adjust the buffer for next write */
+ iov[cnt].iov_len -= w;
+ iov[cnt].iov_base += w;
+ w = 0;
+ break;
+ } else {
+ w -= iov[cnt].iov_len;
+ }
+ }
}
- }
- cnt++;
- p = 0;
-#endif /* #ifdef HAVE_WRITEV */
+ ASSERT(w == 0);
+ } /* else Non-empty buffer */
} /* while (cnt< iovcnt) */
- size = 0; /* Avoid compiler warning */
return 1;
}
@@ -1464,3 +1404,115 @@ efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
return check_error(0, errInfo);
#endif
}
+
+#ifdef HAVE_SENDFILE
+// For some reason the maximum size_t cannot be used as the max size
+// 3GB seems to work on all platforms
+#define SENDFILE_CHUNK_SIZE ((1UL << 30) -1)
+
+/*
+ * sendfile: The implementation of the sendfile system call varies
+ * a lot on different *nix platforms, so to make the API behave similarly
+ * on all of them we have to emulate some things on Linux and play with
+ * variables on BSD/Darwin.
+ *
+ * All of the calls will split a command which tries to send more than
+ * SENDFILE_CHUNK_SIZE of data at once.
+ *
+ * On platforms where *nbytes of 0 does not mean the entire file, this is
+ * simulated.
+ *
+ * It would be possible to implement header/trailer support in sendfile,
+ * though it would have to be emulated on Linux, and on BSD/Darwin some
+ * complex calculations are needed when using a non-blocking socket to
+ * figure out how much of the header/file/trailer was sent in each call.
+ *
+ * The semantics of the API are:
+ * Return value: 1 if all data was sent and the function does not need to
+ * be called again; 0 if an error occurs OR if there is more data that
+ * has to be sent (EAGAIN or EINTR will be set appropriately).
+ *
+ * The amount of data written in a call is returned through nbytes.
+ *
+ */
+
+int
+efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd,
+ off_t *offset, Uint64 *nbytes, struct t_sendfile_hdtl* hdtl)
+{
+ Uint64 written = 0;
+#if defined(__linux__)
+ ssize_t retval;
+ do {
+ // check if *nbytes is 0 or greater than chunk size
+ if (*nbytes == 0 || *nbytes > SENDFILE_CHUNK_SIZE)
+ retval = sendfile(out_fd, in_fd, offset, SENDFILE_CHUNK_SIZE);
+ else
+ retval = sendfile(out_fd, in_fd, offset, *nbytes);
+ if (retval > 0) {
+ written += retval;
+ *nbytes -= retval;
+ }
+ } while (retval == SENDFILE_CHUNK_SIZE);
+ if (written != 0) {
+ // -1 is not returned by the linux API so we have to simulate it
+ retval = -1;
+ errno = EAGAIN;
+ }
+#elif defined(__sun) && defined(__SVR4) && defined(HAVE_SENDFILEV)
+ ssize_t retval;
+ size_t len;
+ sendfilevec_t fdrec;
+ fdrec.sfv_fd = in_fd;
+ fdrec.sfv_flag = 0;
+ do {
+ fdrec.sfv_off = *offset;
+ len = 0;
+ // check if *nbytes is 0 or greater than chunk size
+ if (*nbytes == 0 || *nbytes > SENDFILE_CHUNK_SIZE)
+ fdrec.sfv_len = SENDFILE_CHUNK_SIZE;
+ else
+ fdrec.sfv_len = *nbytes;
+ retval = sendfilev(out_fd, &fdrec, 1, &len);
+ if (retval != -1 || errno == EAGAIN || errno == EINTR) {
+ *offset += len;
+ *nbytes -= len;
+ written += len;
+ }
+ } while (len == SENDFILE_CHUNK_SIZE);
+#elif defined(DARWIN)
+ int retval;
+ off_t len;
+ do {
+ // check if *nbytes is 0 or greater than chunk size
+ if(*nbytes > SENDFILE_CHUNK_SIZE)
+ len = SENDFILE_CHUNK_SIZE;
+ else
+ len = *nbytes;
+ retval = sendfile(in_fd, out_fd, *offset, &len, NULL, 0);
+ if (retval != -1 || errno == EAGAIN || errno == EINTR) {
+ *offset += len;
+ *nbytes -= len;
+ written += len;
+ }
+ } while (len == SENDFILE_CHUNK_SIZE);
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
+ off_t len;
+ int retval;
+ do {
+ if (*nbytes > SENDFILE_CHUNK_SIZE)
+ retval = sendfile(in_fd, out_fd, *offset, SENDFILE_CHUNK_SIZE,
+ NULL, &len, 0);
+ else
+ retval = sendfile(in_fd, out_fd, *offset, *nbytes, NULL, &len, 0);
+ if (retval != -1 || errno == EAGAIN || errno == EINTR) {
+ *offset += len;
+ *nbytes -= len;
+ written += len;
+ }
+ } while(len == SENDFILE_CHUNK_SIZE);
+#endif
+ *nbytes = written;
+ return check_error(retval, errInfo);
+}
+#endif /* HAVE_SENDFILE */
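/* Illustrative sketch, not part of the patch: a minimal caller loop for the
 * efile_sendfile() contract described in the comment block above -- retry
 * while the call reports 0 with EAGAIN/EINTR, stop on 1 or on a real error.
 * send_whole_file is a hypothetical helper; it assumes the Efile_error
 * posix_errno field and the declarations from erl_efile.h used elsewhere in
 * the efile driver. */
static int send_whole_file(int in_fd, int out_fd)
{
    Efile_error err;
    off_t  offset = 0;
    Uint64 nbytes;

    for (;;) {
        nbytes = 0;                    /* 0 means "send to the end of the file" */
        if (efile_sendfile(&err, in_fd, out_fd, &offset, &nbytes, NULL))
            return 1;                  /* all data sent */
        /* nbytes now holds the number of bytes written by this call */
        if (err.posix_errno != EAGAIN && err.posix_errno != EINTR)
            return 0;                  /* real error, details left in err */
        /* otherwise wait until out_fd is writable again and retry */
    }
}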
diff --git a/erts/emulator/drivers/win32/registry_drv.c b/erts/emulator/drivers/win32/registry_drv.c
index 05fd2ea55f..1fad34e380 100644
--- a/erts/emulator/drivers/win32/registry_drv.c
+++ b/erts/emulator/drivers/win32/registry_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -84,7 +84,7 @@ static int maperror(DWORD error);
static int reg_init(void);
static ErlDrvData reg_start(ErlDrvPort, char*);
static void reg_stop(ErlDrvData);
-static void reg_from_erlang(ErlDrvData, char*, int);
+static void reg_from_erlang(ErlDrvData, char*, ErlDrvSizeT);
struct erl_drv_entry registry_driver_entry = {
reg_init,
@@ -95,10 +95,21 @@ struct erl_drv_entry registry_driver_entry = {
NULL,
"registry__drv__",
NULL,
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
NULL,
NULL,
NULL,
- NULL
};
static int
@@ -158,7 +169,7 @@ reg_stop(ErlDrvData clientData)
}
static void
-reg_from_erlang(ErlDrvData clientData, char* buf, int count)
+reg_from_erlang(ErlDrvData clientData, char* buf, ErlDrvSizeT count)
{
RegPort* rp = (RegPort *) clientData;
int cmd;
@@ -301,7 +312,7 @@ reg_from_erlang(ErlDrvData clientData, char* buf, int count)
buf = (char *) &dword;
ASSERT(count == 4);
}
- result = RegSetValueEx(rp->hkey, name, 0, type, buf, count);
+ result = RegSetValueEx(rp->hkey, name, 0, type, buf, (DWORD)count);
reply(rp, result);
}
break;
diff --git a/erts/emulator/drivers/win32/ttsl_drv.c b/erts/emulator/drivers/win32/ttsl_drv.c
index fd88dafd34..1a74d21e99 100644
--- a/erts/emulator/drivers/win32/ttsl_drv.c
+++ b/erts/emulator/drivers/win32/ttsl_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -21,6 +21,9 @@
* smart line for output.
*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include <ctype.h>
#include <stdlib.h>
@@ -80,8 +83,9 @@ int lpos; /* The current "cursor position" in the line buf
static int ttysl_init(void);
static ErlDrvData ttysl_start(ErlDrvPort, char*);
static void ttysl_stop(ErlDrvData);
-static int ttysl_control(ErlDrvData, unsigned int, char *, int, char **, int);
-static void ttysl_from_erlang(ErlDrvData, char*, int);
+static ErlDrvSSizeT ttysl_control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
+static void ttysl_from_erlang(ErlDrvData, char*, ErlDrvSizeT);
static void ttysl_from_tty(ErlDrvData, ErlDrvEvent);
static Sint16 get_sint16(char *s);
@@ -117,7 +121,19 @@ struct erl_drv_entry ttsl_driver_entry = {
NULL,
NULL,
ttysl_control,
- NULL
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
};
static int utf8_mode = 0;
@@ -151,13 +167,13 @@ static void ttysl_get_window_size(Uint32 *width, Uint32 *height)
}
-static int ttysl_control(ErlDrvData drv_data,
+static ErlDrvSSizeT ttysl_control(ErlDrvData drv_data,
unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
char resbuff[2*sizeof(Uint32)];
- int res_size;
+ ErlDrvSizeT res_size;
switch (command) {
case CTRL_OP_GET_WINSIZE:
{
@@ -173,7 +189,7 @@ static int ttysl_control(ErlDrvData drv_data,
res_size = 1;
break;
case CTRL_OP_SET_UNICODE_STATE:
- if (len > 0) {
+ if (len != 0) {
int m = (int) *buf;
*resbuff = (utf8_mode) ? 1 : 0;
res_size = 1;
@@ -435,14 +451,14 @@ static int check_buf_size(byte *s, int n)
}
-static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, int count)
+static void ttysl_from_erlang(ErlDrvData ttysl_data, char* buf, ErlDrvSizeT count)
{
if (lpos > MAXSIZE)
put_chars((byte*)"\n", 1);
switch (buf[0]) {
case OP_PUTC:
- DEBUGLOG(("OP: Putc(%d)",count-1));
+ DEBUGLOG(("OP: Putc(%I64u)",(unsigned long long)count-1));
if (check_buf_size((byte*)buf+1, count-1) == 0)
return;
put_chars((byte*)buf+1, count-1);
diff --git a/erts/emulator/drivers/win32/win_con.c b/erts/emulator/drivers/win32/win_con.c
index c788ad409d..6b45b92cbe 100644
--- a/erts/emulator/drivers/win32/win_con.c
+++ b/erts/emulator/drivers/win32/win_con.c
@@ -21,6 +21,9 @@
#define _UNICODE 1
#include <tchar.h>
#include <stdio.h>
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include <windowsx.h>
#include "resource.h"
@@ -34,6 +37,23 @@
#define REALLOC(X,Y) realloc(X,Y)
#define FREE(X) free(X)
+#if SIZEOF_VOID_P == 8
+#define WIN64 1
+#ifndef GCL_HBRBACKGROUND
+#define GCL_HBRBACKGROUND GCLP_HBRBACKGROUND
+#endif
+#define DIALOG_PROC_RET INT_PTR
+#define CF_HOOK_RET INT_PTR
+#define CC_HOOK_RET INT_PTR
+#define OFN_HOOK_RET INT_PTR
+#else
+#define DIALOG_PROC_RET BOOL
+#define CF_HOOK_RET UINT
+#define CC_HOOK_RET UINT
+#define OFN_HOOK_RET UINT
+#endif
+
+
#ifndef STATE_SYSTEM_INVISIBLE
/* Mingw problem with oleacc.h and WIN32_LEAN_AND_MEAN */
#define STATE_SYSTEM_INVISIBLE 0x00008000
@@ -150,7 +170,7 @@ static TCHAR *erlang_window_title = TEXT("Erlang");
static unsigned __stdcall ConThreadInit(LPVOID param);
static LRESULT CALLBACK ClientWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam);
static LRESULT CALLBACK FrameWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam);
-static BOOL CALLBACK AboutDlgProc(HWND hDlg, UINT iMsg, WPARAM wParam, LPARAM lParam);
+static DIALOG_PROC_RET CALLBACK AboutDlgProc(HWND hDlg, UINT iMsg, WPARAM wParam, LPARAM lParam);
static ScreenLine_t *ConNewLine(void);
static void DeleteTopLine(void);
static void ensure_line_below(void);
@@ -1608,7 +1628,7 @@ OnEditSelAll(HWND hwnd)
InvalidateRect(hwnd, NULL, TRUE);
}
-UINT APIENTRY CFHookProc(HWND hDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
+CF_HOOK_RET APIENTRY CFHookProc(HWND hDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
{
/* Hook procedure for font dialog box */
HWND hOwner;
@@ -1626,11 +1646,11 @@ UINT APIENTRY CFHookProc(HWND hDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
OffsetRect(&rc, -rcDlg.right, -rcDlg.bottom);
SetWindowPos(hDlg,HWND_TOP,rcOwner.left + (rc.right / 2),
rcOwner.top + (rc.bottom / 2),0,0,SWP_NOSIZE);
- return 1;
+ return (CF_HOOK_RET) 1;
default:
break;
}
- return 0; /* Let the default procedure process the message */
+ return (CF_HOOK_RET) 0; /* Let the default procedure process the message */
}
static BOOL
@@ -1705,7 +1725,7 @@ ConSetFont(HWND hwnd)
InvalidateRect(hwnd, NULL, TRUE);
}
-UINT APIENTRY
+CC_HOOK_RET APIENTRY
CCHookProc(HWND hDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
{
/* Hook procedure for choose color dialog box */
@@ -1724,11 +1744,11 @@ CCHookProc(HWND hDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
OffsetRect(&rc, -rcDlg.right, -rcDlg.bottom);
SetWindowPos(hDlg,HWND_TOP,rcOwner.left + (rc.right / 2),
rcOwner.top + (rc.bottom / 2),0,0,SWP_NOSIZE);
- return 1;
+ return (CC_HOOK_RET) 1;
default:
break;
}
- return 0; /* Let the default procedure process the message */
+ return (CC_HOOK_RET) 0; /* Let the default procedure process the message */
}
void ConChooseColor(HWND hwnd)
@@ -1758,7 +1778,8 @@ void ConChooseColor(HWND hwnd)
}
}
-UINT APIENTRY OFNHookProc(HWND hwndDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
+OFN_HOOK_RET APIENTRY OFNHookProc(HWND hwndDlg,UINT iMsg,
+ WPARAM wParam,LPARAM lParam)
{
/* Hook procedure for open file dialog box */
HWND hOwner,hDlg;
@@ -1777,11 +1798,11 @@ UINT APIENTRY OFNHookProc(HWND hwndDlg,UINT iMsg,WPARAM wParam,LPARAM lParam)
OffsetRect(&rc, -rcDlg.right, -rcDlg.bottom);
SetWindowPos(hDlg,HWND_TOP,rcOwner.left + (rc.right / 2),
rcOwner.top + (rc.bottom / 2),0,0,SWP_NOSIZE);
- return 1;
+ return (OFN_HOOK_RET) 1;
default:
break;
}
- return 0; /* the let default procedure process the message */
+ return (OFN_HOOK_RET) 0; /* let the default procedure process the message */
}
static void
@@ -1933,7 +1954,7 @@ write_outbuf(TCHAR *data, int num_chars)
return num_chars;
}
-BOOL CALLBACK AboutDlgProc(HWND hDlg, UINT iMsg, WPARAM wParam, LPARAM lParam)
+DIALOG_PROC_RET CALLBACK AboutDlgProc(HWND hDlg, UINT iMsg, WPARAM wParam, LPARAM lParam)
{
HWND hOwner;
RECT rc,rcOwner,rcDlg;
@@ -1953,17 +1974,17 @@ BOOL CALLBACK AboutDlgProc(HWND hDlg, UINT iMsg, WPARAM wParam, LPARAM lParam)
rcOwner.top + (rc.bottom / 2),0,0,SWP_NOSIZE);
SetDlgItemText(hDlg, ID_VERSIONSTRING,
TEXT("Erlang emulator version ") TEXT(ERLANG_VERSION));
- return TRUE;
+ return (DIALOG_PROC_RET) TRUE;
case WM_COMMAND:
switch (LOWORD(wParam)) {
case IDOK:
case IDCANCEL:
EndDialog(hDlg,0);
- return TRUE;
+ return (DIALOG_PROC_RET) TRUE;
}
break;
}
- return FALSE;
+ return (DIALOG_PROC_RET) FALSE;
}
static void
@@ -2117,7 +2138,7 @@ AddToCmdHistory(void)
}
}
-static TBBUTTON tbb[] =
+/*static TBBUTTON tbb[] =
{
0, 0, TBSTATE_ENABLED, TBSTYLE_SEP, 0, 0, 0, 0,
0, 0, TBSTATE_ENABLED, TBSTYLE_SEP, 0, 0, 0, 0,
@@ -2149,6 +2170,39 @@ static TBBUTTON tbb[] =
2, IDMENU_FONT, TBSTATE_ENABLED, TBSTYLE_AUTOSIZE, 0, 0, 0, 0,
3, IDMENU_ABOUT, TBSTATE_ENABLED, TBSTYLE_AUTOSIZE, 0, 0, 0, 0,
0, 0, TBSTATE_ENABLED, TBSTYLE_SEP, 0, 0, 0, 0,
+ };*/
+static TBBUTTON tbb[] =
+{
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP},
+ {0, IDMENU_COPY, TBSTATE_ENABLED, TBSTYLE_AUTOSIZE},
+ {1, IDMENU_PASTE, TBSTATE_ENABLED, TBSTYLE_AUTOSIZE},
+ {2, IDMENU_FONT, TBSTATE_ENABLED, TBSTYLE_AUTOSIZE},
+ {3, IDMENU_ABOUT, TBSTATE_ENABLED, TBSTYLE_AUTOSIZE},
+ {0, 0, TBSTATE_ENABLED, TBSTYLE_SEP}
};
static TBADDBITMAP tbbitmap =
@@ -2156,6 +2210,17 @@ static TBADDBITMAP tbbitmap =
HINST_COMMCTRL, IDB_STD_SMALL_COLOR,
};
+#ifdef HARDDEBUG
+/* For really hard GUI startup debugging, place DEBUGBOX() macros in code
+ and get modal message boxes with the line number. */
+static void debug_box(int line) {
+ TCHAR buff[1024];
+ swprintf(buff,1024,TEXT("DBG:%d"),line);
+ MessageBox(NULL,buff,TEXT("DBG"),MB_OK|MB_APPLMODAL);
+}
+
+#define DEBUGBOX() debug_box(__LINE__)
+#endif
static HWND
InitToolBar(HWND hwndParent)
@@ -2169,7 +2234,6 @@ InitToolBar(HWND hwndParent)
COLORMAP colorMap;
colorMap.from = RGB(192, 192, 192);
colorMap.to = backgroundColor;
-
/* Create toolbar window with tooltips */
hwndTB = CreateWindowEx(0,TOOLBARCLASSNAME,(TCHAR *)NULL,
WS_CHILD|CCS_TOP|WS_CLIPSIBLINGS|TBSTYLE_TOOLTIPS,
@@ -2180,9 +2244,10 @@ InitToolBar(HWND hwndParent)
tbbitmap.hInst = NULL;
tbbitmap.nID = (UINT) CreateMappedBitmap(beam_module, 1,0, &colorMap, 1);
SendMessage(hwndTB, TB_ADDBITMAP, (WPARAM) 4,
- (WPARAM) &tbbitmap);
+ (LPARAM) &tbbitmap);
+
SendMessage(hwndTB,TB_ADDBUTTONS, (WPARAM) 30,
- (LPARAM) (LPTBBUTTON) tbb);
+ (LPARAM) tbb);
if (toolbarVisible)
ShowWindow(hwndTB, SW_SHOW);
diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c
index 3d59564f7b..606fa1d7de 100755..100644
--- a/erts/emulator/drivers/win32/win_efile.c
+++ b/erts/emulator/drivers/win32/win_efile.c
@@ -21,6 +21,9 @@
*/
#include <windows.h>
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include <ctype.h>
#include <wchar.h>
@@ -42,6 +45,26 @@
#define INVALID_FILE_ATTRIBUTES ((DWORD) 0xFFFFFFFF)
#endif
+#define TICKS_PER_SECOND (10000000ULL)
+#define EPOCH_DIFFERENCE (11644473600LL)
+
+#define FILETIME_TO_EPOCH(epoch, ft) \
+ do { \
+ ULARGE_INTEGER ull; \
+ ull.LowPart = (ft).dwLowDateTime; \
+ ull.HighPart = (ft).dwHighDateTime; \
+ (epoch) = ((ull.QuadPart / TICKS_PER_SECOND) - EPOCH_DIFFERENCE); \
+ } while(0)
+
+#define EPOCH_TO_FILETIME(ft, epoch) \
+ do { \
+ ULARGE_INTEGER ull; \
+ ull.QuadPart = (((epoch) + EPOCH_DIFFERENCE) * TICKS_PER_SECOND); \
+ (ft).dwLowDateTime = ull.LowPart; \
+ (ft).dwHighDateTime = ull.HighPart; \
+ } while(0)
+
+
static int check_error(int result, Efile_error* errInfo);
static int set_error(Efile_error* errInfo);
static int is_root_unc_name(const WCHAR *path);
@@ -127,6 +150,8 @@ static int errno_map(DWORD last_error) {
return EBUSY;
case ERROR_NO_PROC_SLOTS:
return EAGAIN;
+ case ERROR_CANT_RESOLVE_FILENAME:
+ return EMLINK;
case ERROR_ARENA_TRASHED:
case ERROR_INVALID_BLOCK:
case ERROR_BAD_ENVIRONMENT:
@@ -859,14 +884,7 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
findbuf.cFileName[0] = L'\0';
pInfo->links = 1;
- pInfo->modifyTime.year = 1980;
- pInfo->modifyTime.month = 1;
- pInfo->modifyTime.day = 1;
- pInfo->modifyTime.hour = 0;
- pInfo->modifyTime.minute = 0;
- pInfo->modifyTime.second = 0;
-
- pInfo->accessTime = pInfo->modifyTime;
+ pInfo->cTime = pInfo->accessTime = pInfo->modifyTime = 0;
} else {
SYSTEMTIME SystemTime;
FILETIME LocalFTime;
@@ -900,34 +918,21 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
}
}
-#define GET_TIME(dst, src) \
-if (!FileTimeToLocalFileTime(&findbuf.src, &LocalFTime) || \
- !FileTimeToSystemTime(&LocalFTime, &SystemTime)) { \
- return set_error(errInfo); \
-} \
-(dst).year = SystemTime.wYear; \
-(dst).month = SystemTime.wMonth; \
-(dst).day = SystemTime.wDay; \
-(dst).hour = SystemTime.wHour; \
-(dst).minute = SystemTime.wMinute; \
-(dst).second = SystemTime.wSecond;
-
- GET_TIME(pInfo->modifyTime, ftLastWriteTime);
+ FILETIME_TO_EPOCH(pInfo->modifyTime, findbuf.ftLastWriteTime);
if (findbuf.ftLastAccessTime.dwLowDateTime == 0 &&
findbuf.ftLastAccessTime.dwHighDateTime == 0) {
pInfo->accessTime = pInfo->modifyTime;
} else {
- GET_TIME(pInfo->accessTime, ftLastAccessTime);
+ FILETIME_TO_EPOCH(pInfo->accessTime, findbuf.ftLastAccessTime);
}
if (findbuf.ftCreationTime.dwLowDateTime == 0 &&
findbuf.ftCreationTime.dwHighDateTime == 0) {
pInfo->cTime = pInfo->modifyTime;
} else {
- GET_TIME(pInfo->cTime, ftCreationTime);
+ FILETIME_TO_EPOCH(pInfo->cTime, findbuf.ftCreationTime);
}
-#undef GET_TIME
FindClose(findhandle);
}
@@ -963,17 +968,12 @@ efile_write_info(Efile_error* errInfo,
char* name)
{
SYSTEMTIME timebuf;
- FILETIME LocalFileTime;
FILETIME ModifyFileTime;
FILETIME AccessFileTime;
FILETIME CreationFileTime;
HANDLE fd;
- FILETIME* mtime = NULL;
- FILETIME* atime = NULL;
- FILETIME* ctime = NULL;
DWORD attr;
DWORD tempAttr;
- BOOL modifyTime = FALSE;
WCHAR *wname = (WCHAR *) name;
/*
@@ -998,57 +998,36 @@ efile_write_info(Efile_error* errInfo,
* Construct all file times.
*/
-#define MKTIME(tb, ts, ptr) \
- timebuf.wYear = ts.year; \
- timebuf.wMonth = ts.month; \
- timebuf.wDay = ts.day; \
- timebuf.wHour = ts.hour; \
- timebuf.wMinute = ts.minute; \
- timebuf.wSecond = ts.second; \
- timebuf.wMilliseconds = 0; \
- if (ts.year != -1) { \
- modifyTime = TRUE; \
- ptr = &tb; \
- if (!SystemTimeToFileTime(&timebuf, &LocalFileTime ) || \
- !LocalFileTimeToFileTime(&LocalFileTime, &tb)) { \
- errno = EINVAL; \
- return check_error(-1, errInfo); \
- } \
- }
-
- MKTIME(ModifyFileTime, pInfo->modifyTime, mtime);
- MKTIME(AccessFileTime, pInfo->accessTime, atime);
- MKTIME(CreationFileTime, pInfo->cTime, ctime);
-#undef MKTIME
+ EPOCH_TO_FILETIME(ModifyFileTime, pInfo->modifyTime);
+ EPOCH_TO_FILETIME(AccessFileTime, pInfo->accessTime);
+ EPOCH_TO_FILETIME(CreationFileTime, pInfo->cTime);
/*
* If necessary, set the file times.
*/
- if (modifyTime) {
- /*
- * If the has read only access, we must temporarily turn on
- * write access (this is necessary for native filesystems,
- * but not for NFS filesystems).
- */
+ /*
+ * If the file has read only access, we must temporarily turn on
+ * write access (this is necessary for native filesystems,
+ * but not for NFS filesystems).
+ */
- if (tempAttr & FILE_ATTRIBUTE_READONLY) {
- tempAttr &= ~FILE_ATTRIBUTE_READONLY;
- if (!SetFileAttributesW(wname, tempAttr)) {
- return set_error(errInfo);
- }
+ if (tempAttr & FILE_ATTRIBUTE_READONLY) {
+ tempAttr &= ~FILE_ATTRIBUTE_READONLY;
+ if (!SetFileAttributesW(wname, tempAttr)) {
+ return set_error(errInfo);
}
+ }
- fd = CreateFileW(wname, GENERIC_READ|GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE,
- NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
- if (fd != INVALID_HANDLE_VALUE) {
- BOOL result = SetFileTime(fd, ctime, atime, mtime);
- if (!result) {
- return set_error(errInfo);
- }
- CloseHandle(fd);
+ fd = CreateFileW(wname, GENERIC_READ|GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fd != INVALID_HANDLE_VALUE) {
+ BOOL result = SetFileTime(fd, &CreationFileTime, &AccessFileTime, &ModifyFileTime);
+ if (!result) {
+ return set_error(errInfo);
}
+ CloseHandle(fd);
}
/*
@@ -1136,8 +1115,7 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */
SysIOVec* iov, /* Vector of buffer structs.
* The structs are unchanged
* after the call */
- int iovcnt, /* Number of structs in vector */
- size_t size) /* Number of bytes to write */
+ int iovcnt) /* Number of structs in vector */
{
int cnt; /* Buffers so far written */
OVERLAPPED overlapped;
@@ -1405,7 +1383,7 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
DWORD fileAttributes = GetFileAttributesW(wname);
if ((fileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
BOOLEAN success = 0;
- HANDLE h = CreateFileW(wname, GENERIC_READ, 0,NULL, OPEN_EXISTING, 0, NULL);
+ HANDLE h = CreateFileW(wname, GENERIC_READ, 0,NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
int len;
if(h != INVALID_HANDLE_VALUE) {
success = pGetFinalPathNameByHandle(h, wbuffer, size,0);
@@ -1421,7 +1399,7 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
if (*wbuffer == L'\\')
*wbuffer = L'/';
CloseHandle(h);
- }
+ }
FreeLibrary(hModule);
if (success) {
return 1;
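
The FILETIME_TO_EPOCH and EPOCH_TO_FILETIME macros added near the top of win_efile.c convert between the Windows FILETIME representation (100-nanosecond ticks counted from 1601-01-01) and Unix epoch seconds, which is what lets the GET_TIME/MKTIME round-trips through SYSTEMTIME above be deleted. A minimal standalone sketch of the same arithmetic, with illustrative function names and plain integer types so it builds without windows.h:

    #include <stdint.h>
    #include <stdio.h>

    #define TICKS_PER_SECOND 10000000ULL   /* FILETIME resolution: 100 ns */
    #define EPOCH_DIFFERENCE 11644473600LL /* seconds from 1601-01-01 to 1970-01-01 */

    static int64_t filetime_ticks_to_epoch(uint64_t ticks)
    {
        return (int64_t)(ticks / TICKS_PER_SECOND) - EPOCH_DIFFERENCE;
    }

    static uint64_t epoch_to_filetime_ticks(int64_t epoch)
    {
        return (uint64_t)(epoch + EPOCH_DIFFERENCE) * TICKS_PER_SECOND;
    }

    int main(void)
    {
        int64_t unix_time = 1234567890;             /* 2009-02-13 23:31:30 UTC */
        uint64_t ticks = epoch_to_filetime_ticks(unix_time);
        printf("%lld -> %llu ticks -> %lld\n",
               (long long)unix_time,
               (unsigned long long)ticks,
               (long long)filetime_ticks_to_epoch(ticks));
        return 0;
    }
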
diff --git a/erts/emulator/hipe/hipe_abi.txt b/erts/emulator/hipe/hipe_abi.txt
index d0ec162342..9d4726de9d 100644
--- a/erts/emulator/hipe/hipe_abi.txt
+++ b/erts/emulator/hipe/hipe_abi.txt
@@ -62,7 +62,7 @@ exceptional condition, it puts an error code in p->freason
and returns THE_NON_VALUE (zero, except in debug mode).
If p->freason == TRAP, then the BIF redirects its call to some
-other function, given by p->def_arg_reg[].
+other function, given by p->i.
The BIF and the new callee may have different arities.
The "hipe_${ARCH}_bifs.m4" macro files take care of these issues
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index 0ba763cbea..ec25c0b9b7 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -20,24 +20,37 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_amd64_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
+
`#if THE_NON_VALUE == 0
#define TEST_GOT_EXN testq %rax, %rax
#else
#define TEST_GOT_EXN cmpq $THE_NON_VALUE, %rax
#endif'
-`#define TEST_GOT_MBUF movq P_MBUF(P), %rdx; testq %rdx, %rdx; jnz 3f; 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: call JOIN3(nbif_,ARITY,_gc_after_bif); jmp 2b'
+define(TEST_GOT_MBUF,`movq P_MBUF(P), %rdx # `TEST_GOT_MBUF'
+ testq %rdx, %rdx
+ jnz 3f
+2:')
+define(HANDLE_GOT_MBUF,`
+3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF'
+ jmp 2b')
+
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) movq $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+#else
+# define CALL_BIF(F) call CSYM(F)
+#endif'
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -54,7 +67,11 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ sub $(8), %rsp /* stack frame 16-byte alignment */
+ CALL_BIF($2)
+ add $(1*8 + 8), %rsp
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -82,7 +99,11 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ pushq %rdx
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ CALL_BIF($2)
+ add $(2*8), %rsp
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -111,7 +132,13 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ pushq %rcx
+ pushq %rdx
+ pushq %rsi
+ movq %rsp, %rsi /* Eterm* BIF__ARGS */
+ sub $(8), %rsp /* stack frame 16-byte alignment */
+ CALL_BIF($2)
+ add $(3*8 + 8), %rsp
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -124,13 +151,7 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -143,7 +164,7 @@ ASYM($1):
/* make the call on the C stack */
SWITCH_ERLANG_TO_C
- call CSYM($2)
+ CALL_BIF($2)
TEST_GOT_MBUF
SWITCH_C_TO_ERLANG
@@ -528,7 +549,9 @@ ASYM($1):
/*
* AMD64-specific primops.
*/
+#ifndef NO_FPE_SIGNALS
noproc_primop_interface_0(nbif_handle_fp_exception, erts_restore_fpu)
+#endif /* NO_FPE_SIGNALS */
/*
* Implement gc_bif_interface_0 as nofail_primop_interface_0.
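
The amd64 glue above (and the matching arm, ppc, sparc and x86 hunks below) no longer calls the C BIF with loose register arguments; it first spills them into an Eterm array (the C stack here, def_arg_reg[] on the RISC ports) and passes that array's address as the second C argument, matching the BIF__ARGS-style calling convention that the C sources in this patch switch to. A minimal C sketch of that convention, with deliberately simplified types (Eterm reduced to unsigned long, no Process struct):

    #include <stdio.h>

    typedef unsigned long Eterm;

    /* New-style BIF: process pointer plus a pointer to the packed argument
       array (what the patch's comments call BIF__ARGS). */
    static Eterm add_2(void *proc, Eterm *args)
    {
        (void)proc;
        return args[0] + args[1];   /* BIF_ARG_1 + BIF_ARG_2 */
    }

    int main(void)
    {
        /* The m4 glue performs this packing in assembly: store the incoming
           argument registers to memory and pass their address. */
        Eterm packed[2] = { 40, 2 };
        printf("%lu\n", add_2(NULL, packed));   /* prints 42 */
        return 0;
    }
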
diff --git a/erts/emulator/hipe/hipe_amd64_primops.h b/erts/emulator/hipe/hipe_amd64_primops.h
index e3c7111997..55cb0eadb8 100644
--- a/erts/emulator/hipe/hipe_amd64_primops.h
+++ b/erts/emulator/hipe/hipe_amd64_primops.h
@@ -19,5 +19,7 @@
PRIMOP_LIST(am_inc_stack_0, &nbif_inc_stack_0)
+#ifndef NO_FPE_SIGNALS
PRIMOP_LIST(am_handle_fp_exception, &nbif_handle_fp_exception)
+#endif
PRIMOP_LIST(am_sse2_fnegate_mask, &sse2_fnegate_mask)
diff --git a/erts/emulator/hipe/hipe_arm.c b/erts/emulator/hipe/hipe_arm.c
index d52f429a9b..e20a8a7969 100644
--- a/erts/emulator/hipe/hipe_arm.c
+++ b/erts/emulator/hipe/hipe_arm.c
@@ -71,7 +71,7 @@ static struct segment {
} curseg;
#define in_area(ptr,start,nbytes) \
- ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
static void *new_code_mapping(void)
{
diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4
index 3664fb6502..e0c6f09796 100644
--- a/erts/emulator/hipe/hipe_arm_bifs.m4
+++ b/erts/emulator/hipe/hipe_arm_bifs.m4
@@ -20,18 +20,27 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_arm_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
.text
.p2align 2
-`#define JOIN3(A,B,C) A##B##C
-#define TEST_GOT_MBUF(ARITY) ldr r1, [P, #P_MBUF]; cmp r1, #0; blne JOIN3(nbif_,ARITY,_gc_after_bif)'
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) mov r14, #F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper
+#else
+# define CALL_BIF(F) bl F
+#endif'
+
+define(TEST_GOT_MBUF,`ldr r1, [P, #P_MBUF] /* `TEST_GOT_MBUF' */
+ cmp r1, #0
+ blne nbif_$1_gc_after_bif')
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
* Generate native interface for a BIF with 1-3 parameters and
* standard failure mode.
@@ -48,7 +57,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(1)
/* Restore registers. Check for exception. */
@@ -73,7 +84,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ str r2, [r0, #P_ARG1]
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(2)
/* Restore registers. Check for exception. */
@@ -99,7 +113,11 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */
+ str r2, [r0, #P_ARG1]
+ str r3, [r0, #P_ARG2]
+ add r1, r0, #P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF(3)
/* Restore registers. Check for exception. */
@@ -111,13 +129,7 @@ $1:
.type $1, %function
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -128,7 +140,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF(0)
/* Restore registers. Check for exception. */
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index e7fb850530..26f183dc25 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -174,8 +174,13 @@ static inline unsigned char *bytearray_lvalue(Eterm bin, Eterm idx)
{
Sint i;
unsigned char *bytes;
+#ifndef DEBUG
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
+ ERTS_DECLARE_DUMMY(Uint bitsize);
+#else
Uint bitoffs;
Uint bitsize;
+#endif
if (is_not_binary(bin) ||
is_not_small(idx) ||
@@ -235,9 +240,15 @@ BIF_RETTYPE hipe_bifs_bitarray_2(BIF_ALIST_2)
BIF_RETTYPE hipe_bifs_bitarray_update_3(BIF_ALIST_3)
{
unsigned char *bytes, bytemask;
- Uint bitoffs, bitsize;
Uint bitnr, bytenr;
int set;
+#ifndef DEBUG
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
+ ERTS_DECLARE_DUMMY(Uint bitsize);
+#else
+ Uint bitoffs;
+ Uint bitsize;
+#endif
if (is_not_binary(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
@@ -267,8 +278,15 @@ BIF_RETTYPE hipe_bifs_bitarray_update_3(BIF_ALIST_3)
BIF_RETTYPE hipe_bifs_bitarray_sub_2(BIF_ALIST_2)
{
unsigned char *bytes, bytemask;
- Uint bitoffs, bitsize;
Uint bitnr, bytenr;
+#ifndef DEBUG
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
+ ERTS_DECLARE_DUMMY(Uint bitsize);
+#else
+ Uint bitoffs;
+ Uint bitsize;
+#endif
+
if (is_not_binary(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
@@ -397,10 +415,15 @@ BIF_RETTYPE hipe_bifs_enter_code_2(BIF_ALIST_2)
Uint nrbytes;
void *bytes;
void *address;
- Uint bitoffs;
- Uint bitsize;
Eterm trampolines;
Eterm *hp;
+#ifndef DEBUG
+ ERTS_DECLARE_DUMMY(Uint bitoffs);
+ ERTS_DECLARE_DUMMY(Uint bitsize);
+#else
+ Uint bitoffs;
+ Uint bitsize;
+#endif
if (is_not_binary(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
@@ -985,6 +1008,26 @@ BIF_RETTYPE hipe_conv_big_to_float(BIF_ALIST_1)
BIF_RET(res);
}
+#ifdef NO_FPE_SIGNALS
+
+/*
+ This is the current solution to make hipe run without FPE.
+ The native code is the same except that a call to this primop
+ is made after _every_ float operation to check the result.
+ The native fcheckerror still done later will detect if an
+ "emulated" FPE has occured.
+ We use p->hipe.float_result to avoid passing a 'double' argument,
+ which has its own calling convention (on amd64 at least).
+ Simple and slow...
+*/
+void hipe_emulate_fpe(Process* p)
+{
+ if (!finite(p->hipe.float_result)) {
+ p->fp_exception = 1;
+ }
+}
+#endif
+
#if 0 /* XXX: unused */
/*
* At least parts of this should be inlined in native code.
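
The hipe_emulate_fpe() primop added above is the NO_FPE_SIGNALS mechanism described in its comment: native code stores every float result and calls this primop, which raises the process's fp_exception flag when the result is not finite, instead of depending on a hardware FPE signal. A minimal sketch of that check, with a dummy struct standing in for the process fields the patch uses; isfinite() is used here as the portable C99 spelling of the finite() call in the patch:

    #include <math.h>
    #include <stdio.h>

    struct fake_proc { double float_result; int fp_exception; };

    /* Emulated FPE: flag the process if the last float result is Inf or NaN. */
    static void emulate_fpe(struct fake_proc *p)
    {
        if (!isfinite(p->float_result))
            p->fp_exception = 1;
    }

    int main(void)
    {
        struct fake_proc p = { 0.0, 0 };

        p.float_result = 1.0e308 * 10.0;   /* overflows to +Inf, no signal raised */
        emulate_fpe(&p);
        printf("fp_exception = %d\n", p.fp_exception);   /* prints 1 */
        return 0;
    }
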
diff --git a/erts/emulator/hipe/hipe_bif0.h b/erts/emulator/hipe/hipe_bif0.h
index f02e8862dc..c512d66f9d 100644
--- a/erts/emulator/hipe/hipe_bif0.h
+++ b/erts/emulator/hipe/hipe_bif0.h
@@ -29,7 +29,7 @@ extern Uint *hipe_bifs_find_pc_from_mfa(Eterm mfa);
extern void hipe_mfa_info_table_init(void);
extern void *hipe_get_remote_na(Eterm m, Eterm f, unsigned int a);
-extern Eterm hipe_find_na_or_make_stub(Process*, Eterm, Eterm, Eterm);
+extern BIF_RETTYPE hipe_find_na_or_make_stub(BIF_ALIST_3);
extern int hipe_find_mfa_from_ra(const void *ra, Eterm *m, Eterm *f, unsigned int *a);
#if defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
extern void *hipe_mfa_get_trampoline(Eterm m, Eterm f, unsigned int a);
diff --git a/erts/emulator/hipe/hipe_bif0.tab b/erts/emulator/hipe/hipe_bif0.tab
index b6c6bede23..ce641365e9 100644
--- a/erts/emulator/hipe/hipe_bif0.tab
+++ b/erts/emulator/hipe/hipe_bif0.tab
@@ -140,3 +140,5 @@ atom bs_put_utf16le
atom bs_get_utf16
atom bs_validate_unicode
atom bs_validate_unicode_retract
+atom emulate_fpe
+
diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c
index 2660f74a82..ee97541e15 100644
--- a/erts/emulator/hipe/hipe_bif2.c
+++ b/erts/emulator/hipe/hipe_bif2.c
@@ -166,3 +166,26 @@ BIF_RETTYPE hipe_bifs_show_message_area_0(BIF_ALIST_0)
BIF_RET(am_false);
#endif
}
+
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+
+BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1);
+
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
+
+BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1)
+{
+ typedef BIF_RETTYPE Bif(BIF_ALIST_1);
+ Bif* fp = (Bif*) (BIF_P->hipe.bif_callee);
+ BIF_RETTYPE res;
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(BIF_P);
+ res = (*fp)(BIF_P, BIF__ARGS);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(BIF_P);
+ return res;
+}
+
+#endif /* ERTS_ENABLE_LOCK_CHECK && ERTS_SMP */
+
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index 083788997b..942fa0c5cb 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -70,24 +70,18 @@
****************************************************************/
/*
+ * standard_bif_interface_0(nbif_name, cbif_name)
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
*
- * A BIF with implicit P parameter, 1-3 ordinary parameters,
+ * A BIF with implicit P parameter, 0-3 ordinary parameters,
* which may fail.
* HP and FCALLS may be read and updated.
* HP_LIMIT, NSP, NSP_LIMIT, and NRA may not be accessed.
*/
/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * A zero-arity BIF which may fail, otherwise
- * identical to standard_bif_interface_N.
- */
-
-/*
* nofail_primop_interface_0(nbif_name, cbif_name)
* nofail_primop_interface_1(nbif_name, cbif_name)
* nofail_primop_interface_2(nbif_name, cbif_name)
@@ -150,8 +144,7 @@
/*
* Zero-arity BIFs that can fail.
*/
-fail_bif_interface_0(nbif_memory_0, memory_0)
-fail_bif_interface_0(nbif_processes_0, processes_0)
+standard_bif_interface_0(nbif_processes_0, processes_0)
/*
* BIFs and primops that may do a GC (change heap limit and walk the native stack).
@@ -176,10 +169,10 @@ gc_bif_interface_0(nbif_hipe_bifs_nstack_used_size_0, hipe_bifs_nstack_used_size
/*
* Arithmetic operators called indirectly by the HiPE compiler.
*/
-standard_bif_interface_2(nbif_add_2, erts_mixed_plus)
-standard_bif_interface_2(nbif_sub_2, erts_mixed_minus)
-standard_bif_interface_2(nbif_mul_2, erts_mixed_times)
-standard_bif_interface_2(nbif_div_2, erts_mixed_div)
+standard_bif_interface_2(nbif_add_2, splus_2)
+standard_bif_interface_2(nbif_sub_2, sminus_2)
+standard_bif_interface_2(nbif_mul_2, stimes_2)
+standard_bif_interface_2(nbif_div_2, div_2)
standard_bif_interface_2(nbif_intdiv_2, intdiv_2)
standard_bif_interface_2(nbif_rem_2, rem_2)
standard_bif_interface_2(nbif_bsl_2, bsl_2)
@@ -252,6 +245,10 @@ noproc_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_integer)
gc_bif_interface_0(nbif_check_get_msg, hipe_check_get_msg)
+#ifdef NO_FPE_SIGNALS
+nocons_nofail_primop_interface_0(nbif_emulate_fpe, hipe_emulate_fpe)
+#endif
+
/*
* SMP-specific stuff
*/
@@ -261,11 +258,6 @@ noproc_primop_interface_1(nbif_atomic_inc, hipe_atomic_inc)
',)dnl
/*
- * Implement standard_bif_interface_0 as nofail_primop_interface_0.
- */
-define(standard_bif_interface_0,`nofail_primop_interface_0($1, $2)')
-
-/*
* Standard BIFs.
* BIF_LIST(ModuleAtom,FunctionAtom,Arity,CFun,Index)
*/
diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c
index 0199dea99e..e0575c35ff 100644
--- a/erts/emulator/hipe/hipe_gc.c
+++ b/erts/emulator/hipe/hipe_gc.c
@@ -46,9 +46,14 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop)
char *src, *oh;
Uint src_size, oh_size;
+ if (!p->hipe.nstack) {
+ ASSERT(!p->hipe.nsp && !p->hipe.nstend);
+ return n_htop;
+ }
if (!nstack_walk_init_check(p))
return n_htop;
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
nsp = nstack_walk_nsp_begin(p);
nsp_end = p->hipe.nstgraylim;
if (nsp_end)
@@ -136,9 +141,14 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop)
char *heap;
Uint heap_size, mature_size;
+ if (!p->hipe.nstack) {
+ ASSERT(!p->hipe.nsp && !p->hipe.nstend);
+ return;
+ }
if (!nstack_walk_init_check(p))
return;
+ ASSERT(p->hipe.nsp && p->hipe.nstend);
nsp = nstack_walk_nsp_begin(p);
nsp_end = p->hipe.nstgraylim;
if (nsp_end) {
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index bced90785d..d07d14028c 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -1,9 +1,8 @@
/*
* %CopyrightBegin%
-
- *
+ *
* Copyright Ericsson AB 2001-2011. All Rights Reserved.
- *
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
@@ -212,6 +211,11 @@ static const unsigned int CRCTABLE[256] = {
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D,
};
+/* For the HiPE cross compiler: hard-code all values so that the
+ HiPE compiler never needs to query the running emulator.
+*/
+static int is_xcomp = 0;
+
/*
* The algorithm for calculating the 32 bit CRC checksum is based upon
* documentation and algorithms provided by Dr. Ross N. Williams in the
@@ -243,7 +247,7 @@ crc_update_buf(unsigned int crc_value,
}
static unsigned int
-crc_update_int(unsigned int crc_value, const unsigned int *p)
+crc_update_int(unsigned int crc_value, const int *p)
{
return crc_update_buf(crc_value, p, sizeof *p);
}
@@ -256,7 +260,7 @@ crc_update_int(unsigned int crc_value, const unsigned int *p)
*/
static const struct literal {
const char *name;
- unsigned int value;
+ int value;
} literals[] = {
/* Field offsets in a process struct */
{ "P_HP", offsetof(struct process, htop) },
@@ -289,6 +293,14 @@ static const struct literal {
{ "P_NRA", offsetof(struct process, hipe.nra) },
#endif
{ "P_NARITY", offsetof(struct process, hipe.narity) },
+ { "P_FLOAT_RESULT",
+# ifdef NO_FPE_SIGNALS
+ offsetof(struct process, hipe.float_result)
+# endif
+ },
+# if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ { "P_BIF_CALLEE", offsetof(struct process, hipe.bif_callee) },
+# endif
#endif /* HIPE */
/* process flags bits */
@@ -298,7 +310,7 @@ static const struct literal {
{ "FREASON_TRAP", TRAP },
/* special Erlang constants */
- { "THE_NON_VALUE", THE_NON_VALUE },
+ { "THE_NON_VALUE", (int)THE_NON_VALUE },
/* funs */
#ifdef HIPE
@@ -452,7 +464,7 @@ static const struct rts_param {
unsigned int nr;
const char *name;
unsigned int is_defined;
- unsigned int value;
+ int value;
} rts_params[] = {
{ 1, "P_OFF_HEAP_FUNS",
#if !defined(HYBRID)
@@ -484,7 +496,7 @@ static const struct rts_param {
#endif
},
{ 14, "P_FP_EXCEPTION",
-#if !defined(NO_FPE_SIGNALS)
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
1, offsetof(struct process, fp_exception)
#endif
},
@@ -497,6 +509,15 @@ static const struct rts_param {
0
#endif
},
+ /* This flag is always defined, but its value is configuration-dependent. */
+ { 16, "ERTS_NO_FPE_SIGNALS",
+ 1,
+#if defined(NO_FPE_SIGNALS)
+ 1
+#else
+ 0
+#endif
+ },
/* This parameter is always defined, but its value depends on ERTS_SMP. */
{ 19, "MSG_MESSAGE",
1, offsetof(struct erl_mesg, m[0])
@@ -528,12 +549,12 @@ static void compute_crc(void)
static void c_define_literal(FILE *fp, const struct literal *literal)
{
- fprintf(fp, "#define %s %u\n", literal->name, literal->value);
+ fprintf(fp, "#define %s %d\n", literal->name, literal->value);
}
static void e_define_literal(FILE *fp, const struct literal *literal)
{
- fprintf(fp, "-define(%s, %u).\n", literal->name, literal->value);
+ fprintf(fp, "-define(%s, %d).\n", literal->name, literal->value);
}
static void print_literals(FILE *fp, void (*print_literal)(FILE*, const struct literal*))
@@ -560,7 +581,7 @@ static void print_atom_literals(FILE *fp, void (*print_atom_literal)(FILE*, cons
static void c_define_param(FILE *fp, const struct rts_param *param)
{
if (param->is_defined)
- fprintf(fp, "#define %s %u\n", param->name, param->value);
+ fprintf(fp, "#define %s %d\n", param->name, param->value);
}
static void c_case_param(FILE *fp, const struct rts_param *param)
@@ -568,7 +589,7 @@ static void c_case_param(FILE *fp, const struct rts_param *param)
fprintf(fp, " \\\n");
fprintf(fp, "\tcase %u: ", param->nr);
if (param->is_defined)
- fprintf(fp, "value = %u", param->value);
+ fprintf(fp, "value = %d", param->value);
else
fprintf(fp, "is_defined = 0");
fprintf(fp, "; break;");
@@ -576,7 +597,15 @@ static void c_case_param(FILE *fp, const struct rts_param *param)
static void e_define_param(FILE *fp, const struct rts_param *param)
{
- fprintf(fp, "-define(%s, hipe_bifs:get_rts_param(%u)).\n", param->name, param->nr);
+ if (is_xcomp) {
+ if (param->is_defined)
+ fprintf(fp, "-define(%s, %d).\n", param->name, param->value);
+ else
+ fprintf(fp, "-define(%s, []).\n", param->name);
+ }
+ else {
+ fprintf(fp, "-define(%s, hipe_bifs:get_rts_param(%u)).\n", param->name, param->nr);
+ }
}
static void print_params(FILE *fp, void (*print_param)(FILE*,const struct rts_param*))
@@ -613,19 +642,40 @@ static int do_e(FILE *fp, const char* this_exe)
fprintf(fp, "\n");
print_params(fp, e_define_param);
fprintf(fp, "\n");
- fprintf(fp, "-define(HIPE_SYSTEM_CRC, hipe_bifs:system_crc(%u)).\n", literals_crc);
+ if (is_xcomp) {
+ fprintf(fp, "-define(HIPE_SYSTEM_CRC, %u).\n", system_crc);
+ }
+ else {
+ fprintf(fp, "-define(HIPE_SYSTEM_CRC, hipe_bifs:system_crc(%u)).\n",
+ literals_crc);
+ }
return 0;
}
int main(int argc, const char **argv)
{
+ int i;
+ int (*do_func_ptr)(FILE *, const char*) = NULL;
+
compute_crc();
- if (argc == 2) {
- if (strcmp(argv[1], "-c") == 0)
- return do_c(stdout, argv[0]);
- if (strcmp(argv[1], "-e") == 0)
- return do_e(stdout, argv[0]);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-c") == 0)
+ do_func_ptr = &do_c;
+ else if (strcmp(argv[i], "-e") == 0)
+ do_func_ptr = &do_e;
+ else if (strcmp(argv[i], "-x") == 0)
+ is_xcomp = 1;
+ else
+ goto error;
+ }
+ if (do_func_ptr) {
+ return do_func_ptr(stdout, argv[0]);
}
- fprintf(stderr, "usage: %s [-c | -e] > output-file\n", argv[0]);
+error:
+ fprintf(stderr, "usage: %s [-x] [-c | -e] > output-file\n"
+ "\t-c\tC header file\n"
+ "\t-e\tErlang header file\n"
+ "\t-x\tCross compile; no dependencies on the compiling emulator\n",
+ argv[0]);
return 1;
}
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index 16f8fb1347..6a3ce5608f 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -35,6 +35,17 @@
#include "hipe_stack.h"
#include "hipe_bif0.h" /* hipe_mfa_info_table_init() */
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
+#else
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+#endif
+
+
/*
* Internal debug support.
* #define HIPE_DEBUG to the desired debug level:
@@ -318,45 +329,46 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
* Native code called a BIF, which "failed" with a TRAP to BEAM.
* Prior to returning, the BIF stored (see BIF_TRAP<N>):
- * the callee's address in p->def_arg_reg[3]
- * the callee's parameters in p->def_arg_reg[0..2]
+ * the callee's address in p->i
+ * the callee's parameters in reg[0..2]
* the callee's arity in p->arity (for BEAM gc purposes)
*
* We need to remove the BIF's parameters from the native
* stack: to this end hipe_${ARCH}_glue.S stores the BIF's
* arity in p->hipe.narity.
*
- * If the BIF emptied the stack (typically hibernate), p->hipe.nsp is
- * NULL and there is no need to get rid of stacked parameters.
+ * If the BIF emptied the stack (typically hibernate), p->hipe.nstack
+ * is NULL and there is no need to get rid of stacked parameters.
*/
unsigned int i, is_recursive = 0;
- /* Save p->arity, then update it with the original BIF's arity.
- Get rid of any stacked parameters in that call. */
- /* XXX: hipe_call_from_native_is_recursive() copies data to
- reg[], which is useless in the TRAP case. Maybe write a
- specialised hipe_trap_from_native_is_recursive() later. */
- if (p->hipe.nsp != NULL) {
- unsigned int callee_arity;
- callee_arity = p->arity;
- p->arity = p->hipe.narity; /* caller's arity */
- is_recursive = hipe_call_from_native_is_recursive(p, reg);
-
- p->i = (Eterm *)(p->def_arg_reg[3]);
- p->arity = callee_arity;
+ if (p->hipe.nstack != NULL) {
+ ASSERT(p->hipe.nsp != NULL);
+ is_recursive = hipe_trap_from_native_is_recursive(p);
}
+ else {
+ /* Some architectures (risc) need this re-reset of nsp as the
+ * BIF wrapper do not detect stack change and causes an obsolete
+ * stack pointer to be saved in p->hipe.nsp before return to us.
+ */
+ p->hipe.nsp = NULL;
+ }
- /* If process is in P_WAITING state, we schedule the next process */
+ /* Schedule next process if current process was hibernated or is waiting
+ for messages */
+ if (p->flags & F_HIBERNATE_SCHED) {
+ p->flags &= ~F_HIBERNATE_SCHED;
+ goto do_schedule;
+ }
if (p->status == P_WAITING) {
+ for (i = 0; i < p->arity; ++i)
+ p->arg_reg[i] = reg[i];
goto do_schedule;
}
- for (i = 0; i < p->arity; ++i)
- reg[i] = p->def_arg_reg[i];
-
if (is_recursive)
hipe_push_beam_trap_frame(p, reg, p->arity);
-
+
result = HIPE_MODE_SWITCH_RES_CALL;
break;
}
@@ -465,10 +477,12 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
#if !(NR_ARG_REGS > 5)
int reds_in = p->def_arg_reg[5];
#endif
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(p);
p = schedule(p, reds_in - p->fcalls);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(p);
#ifdef ERTS_SMP
p->hipe_smp.have_receive_locks = 0;
- reg = p->scheduler_data->save_reg;
+ reg = p->scheduler_data->x_reg_array;
#endif
}
{
@@ -643,7 +657,7 @@ Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s)
if (depth < 1)
return NIL;
- heap_size = 6 * depth; /* each [{M,F,A}|_] is 2+4 == 6 words */
+ heap_size = 7 * depth; /* each [{M,F,A,[]}|_] is 2+5 == 7 words */
hp = HAlloc(p, heap_size);
hp_end = hp + heap_size;
@@ -654,8 +668,8 @@ Eterm hipe_build_stacktrace(Process *p, struct StackTrace *s)
ra = (const void*)s->trace[i];
if (!hipe_find_mfa_from_ra(ra, &m, &f, &a))
continue;
- mfa = TUPLE3(hp, m, f, make_small(a));
- hp += 4;
+ mfa = TUPLE4(hp, m, f, make_small(a), NIL);
+ hp += 5;
next = CONS(hp, mfa, NIL);
*next_p = next;
next_p = &CDR(list_val(next));
diff --git a/erts/emulator/hipe/hipe_mode_switch.h b/erts/emulator/hipe/hipe_mode_switch.h
index dbc2386e14..a3e908a3b3 100644
--- a/erts/emulator/hipe/hipe_mode_switch.h
+++ b/erts/emulator/hipe/hipe_mode_switch.h
@@ -49,7 +49,7 @@
#include "error.h"
-int hipe_modeswitch_debug;
+extern int hipe_modeswitch_debug;
void hipe_mode_switch_init(void);
void hipe_set_call_trap(Uint *bfun, void *nfun, int is_closure);
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index 8d31348496..3be821f8f7 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -41,9 +41,9 @@
*/
/* for -Wmissing-prototypes :-( */
-extern Eterm hipe_check_process_code_2(Process*, Eterm, Eterm);
-extern Eterm hipe_garbage_collect_1(Process*, Eterm);
-extern Eterm hipe_show_nstack_1(Process*, Eterm);
+extern Eterm hipe_check_process_code_2(BIF_ALIST_2);
+extern Eterm hipe_garbage_collect_1(BIF_ALIST_1);
+extern Eterm hipe_show_nstack_1(BIF_ALIST_1);
/* Used when a BIF can trigger a stack walk. */
static __inline__ void hipe_set_narity(Process *p, unsigned int arity)
@@ -56,7 +56,7 @@ Eterm hipe_check_process_code_2(BIF_ALIST_2)
Eterm ret;
hipe_set_narity(BIF_P, 2);
- ret = check_process_code_2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ ret = check_process_code_2(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -66,7 +66,7 @@ Eterm hipe_garbage_collect_1(BIF_ALIST_1)
Eterm ret;
hipe_set_narity(BIF_P, 1);
- ret = garbage_collect_1(BIF_P, BIF_ARG_1);
+ ret = garbage_collect_1(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -76,7 +76,7 @@ Eterm hipe_show_nstack_1(BIF_ALIST_1)
Eterm ret;
hipe_set_narity(BIF_P, 1);
- ret = hipe_bifs_show_nstack_1(BIF_P, BIF_ARG_1);
+ ret = hipe_bifs_show_nstack_1(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
@@ -99,8 +99,10 @@ void hipe_gc(Process *p, Eterm need)
* has begun.
* XXX: BUG: native code should check return status
*/
-Eterm hipe_set_timeout(Process *p, Eterm timeout_value)
+BIF_RETTYPE hipe_set_timeout(BIF_ALIST_1)
{
+ Process* p = BIF_P;
+ Eterm timeout_value = BIF_ARG_1;
#if !defined(ARCH_64)
Uint time_val;
#endif
@@ -187,6 +189,8 @@ void hipe_fclearerror_error(Process *p)
{
#if !defined(NO_FPE_SIGNALS)
erts_fp_check_init_error(&p->fp_exception);
+#else
+ erl_exit(ERTS_ABORT_EXIT, "Emulated FPE not cleared by HiPE");
#endif
}
@@ -286,8 +290,13 @@ static struct StackTrace *get_trace_from_exc(Eterm exc)
* This does what the (misnamed) Beam instruction 'raise_ss' does,
* namely, a proper re-throw of an exception that was caught by 'try'.
*/
-Eterm hipe_rethrow(Process *c_p, Eterm exc, Eterm value)
+
+BIF_RETTYPE hipe_rethrow(BIF_ALIST_2)
{
+ Process* c_p = BIF_P;
+ Eterm exc = BIF_ARG_1;
+ Eterm value = BIF_ARG_2;
+
c_p->fvalue = value;
if (c_p->freason == EXC_NULL) {
/* a safety check for the R10-0 case; should not happen */
@@ -334,7 +343,7 @@ char *hipe_bs_allocate(int len)
bptr = erts_bin_nrml_alloc(len);
bptr->flags = 0;
bptr->orig_size = len;
- erts_smp_atomic_init(&bptr->refc, 1);
+ erts_smp_atomic_init_nob(&bptr->refc, 1);
return bptr->orig_bytes;
}
@@ -411,8 +420,12 @@ Eterm hipe_bs_utf8_size(Eterm arg)
return make_small(4);
}
-Eterm hipe_bs_put_utf8(Process *p, Eterm arg, byte *base, unsigned int offset)
+BIF_RETTYPE hipe_bs_put_utf8(BIF_ALIST_3)
{
+ Process* p = BIF_P;
+ Eterm arg = BIF_ARG_1;
+ byte* base = (byte*) BIF_ARG_2;
+ Uint offset = (Uint) BIF_ARG_3;
byte *save_bin_buf;
Uint save_bin_offset;
int res;
@@ -468,13 +481,21 @@ Eterm hipe_bs_put_utf16(Process *p, Eterm arg, byte *base, unsigned int offset,
return new_offset;
}
-Eterm hipe_bs_put_utf16be(Process *p, Eterm arg, byte *base, unsigned int offset)
+BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3)
{
+ Process *p = BIF_P;
+ Eterm arg = BIF_ARG_1;
+ byte *base = (byte*) BIF_ARG_2;
+ Uint offset = (Uint) BIF_ARG_3;
return hipe_bs_put_utf16(p, arg, base, offset, 0);
}
-Eterm hipe_bs_put_utf16le(Process *p, Eterm arg, byte *base, unsigned int offset)
+BIF_RETTYPE hipe_bs_put_utf16le(BIF_ALIST_3)
{
+ Process *p = BIF_P;
+ Eterm arg = BIF_ARG_1;
+ byte *base = (byte*) BIF_ARG_2;
+ Uint offset = (Uint) BIF_ARG_3;
return hipe_bs_put_utf16(p, arg, base, offset, BSF_LITTLE);
}
@@ -489,8 +510,10 @@ static int validate_unicode(Eterm arg)
return 1;
}
-Eterm hipe_bs_validate_unicode(Process *p, Eterm arg)
+BIF_RETTYPE hipe_bs_validate_unicode(BIF_ALIST_1)
{
+ Process *p = BIF_P;
+ Eterm arg = BIF_ARG_1;
if (!validate_unicode(arg))
BIF_ERROR(p, BADARG);
return NIL;
@@ -584,7 +607,7 @@ void hipe_clear_timeout(Process *c_p)
void hipe_atomic_inc(int *counter)
{
- erts_smp_atomic_inc((erts_smp_atomic_t*)counter);
+ erts_smp_atomic_inc_nob((erts_smp_atomic_t*)counter);
}
#endif
diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h
index 13a02b84a2..9e3a156fbc 100644
--- a/erts/emulator/hipe/hipe_native_bif.h
+++ b/erts/emulator/hipe/hipe_native_bif.h
@@ -23,6 +23,7 @@
#ifndef HIPE_NATIVE_BIF_H
#define HIPE_NATIVE_BIF_H
+#include "bif.h"
#include "hipe_arch.h"
/*
@@ -71,27 +72,32 @@ AEXTERN(void,nbif_select_msg,(Process*));
AEXTERN(Eterm,nbif_cmp_2,(void));
AEXTERN(Eterm,nbif_eq_2,(void));
-Eterm hipe_nonclosure_address(Process*, Eterm, Uint);
-Eterm hipe_conv_big_to_float(Process*, Eterm);
+BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2);
+BIF_RETTYPE hipe_conv_big_to_float(BIF_ALIST_1);
void hipe_fclearerror_error(Process*);
void hipe_select_msg(Process*);
void hipe_gc(Process*, Eterm);
-Eterm hipe_set_timeout(Process*, Eterm);
+BIF_RETTYPE hipe_set_timeout(BIF_ALIST_1);
void hipe_handle_exception(Process*);
-Eterm hipe_rethrow(Process *c_p, Eterm exc, Eterm value);
+BIF_RETTYPE hipe_rethrow(BIF_ALIST_2);
char *hipe_bs_allocate(int);
Binary *hipe_bs_reallocate(Binary*, int);
int hipe_bs_put_small_float(Process*, Eterm, Uint, byte*, unsigned, unsigned);
void hipe_bs_put_bits(Eterm, Uint, byte*, unsigned, unsigned);
Eterm hipe_bs_utf8_size(Eterm);
-Eterm hipe_bs_put_utf8(Process*, Eterm, byte*, unsigned int);
+BIF_RETTYPE hipe_bs_put_utf8(BIF_ALIST_3);
Eterm hipe_bs_utf16_size(Eterm);
-Eterm hipe_bs_put_utf16be(Process*, Eterm, byte*, unsigned int);
-Eterm hipe_bs_put_utf16le(Process*, Eterm, byte*, unsigned int);
-Eterm hipe_bs_validate_unicode(Process*, Eterm);
+BIF_RETTYPE hipe_bs_put_utf16be(BIF_ALIST_3);
+BIF_RETTYPE hipe_bs_put_utf16le(BIF_ALIST_3);
+BIF_RETTYPE hipe_bs_validate_unicode(BIF_ALIST_1);
struct erl_bin_match_buffer;
int hipe_bs_validate_unicode_retract(struct erl_bin_match_buffer*, Eterm);
+#ifdef NO_FPE_SIGNALS
+AEXTERN(void,nbif_emulate_fpe,(Process*));
+void hipe_emulate_fpe(Process*);
+#endif
+
/*
* Stuff that is different in SMP and non-SMP.
*/
diff --git a/erts/emulator/hipe/hipe_ppc.c b/erts/emulator/hipe/hipe_ppc.c
index bc25061a16..2d8fd61e1e 100644
--- a/erts/emulator/hipe/hipe_ppc.c
+++ b/erts/emulator/hipe/hipe_ppc.c
@@ -80,7 +80,7 @@ static struct segment {
} curseg;
#define in_area(ptr,start,nbytes) \
- ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+ ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
/* Darwin breakage */
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
diff --git a/erts/emulator/hipe/hipe_ppc_asm.m4 b/erts/emulator/hipe/hipe_ppc_asm.m4
index 0eb5c441e6..343402f9f0 100644
--- a/erts/emulator/hipe/hipe_ppc_asm.m4
+++ b/erts/emulator/hipe/hipe_ppc_asm.m4
@@ -31,12 +31,23 @@ define(LOAD,ld)dnl
define(STORE,std)dnl
define(CMPI,cmpdi)dnl
define(WSIZE,8)dnl
+`#define STORE_IA(ADDR, DST, TMP) \
+ addis TMP, 0, ADDR@highest SEMI\
+ ori TMP, TMP, ADDR@higher SEMI\
+ rldicr TMP, TMP, 32, 31 SEMI\
+ oris TMP, TMP, ADDR@h SEMI\
+ ori TMP, TMP, ADDR@l SEMI\
+ std TMP, DST'
',`
/* 32-bit PowerPC */
define(LOAD,lwz)dnl
define(STORE,stw)dnl
define(CMPI,cmpwi)dnl
define(WSIZE,4)dnl
+`#define STORE_IA(ADDR, DST, TMP) \
+ lis TMP, ADDR@ha SEMI\
+ addi TMP, TMP, ADDR@l SEMI\
+ stw TMP, DST'
')dnl
`#define LOAD 'LOAD
`#define STORE 'STORE
diff --git a/erts/emulator/hipe/hipe_ppc_bifs.m4 b/erts/emulator/hipe/hipe_ppc_bifs.m4
index 203fefe1a1..d09551d10d 100644
--- a/erts/emulator/hipe/hipe_ppc_bifs.m4
+++ b/erts/emulator/hipe/hipe_ppc_bifs.m4
@@ -20,21 +20,34 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_ppc_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) STORE_IA(CSYM(F), P_BIF_CALLEE(P), r29); bl CSYM(hipe_debug_bif_wrapper)
+#else
+# define CALL_BIF(F) bl CSYM(F)
+#endif'
+
.text
.p2align 2
-`#define TEST_GOT_MBUF LOAD r4, P_MBUF(P) SEMI CMPI r4, 0 SEMI bne- 3f SEMI 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: bl CSYM(JOIN3(nbif_,ARITY,_gc_after_bif)) SEMI b 2b'
+define(TEST_GOT_MBUF,`LOAD r4, P_MBUF(P) # `TEST_GOT_MBUF'
+ CMPI r4, 0
+ bne- 3f
+2:')
+define(HANDLE_GOT_MBUF,`
+3: bl CSYM(nbif_$1_gc_after_bif) # `HANDLE_GOT_MBUF'
+ b 2b')
+
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -49,7 +62,9 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -77,7 +92,10 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ STORE r5, P_ARG1(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -106,7 +124,11 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ STORE r5, P_ARG1(r3)
+ STORE r6, P_ARG2(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -121,13 +143,7 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -138,7 +154,8 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- bl CSYM($2)
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -173,7 +190,8 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl CSYM($2)
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. */
@@ -196,7 +214,9 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
@@ -224,7 +244,10 @@ ASYM($1):
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- bl CSYM($2)
+ STORE r4, P_ARG0(r3) # Store BIF__ARGS in def_arg_reg[]
+ STORE r5, P_ARG1(r3)
+ addi r4, r3, P_ARG0
+ CALL_BIF($2)
TEST_GOT_MBUF
/* Restore registers. Check for exception. */
diff --git a/erts/emulator/hipe/hipe_primops.h b/erts/emulator/hipe/hipe_primops.h
index 94113ffcd8..38509c105b 100644
--- a/erts/emulator/hipe/hipe_primops.h
+++ b/erts/emulator/hipe/hipe_primops.h
@@ -77,6 +77,10 @@ PRIMOP_LIST(am_nonclosure_address, &nbif_nonclosure_address)
PRIMOP_LIST(am_conv_big_to_float, &nbif_conv_big_to_float)
PRIMOP_LIST(am_fclearerror_error, &nbif_fclearerror_error)
+#ifdef NO_FPE_SIGNALS
+PRIMOP_LIST(am_emulate_fpe, &nbif_emulate_fpe)
+#endif
+
#if defined(__sparc__)
#include "hipe_sparc_primops.h"
#endif
diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h
index 5effacb398..4ee99d78a2 100644
--- a/erts/emulator/hipe/hipe_process.h
+++ b/erts/emulator/hipe/hipe_process.h
@@ -42,6 +42,12 @@ struct hipe_process_state {
void (*nra)(void); /* Native code return address. */
#endif
unsigned int narity; /* Arity of BIF call, for stack walks. */
+#ifdef NO_FPE_SIGNALS
+ double float_result; /* to be checked for inf/NaN by hipe_emulate_fpe */
+#endif
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ void (*bif_callee)(void); /* When calling BIF's via debug wrapper */
+#endif
};
extern void hipe_arch_print_pcb(struct hipe_process_state *p);
diff --git a/erts/emulator/hipe/hipe_risc_glue.h b/erts/emulator/hipe/hipe_risc_glue.h
index e74023e3e9..cc2671c016 100644
--- a/erts/emulator/hipe/hipe_risc_glue.h
+++ b/erts/emulator/hipe/hipe_risc_glue.h
@@ -199,6 +199,22 @@ hipe_call_from_native_is_recursive(Process *p, Eterm reg[])
return 0;
}
+/* BEAM called native code, which called a BIF that returned a trap.
+ * Discard the BIF parameters.
+ * If this is a tailcall, also clean up the native stub continuation. */
+static __inline__ int
+hipe_trap_from_native_is_recursive(Process *p)
+{
+ if (p->hipe.narity > NR_ARG_REGS) {
+ p->hipe.nsp += (p->hipe.narity - NR_ARG_REGS);
+ }
+ if (p->hipe.nra != (void(*)(void))&nbif_return)
+ return 1;
+ hipe_pop_risc_nra_frame(p);
+ return 0;
+}
+
+
/* Native makes a call which needs to unload the parameters.
This differs from hipe_call_from_native_is_recursive() in
that it doesn't check for or pop the BEAM-calls-native frame.
diff --git a/erts/emulator/hipe/hipe_sparc_bifs.m4 b/erts/emulator/hipe/hipe_sparc_bifs.m4
index 03db7f3413..ca5af45d58 100644
--- a/erts/emulator/hipe/hipe_sparc_bifs.m4
+++ b/erts/emulator/hipe/hipe_sparc_bifs.m4
@@ -20,27 +20,42 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_sparc_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
.section ".text"
.align 4
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) set F, %o7; st %o7, [%o0+P_BIF_CALLEE]; call hipe_debug_bif_wrapper
+#else
+# define CALL_BIF(F) call F
+#endif'
+
/*
* Test for exception. This macro executes its delay slot.
*/
-`#define __TEST_GOT_EXN(LABEL) cmp %o0, THE_NON_VALUE; bz,pn %icc, LABEL
-#define TEST_GOT_EXN(ARITY) __TEST_GOT_EXN(JOIN3(nbif_,ARITY,_simple_exception))'
+define(TEST_GOT_EXN,`cmp %o0, THE_NON_VALUE ! `TEST_GOT_EXN'
+ bz,pn %icc, nbif_$1_simple_exception')
-`#define TEST_GOT_MBUF ld [P+P_MBUF], %o1; cmp %o1, 0; bne 3f; nop; 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: call JOIN3(nbif_,ARITY,_gc_after_bif); nop; b 2b; nop'
+define(TEST_GOT_MBUF,`ld [P+P_MBUF], %o1 ! `TEST_GOT_MBUF'
+ cmp %o1, 0
+ bne 3f
+ nop
+2:')
+define(HANDLE_GOT_MBUF,`
+3: call nbif_$1_gc_after_bif ! `HANDLE_GOT_MBUF'
+ nop
+ b 2b
+ nop')
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -55,7 +70,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -81,7 +98,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -108,7 +128,11 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ st %o3, [%o0+P_ARG2]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -121,13 +145,7 @@ $1:
.type $1, #function
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -138,7 +156,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_BIF
- call $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -171,7 +190,8 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- call $2
+ /* ignore empty BIF__ARGS */
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -195,7 +215,9 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
@@ -221,7 +243,10 @@ $1:
/* Save caller-save registers and call the C function. */
SAVE_CONTEXT_GC
- call $2
+ st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg
+ st %o2, [%o0+P_ARG1]
+ add %o0, P_ARG0, %o1
+ CALL_BIF($2)
nop
TEST_GOT_MBUF
diff --git a/erts/emulator/hipe/hipe_x86.h b/erts/emulator/hipe/hipe_x86.h
index f0f3c158af..97f09e38cd 100644
--- a/erts/emulator/hipe/hipe_x86.h
+++ b/erts/emulator/hipe/hipe_x86.h
@@ -49,7 +49,9 @@ static __inline__ int hipe_word32_address_ok(void *address)
#define hipe_arch_name am_x86
extern void nbif_inc_stack_0(void);
+#ifndef NO_FPE_SIGNALS
extern void nbif_handle_fp_exception(void);
+#endif
/* for hipe_bifs_enter_code_2 */
extern void *hipe_alloc_code(Uint nrbytes, Eterm callees, Eterm *trampolines, Process *p);
diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4
index 1bb6488b00..3cb7d67be0 100644
--- a/erts/emulator/hipe/hipe_x86_bifs.m4
+++ b/erts/emulator/hipe/hipe_x86_bifs.m4
@@ -20,6 +20,7 @@ changecom(`/*', `*/')dnl
include(`hipe/hipe_x86_asm.m4')
+#`include' "config.h"
#`include' "hipe_literals.h"
`#if THE_NON_VALUE == 0
@@ -28,16 +29,27 @@ include(`hipe/hipe_x86_asm.m4')
#define TEST_GOT_EXN cmpl $THE_NON_VALUE,%eax
#endif'
-`#define TEST_GOT_MBUF movl P_MBUF(P), %edx; testl %edx, %edx; jnz 3f; 2:
-#define JOIN3(A,B,C) A##B##C
-#define HANDLE_GOT_MBUF(ARITY) 3: call JOIN3(nbif_,ARITY,_gc_after_bif); jmp 2b'
+`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define CALL_BIF(F) movl $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper)
+#else
+# define CALL_BIF(F) call CSYM(F)
+#endif'
+
+define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx # `TEST_GOT_MBUF'
+ testl %edx, %edx
+ jnz 3f
+2:')
+define(HANDLE_GOT_MBUF,`
+3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF'
+ jmp 2b')
/*
* standard_bif_interface_1(nbif_name, cbif_name)
* standard_bif_interface_2(nbif_name, cbif_name)
* standard_bif_interface_3(nbif_name, cbif_name)
+ * standard_bif_interface_0(nbif_name, cbif_name)
*
- * Generate native interface for a BIF with 1-3 parameters and
+ * Generate native interface for a BIF with 0-3 parameters and
* standard failure mode.
*/
define(standard_bif_interface_1,
@@ -56,8 +68,10 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- NBIF_ARG(1,1,0)
- call CSYM($2)
+ NBIF_ARG(2,1,0)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -88,9 +102,11 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- NBIF_ARG(1,2,0)
- NBIF_ARG(2,2,1)
- call CSYM($2)
+ NBIF_ARG(2,2,0)
+ NBIF_ARG(3,2,1)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -121,10 +137,12 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- NBIF_ARG(1,3,0)
- NBIF_ARG(2,3,1)
- NBIF_ARG(3,3,2)
- call CSYM($2)
+ NBIF_ARG(2,3,0)
+ NBIF_ARG(3,3,1)
+ NBIF_ARG(4,3,2)
+ lea 8(%esp), %eax
+ NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -139,13 +157,7 @@ ASYM($1):
TYPE_FUNCTION(ASYM($1))
#endif')
-/*
- * fail_bif_interface_0(nbif_name, cbif_name)
- *
- * Generate native interface for a BIF with 0 parameters and
- * standard failure mode.
- */
-define(fail_bif_interface_0,
+define(standard_bif_interface_0,
`
#ifndef HAVE_$1
#`define' HAVE_$1
@@ -158,7 +170,8 @@ ASYM($1):
/* make the call on the C stack */
NBIF_ARG_REG(0,P)
- call CSYM($2)
+ /* skip BIF__ARGS */
+ CALL_BIF($2)
TEST_GOT_MBUF
/* switch to native stack */
@@ -608,7 +621,9 @@ ASYM($1):
/*
* x86-specific primops.
*/
+#ifndef NO_FPE_SIGNALS
noproc_primop_interface_0(nbif_handle_fp_exception, erts_restore_fpu)
+#endif /* NO_FPE_SIGNALS */
/*
* Implement gc_bif_interface_0 as nofail_primop_interface_0.
diff --git a/erts/emulator/hipe/hipe_x86_glue.h b/erts/emulator/hipe/hipe_x86_glue.h
index a7b0f164be..b0db93267c 100644
--- a/erts/emulator/hipe/hipe_x86_glue.h
+++ b/erts/emulator/hipe/hipe_x86_glue.h
@@ -186,6 +186,25 @@ hipe_call_from_native_is_recursive(Process *p, Eterm reg[])
return 0;
}
+/* BEAM called native code, which called a BIF that returned a trap.
+ * Discard the BIF parameters.
+ * If this is a tailcall, also clean up the native stub continuation. */
+static __inline__ int
+hipe_trap_from_native_is_recursive(Process *p)
+{
+ Eterm nra = *(p->hipe.nsp++);
+
+ if (p->hipe.narity > NR_ARG_REGS) {
+ p->hipe.nsp += (p->hipe.narity - NR_ARG_REGS);
+ }
+ if (nra != (Eterm)nbif_return) {
+ *--(p->hipe.nsp) = nra;
+ return 1;
+ }
+ return 0;
+}
+
+
/* Native makes a call which needs to unload the parameters.
This differs from hipe_call_from_native_is_recursive() in
that it doesn't check for or pop the BEAM-calls-native frame.
diff --git a/erts/emulator/hipe/hipe_x86_primops.h b/erts/emulator/hipe/hipe_x86_primops.h
index 96d2336bc5..111b1fa8bd 100644
--- a/erts/emulator/hipe/hipe_x86_primops.h
+++ b/erts/emulator/hipe/hipe_x86_primops.h
@@ -19,4 +19,7 @@
PRIMOP_LIST(am_inc_stack_0, &nbif_inc_stack_0)
+#ifndef NO_FPE_SIGNALS
PRIMOP_LIST(am_handle_fp_exception, &nbif_handle_fp_exception)
+#endif
+
diff --git a/erts/emulator/pcre/Makefile.in b/erts/emulator/pcre/Makefile.in
deleted file mode 100644
index f62700ec4e..0000000000
--- a/erts/emulator/pcre/Makefile.in
+++ /dev/null
@@ -1,165 +0,0 @@
-# Makefile for zlib
-# Copyright (C) 1995-1996 Jean-loup Gailly.
-# For conditions of distribution and use, see copyright notice in zlib.h
-
-# To compile and test, type:
-# ./configure; make test
-# The call of configure is optional if you don't have special requirements
-
-# To install /usr/local/lib/libz.* and /usr/local/include/zlib.h, type:
-# make install
-# To install in $HOME instead of /usr/local, use:
-# make install prefix=$HOME
-
-# %ExternalCopyright%
-
-ARFLAGS = rc
-
-O = \
-pcre_latin_1_table.o \
-pcre_compile.o \
-pcre_config.o \
-pcre_dfa_exec.o \
-pcre_exec.o \
-pcre_fullinfo.o \
-pcre_get.o \
-pcre_globals.o \
-pcre_info.o \
-pcre_maketables.o \
-pcre_newline.o \
-pcre_ord2utf8.o \
-pcre_refcount.o \
-pcre_study.o \
-pcre_tables.o \
-pcre_try_flipped.o \
-pcre_ucp_searchfuncs.o \
-pcre_valid_utf8.o \
-pcre_version.o \
-pcre_xclass.o
-
-OBJS = $(O:%=$(OBJDIR)/%)
-
-GENINC = pcre_exec_loop_break_cases.inc
-
-#### Begin OTP targets
-
-include $(ERL_TOP)/make/target.mk
-
-# On windows we need a separate zlib during debug build
-ifeq ($(TARGET),win32)
-
-ifeq ($(TYPE),debug)
-CFLAGS = $(subst -O2, -g, @CFLAGS@ @DEFS@ @DEBUG_FLAGS@ @EMU_THR_DEFS@ -DERLANG_INTEGRATION)
-else # debug
-CFLAGS = @CFLAGS@ @DEFS@ @EMU_THR_DEFS@ -DERLANG_INTEGRATION
-endif # debug
-
-else # win32
-
-ifeq ($(TYPE),debug)
-TYPE_FLAGS = @DEBUG_CFLAGS@
-else # debug
-ifeq ($(TYPE),gcov)
-TYPE_FLAGS = -O0 -fprofile-arcs -ftest-coverage
-else # gcov
-TYPE_FLAGS = -O3
-endif # gcov
-endif # debug
-
-CFLAGS = $(TYPE_FLAGS) $(subst -O2,, @CFLAGS@) @DEFS@ @EMU_THR_DEFS@ -DERLANG_INTEGRATION
-
-endif # win32
-
-OBJDIR = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)
-
-include $(ERL_TOP)/make/$(TARGET)/otp.mk
-
-ifeq ($(TARGET), win32)
-LIBRARY=$(OBJDIR)/epcre.lib
-else
-LIBRARY=$(OBJDIR)/libepcre.a
-endif
-
-all: $(LIBRARY)
-
-# ----------------------------------------------------
-# Release Target
-# ----------------------------------------------------
-include $(ERL_TOP)/make/otp_release_targets.mk
-
-release_spec: opt
-
-tests release_tests:
-
-docs release_docs release_docs_spec:
-
-clean:
- rm -f $(OBJS) $(OBJDIR)/libepcre.a
-
-#### end OTP targets
-
-ifeq ($(TARGET), win32)
-$(LIBRARY): $(OBJS)
- $(AR) -out:$@ $(OBJS)
-else
-$(LIBRARY): $(OBJS)
- $(AR) $(ARFLAGS) $@ $(OBJS)
- -@ ($(RANLIB) $@ || true) 2>/dev/null
-endif
-
-$(OBJDIR)/%.o: %.c
- $(CC) -c $(CFLAGS) -o $@ $<
-
-$(GENINC): pcre_exec.c
- for x in `grep -n COST_CHK pcre_exec.c | grep -v 'COST_CHK(N)' | awk -F: '{print $$1}'`; \
- do \
- N=`expr $$x + 100`; \
- echo "case $$N: goto L_LOOP_COUNT_$${x};"; \
- done > $(GENINC)
-
-table: ./gen_table
- ./gen_table pcre_latin_1_table.c
-
-./gen_table: pcre_make_latin1_default.c make_latin1_table.c
- $(CC) $(CFLAGS) -o gen_table pcre_make_latin1_default.c make_latin1_table.c
-
-# DO NOT DELETE THIS LINE -- make depend depends on it.
-
-$(OBJDIR)/pcre_chartables.o: pcre_chartables.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_compile.o: pcre_compile.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_config.o: pcre_config.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_dfa_exec.o: pcre_dfa_exec.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_exec.o: pcre_exec.c pcre_internal.h local_config.h pcre.h ucp.h \
- $(GENINC)
-$(OBJDIR)/pcre_fullinfo.o: pcre_fullinfo.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_get.o: pcre_get.c pcre_internal.h local_config.h pcre.h ucp.h
-$(OBJDIR)/pcre_globals.o: pcre_globals.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_info.o: pcre_info.c pcre_internal.h local_config.h pcre.h ucp.h
-$(OBJDIR)/pcre_maketables.o: pcre_maketables.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_newline.o: pcre_newline.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_ord2utf8.o: pcre_ord2utf8.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_refcount.o: pcre_refcount.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-$(OBJDIR)/pcre_study.o: pcre_study.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_tables.o: pcre_tables.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_try_flipped.o: pcre_try_flipped.c pcre_internal.h \
- local_config.h pcre.h ucp.h
-$(OBJDIR)/pcre_ucp_searchfuncs.o: pcre_ucp_searchfuncs.c pcre_internal.h \
- local_config.h pcre.h ucp.h ucpinternal.h ucptable.h
-$(OBJDIR)/pcre_valid_utf8.o: pcre_valid_utf8.c pcre_internal.h local_config.h \
- pcre.h ucp.h
-pcre_version.o: pcre_version.c pcre_internal.h local_config.h pcre.h \
- ucp.h
-$(OBJDIR)/pcre_xclass.o: pcre_xclass.c pcre_internal.h local_config.h pcre.h \
- ucp.h
diff --git a/erts/emulator/pcre/pcre.mk b/erts/emulator/pcre/pcre.mk
new file mode 100644
index 0000000000..352137b341
--- /dev/null
+++ b/erts/emulator/pcre/pcre.mk
@@ -0,0 +1,111 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+PCRE_O = \
+pcre_latin_1_table.o \
+pcre_compile.o \
+pcre_config.o \
+pcre_dfa_exec.o \
+pcre_exec.o \
+pcre_fullinfo.o \
+pcre_get.o \
+pcre_globals.o \
+pcre_info.o \
+pcre_maketables.o \
+pcre_newline.o \
+pcre_ord2utf8.o \
+pcre_refcount.o \
+pcre_study.o \
+pcre_tables.o \
+pcre_try_flipped.o \
+pcre_ucp_searchfuncs.o \
+pcre_valid_utf8.o \
+pcre_version.o \
+pcre_xclass.o
+
+PCRE_OBJS = $(PCRE_O:%=$(PCRE_OBJDIR)/%)
+
+PCRE_GENINC = $(ERL_TOP)/erts/emulator/pcre/pcre_exec_loop_break_cases.inc
+
+PCRE_OBJDIR = $(ERL_TOP)/erts/emulator/pcre/obj/$(TARGET)/$(TYPE)
+
+PCRE_CFLAGS = $(filter-out -DDEBUG,$(CFLAGS)) -DERLANG_INTEGRATION
+
+ifeq ($(TARGET), win32)
+$(EPCRE_LIB): $(PCRE_OBJS)
+ $(AR) -out:$@ $(PCRE_OBJS)
+else
+$(EPCRE_LIB): $(PCRE_OBJS)
+ $(AR) $(ARFLAGS) $@ $(PCRE_OBJS)
+ -@ ($(RANLIB) $@ || true) 2>/dev/null
+endif
+
+$(PCRE_OBJDIR)/%.o: pcre/%.c
+ $(CC) -c $(PCRE_CFLAGS) -o $@ $<
+
+$(PCRE_GENINC): pcre/pcre_exec.c
+ for x in `grep -n COST_CHK pcre/pcre_exec.c | grep -v 'COST_CHK(N)' | awk -F: '{print $$1}'`; \
+ do \
+ N=`expr $$x + 100`; \
+ echo "case $$N: goto L_LOOP_COUNT_$${x};"; \
+ done > $(PCRE_GENINC)
+
+# Dependencies.
+
+$(PCRE_OBJDIR)/pcre_chartables.o: pcre/pcre_chartables.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_compile.o: pcre/pcre_compile.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_config.o: pcre/pcre_config.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_dfa_exec.o: pcre/pcre_dfa_exec.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_exec.o: pcre/pcre_exec.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h $(PCRE_GENINC)
+$(PCRE_OBJDIR)/pcre_fullinfo.o: pcre/pcre_fullinfo.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_get.o: pcre/pcre_get.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_globals.o: pcre/pcre_globals.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_info.o: pcre/pcre_info.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_maketables.o: pcre/pcre_maketables.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_newline.o: pcre/pcre_newline.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_ord2utf8.o: pcre/pcre_ord2utf8.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_refcount.o: pcre/pcre_refcount.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_study.o: pcre/pcre_study.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_tables.o: pcre/pcre_tables.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_try_flipped.o: pcre/pcre_try_flipped.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_ucp_searchfuncs.o: pcre/pcre_ucp_searchfuncs.c \
+ pcre/pcre_internal.h pcre/local_config.h pcre/pcre.h pcre/ucp.h \
+ pcre/ucpinternal.h pcre/ucptable.h
+$(PCRE_OBJDIR)/pcre_valid_utf8.o: pcre/pcre_valid_utf8.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_version.o: pcre/pcre_version.c pcre/pcre_internal.h pcre/local_config.h \
+ pcre/pcre.h pcre/ucp.h
+$(PCRE_OBJDIR)/pcre_xclass.o: pcre/pcre_xclass.c pcre/pcre_internal.h \
+ pcre/local_config.h pcre/pcre.h pcre/ucp.h
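
The $(PCRE_GENINC) rule above regenerates pcre_exec_loop_break_cases.inc from the
COST_CHK call sites in pcre/pcre_exec.c: grep -n reports the source line number of
each call (the macro definition itself is excluded by grep -v 'COST_CHK(N)'), 100 is
added to form a unique case label, and each resulting line is a switch arm that jumps
back to the corresponding L_LOOP_COUNT_<line> label, presumably so that an interrupted
match can resume where it left off. A minimal sketch of what the generated include
might contain, assuming (hypothetically) COST_CHK calls at lines 1234 and 1300 of
pcre_exec.c:

    /* Hypothetical content of pcre_exec_loop_break_cases.inc,
     * produced by the rule above for COST_CHK calls at source
     * lines 1234 and 1300 (labels are line number + 100). */
    case 1334: goto L_LOOP_COUNT_1234;
    case 1400: goto L_LOOP_COUNT_1300;

The same generation step existed in the deleted Makefile.in; only the paths differ so
that the rule can run from the emulator directory when included via pcre.mk.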
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index cd4de21d65..c1336c60d9 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,8 @@
#include "sys.h"
#include "global.h"
#include "erl_check_io.h"
+#include "erl_thr_progress.h"
+#include "dtrace-wrapper.h"
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128
@@ -66,6 +68,9 @@ typedef char EventStateFlags;
#define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control)
#define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait)
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)
+#endif
#define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt)
#define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed)
#define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset)
@@ -218,7 +223,7 @@ remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
#ifdef ERTS_SMP
struct removed_fd *fdlp;
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
- if (erts_smp_atomic_read(&psi->in_poll_wait)) {
+ if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) {
state->remove_cnt++;
ASSERT(state->remove_cnt > 0);
fdlp = removed_fd_alloc();
@@ -310,7 +315,8 @@ forget_removed(struct pollset_info* psi)
erts_smp_mtx_unlock(mtx);
if (drv_ptr) {
int was_unmasked = erts_block_fpe();
- (*drv_ptr->stop_select) (fd, NULL);
+ DTRACE1(driver_stop_select, drv_ptr->name);
+ (*drv_ptr->stop_select) ((ErlDrvEvent) fd, NULL);
erts_unblock_fpe(was_unmasked);
if (drv_ptr->handle) {
erts_ddll_dereference_driver(drv_ptr->handle);
@@ -333,7 +339,7 @@ grow_drv_ev_state(int min_ix)
new_len = max_fds;
erts_smp_mtx_lock(&drv_ev_state_grow_lock);
- if (erts_smp_atomic_read(&drv_ev_state_len) <= min_ix) {
+ if (erts_smp_atomic_read_nob(&drv_ev_state_len) <= min_ix) {
for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { /* lock all fd's */
erts_smp_mtx_lock(&drv_ev_state_locks[i].lck);
}
@@ -343,7 +349,7 @@ grow_drv_ev_state(int min_ix)
sizeof(ErtsDrvEventState)*new_len)
: erts_alloc(ERTS_ALC_T_DRV_EV_STATE,
sizeof(ErtsDrvEventState)*new_len));
- for (i = erts_smp_atomic_read(&drv_ev_state_len); i < new_len; i++) {
+ for (i = erts_smp_atomic_read_nob(&drv_ev_state_len); i < new_len; i++) {
drv_ev_state[i].fd = (ErtsSysFdType) i;
drv_ev_state[i].driver.select = NULL;
drv_ev_state[i].events = 0;
@@ -351,7 +357,7 @@ grow_drv_ev_state(int min_ix)
drv_ev_state[i].type = ERTS_EV_TYPE_NONE;
drv_ev_state[i].flags = 0;
}
- erts_smp_atomic_set(&drv_ev_state_len, new_len);
+ erts_smp_atomic_set_nob(&drv_ev_state_len, new_len);
for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) {
erts_smp_mtx_unlock(&drv_ev_state_locks[i].lck);
}
@@ -492,12 +498,15 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ErtsDrvEventState *state;
int wake_poller;
int ret;
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(name, 64);
+#endif
ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
&& erts_lc_is_port_locked(erts_drvport2port(ix)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- if ((unsigned)fd >= (unsigned)erts_smp_atomic_read(&drv_ev_state_len)) {
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
if (fd < 0) {
return -1;
}
@@ -521,6 +530,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (IS_FD_UNKNOWN(state)) {
/* fast track to stop_select callback */
stop_select_fn = erts_drvport2port(ix)->drv_ptr->stop_select;
+#ifdef USE_VM_PROBES
+ strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ name[sizeof(name)-1] = '\0';
+#endif
ret = 0;
goto done_unknown;
}
@@ -657,6 +670,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
/* Safe to close fd now as it is not in pollset
or there was no need to eject fd (kernel poll) */
stop_select_fn = drv_ptr->stop_select;
+#ifdef USE_VM_PROBES
+ strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ name[sizeof(name)-1] = '\0';
+#endif
}
else {
/* Not safe to close fd, postpone stop_select callback. */
@@ -682,6 +699,7 @@ done_unknown:
erts_smp_mtx_unlock(fd_mtx(fd));
if (stop_select_fn) {
int was_unmasked = erts_block_fpe();
+ DTRACE1(driver_stop_select, name);
(*stop_select_fn)(e, NULL);
erts_unblock_fpe(was_unmasked);
}
@@ -709,7 +727,7 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
&& erts_lc_is_port_locked(erts_drvport2port(ix)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- if ((unsigned)fd >= (unsigned)erts_smp_atomic_read(&drv_ev_state_len)) {
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
if (fd < 0)
return -1;
if (fd >= max_fds) {
@@ -1115,6 +1133,14 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void
+ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void)
+{
+ ERTS_CIO_POLL_AS_INTR(pollset.ps);
+}
+#endif
+
void
ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
{
@@ -1122,7 +1148,8 @@ ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
}
void
-ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set, long msec)
+ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set,
+ erts_short_time_t msec)
{
ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, msec);
}
@@ -1153,17 +1180,15 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd);
- erts_smp_atomic_set(&pollset.in_poll_wait, 1);
+ erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1);
poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, &wait_time);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
erts_deliver_time(); /* sync the machine's idea of time */
@@ -1173,7 +1198,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
if (poll_ret != 0) {
- erts_smp_atomic_set(&pollset.in_poll_wait, 0);
+ erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
forget_removed(&pollset);
if (poll_ret == EAGAIN) {
goto restart;
@@ -1304,7 +1329,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
}
- erts_smp_atomic_set(&pollset.in_poll_wait, 0);
+ erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
forget_removed(&pollset);
}
@@ -1419,7 +1444,7 @@ static void drv_ev_state_free(void *des)
void
ERTS_CIO_EXPORT(erts_init_check_io)(void)
{
- erts_smp_atomic_init(&pollset.in_poll_wait, 0);
+ erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0);
ERTS_CIO_POLL_INIT();
pollset.ps = ERTS_CIO_NEW_POLLSET();
@@ -1441,7 +1466,7 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
max_fds = ERTS_CIO_POLL_MAX_FDS();
- erts_smp_atomic_init(&drv_ev_state_len, 0);
+ erts_smp_atomic_init_nob(&drv_ev_state_len, 0);
drv_ev_state = NULL;
erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow");
#else
@@ -1479,7 +1504,7 @@ ERTS_CIO_EXPORT(erts_check_io_size)(void)
ERTS_CIO_POLL_INFO(pollset.ps, &pi);
res = pi.memory_size;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read(&drv_ev_state_len);
+ res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
#else
res += safe_hash_table_sz(&drv_ev_state_tab);
{
@@ -1506,7 +1531,7 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
ERTS_CIO_POLL_INFO(pollset.ps, &pi);
memory_size = pi.memory_size;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read(&drv_ev_state_len);
+ memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
#else
memory_size += safe_hash_table_sz(&drv_ev_state_tab);
{
@@ -1870,13 +1895,12 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
erts_printf("--- fds in pollset --------------------------------------\n");
-#ifdef ERTS_SMP
-# ifdef ERTS_ENABLE_LOCK_CHECK
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
-# endif
- erts_block_system(0); /* stop the world to avoid messy locking */
#endif
+ erts_smp_thr_progress_block(); /* stop the world to avoid messy locking */
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds);
ERTS_POLL_EXPORT(erts_poll_get_selected_events)(pollset.ps, counters.epep, max_fds);
@@ -1886,7 +1910,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
counters.num_errors = 0;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
- len = erts_smp_atomic_read(&drv_ev_state_len);
+ len = erts_smp_atomic_read_nob(&drv_ev_state_len);
for (fd = 0; fd < len; fd++) {
doit_erts_check_io_debug((void *) &drv_ev_state[fd], (void *) &counters);
}
@@ -1898,9 +1922,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
safe_hash_for_each(&drv_ev_state_tab, &doit_erts_check_io_debug, (void *) &counters);
#endif
-#ifdef ERTS_SMP
- erts_release_system();
-#endif
+ erts_smp_thr_progress_unblock();
erts_printf("\n");
erts_printf("used fds=%d\n", counters.used_fds);
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index 9b45a63913..edab7947ba 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -40,10 +40,14 @@ Eterm erts_check_io_info_kp(void *);
Eterm erts_check_io_info_nkp(void *);
int erts_check_io_max_files_kp(void);
int erts_check_io_max_files_nkp(void);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void erts_check_io_async_sig_interrupt_kp(void);
+void erts_check_io_async_sig_interrupt_nkp(void);
+#endif
void erts_check_io_interrupt_kp(int);
void erts_check_io_interrupt_nkp(int);
-void erts_check_io_interrupt_timed_kp(int, long);
-void erts_check_io_interrupt_timed_nkp(int, long);
+void erts_check_io_interrupt_timed_kp(int, erts_short_time_t);
+void erts_check_io_interrupt_timed_nkp(int, erts_short_time_t);
void erts_check_io_kp(int);
void erts_check_io_nkp(int);
void erts_init_check_io_kp(void);
@@ -56,8 +60,11 @@ int erts_check_io_debug_nkp(void);
Uint erts_check_io_size(void);
Eterm erts_check_io_info(void *);
int erts_check_io_max_files(void);
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void erts_check_io_async_sig_interrupt(void);
+#endif
void erts_check_io_interrupt(int);
-void erts_check_io_interrupt_timed(int, long);
+void erts_check_io_interrupt_timed(int, erts_short_time_t);
void erts_check_io(int);
void erts_init_check_io(void);
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index eaef6680dd..db2854fa40 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -36,14 +36,12 @@
#include "erl_threads.h"
#include "erl_mtrace.h"
#include "erl_time.h"
+#include "erl_alloc.h"
#include "big.h"
+#include "erl_thr_progress.h"
#if HAVE_ERTS_MSEG
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-# define ERTS_THREADS_NO_SMP
-#endif
-
#define SEGTYPE ERTS_MTRACE_SEGMENT_ID
#ifndef HAVE_GETPAGESIZE
@@ -75,16 +73,9 @@
static int atoms_initialized;
-static Uint cache_check_interval;
-
typedef struct mem_kind_t MemKind;
-static void check_cache(void *unused);
static void mseg_clear_cache(MemKind*);
-static int is_cache_check_scheduled;
-#ifdef ERTS_THREADS_NO_SMP
-static int is_cache_check_requested;
-#endif
#if HALFWORD_HEAP
static int initialize_pmmap(void);
@@ -138,7 +129,8 @@ const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserv data */
0, /* Absolute shrink threshold */
- 0 /* Relative shrink threshold */
+ 0, /* Relative shrink threshold */
+ 0 /* Scheduler specific */
#if HALFWORD_HEAP
,0 /* need low memory */
#endif
@@ -157,11 +149,10 @@ typedef struct {
Uint32 no;
} CallCounter;
-static int is_init_done;
static Uint page_size;
static Uint page_shift;
-static struct {
+typedef struct {
CallCounter alloc;
CallCounter dealloc;
CallCounter realloc;
@@ -172,7 +163,9 @@ static struct {
#endif
CallCounter clear_cache;
CallCounter check_cache;
-} calls;
+} ErtsMsegCalls;
+
+typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
struct mem_kind_t {
cache_desc_t cache_descs[MAX_CACHE_SIZE];
@@ -201,25 +194,84 @@ struct mem_kind_t {
} max_ever;
} segments;
+ ErtsMsegAllctr_t *ma;
const char* name;
MemKind* next;
};/*MemKind*/
+struct ErtsMsegAllctr_t_ {
+ int ix;
+
+ int is_init_done;
+ int is_thread_safe;
+ erts_mtx_t mtx;
+
+ int is_cache_check_scheduled;
+
+ MemKind* mk_list;
+
#if HALFWORD_HEAP
-static MemKind low_mem, hi_mem;
+ MemKind low_mem;
+ MemKind hi_mem;
#else
-static MemKind the_mem;
+ MemKind the_mem;
#endif
-static MemKind* mk_list = NULL;
-static Uint max_cache_size;
-static Uint abs_max_cache_bad_fit;
-static Uint rel_max_cache_bad_fit;
+ Uint max_cache_size;
+ Uint abs_max_cache_bad_fit;
+ Uint rel_max_cache_bad_fit;
+
+ ErtsMsegCalls calls;
#if CAN_PARTLY_DESTROY
-static Uint min_seg_size;
+ Uint min_seg_size;
+#endif
+
+};
+
+typedef union {
+ ErtsMsegAllctr_t mseg_alloc;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMsegAllctr_t))];
+} ErtsAlgndMsegAllctr_t;
+
+static int no_mseg_allocators;
+static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr;
+
+#ifdef ERTS_SMP
+
+#define ERTS_MSEG_ALLCTR_IX(IX) \
+ (&aligned_mseg_allctr[(IX)].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_SS() \
+ ERTS_MSEG_ALLCTR_IX((int) erts_get_scheduler_id())
+
+#define ERTS_MSEG_ALLCTR_OPT(OPT) \
+ ((OPT)->sched_spec ? ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0))
+
+#else
+
+#define ERTS_MSEG_ALLCTR_IX(IX) \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_SS() \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
+#define ERTS_MSEG_ALLCTR_OPT(OPT) \
+ (&aligned_mseg_allctr[0].mseg_alloc)
+
#endif
+#define ERTS_MSEG_LOCK(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ erts_mtx_lock(&(MA)->mtx); \
+} while (0)
+
+#define ERTS_MSEG_UNLOCK(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ erts_mtx_unlock(&(MA)->mtx); \
+} while (0)
#define ERTS_MSEG_ALLOC_STAT(C,SZ) \
do { \
@@ -250,104 +302,44 @@ do { \
#define ONE_GIGA (1000000000)
-#define ZERO_CC(CC) (calls.CC.no = 0, calls.CC.giga_no = 0)
+#define ZERO_CC(MA, CC) ((MA)->calls.CC.no = 0, \
+ (MA)->calls.CC.giga_no = 0)
-#define INC_CC(CC) (calls.CC.no == ONE_GIGA - 1 \
- ? (calls.CC.giga_no++, calls.CC.no = 0) \
- : calls.CC.no++)
+#define INC_CC(MA, CC) ((MA)->calls.CC.no == ONE_GIGA - 1 \
+ ? ((MA)->calls.CC.giga_no++, \
+ (MA)->calls.CC.no = 0) \
+ : (MA)->calls.CC.no++)
-#define DEC_CC(CC) (calls.CC.no == 0 \
- ? (calls.CC.giga_no--, \
- calls.CC.no = ONE_GIGA - 1) \
- : calls.CC.no--)
+#define DEC_CC(MA, CC) ((MA)->calls.CC.no == 0 \
+ ? ((MA)->calls.CC.giga_no--, \
+ (MA)->calls.CC.no = ONE_GIGA - 1) \
+ : (MA)->calls.CC.no--)
-static erts_mtx_t mseg_mutex; /* Also needed when !USE_THREADS */
static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */
-#ifdef USE_THREADS
-#ifdef ERTS_THREADS_NO_SMP
-static erts_tid_t main_tid;
-static int async_handle = -1;
-#endif
-
-static void thread_safe_init(void)
-{
- erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
- erts_mtx_init(&mseg_mutex, "mseg");
-
-#ifdef ERTS_THREADS_NO_SMP
- main_tid = erts_thr_self();
-#endif
-}
-
-#endif
-
-static ErlTimer cache_check_timer;
static ERTS_INLINE void
-schedule_cache_check(void)
-{
- if (!is_cache_check_scheduled && is_init_done) {
-#ifdef ERTS_THREADS_NO_SMP
- if (!erts_equal_tids(erts_thr_self(), main_tid)) {
- if (!is_cache_check_requested) {
- is_cache_check_requested = 1;
- sys_async_ready(async_handle);
- }
- }
- else
-#endif
- {
- cache_check_timer.active = 0;
- erts_set_timer(&cache_check_timer,
- check_cache,
- NULL,
- NULL,
- cache_check_interval);
- is_cache_check_scheduled = 1;
-#ifdef ERTS_THREADS_NO_SMP
- is_cache_check_requested = 0;
-#endif
- }
- }
-}
-
-#ifdef ERTS_THREADS_NO_SMP
-
-static void
-check_schedule_cache_check(void)
+schedule_cache_check(ErtsMsegAllctr_t *ma)
{
- erts_mtx_lock(&mseg_mutex);
- if (is_cache_check_requested
- && !is_cache_check_scheduled) {
- schedule_cache_check();
- }
- erts_mtx_unlock(&mseg_mutex);
-}
-
-#endif
-static void
-mseg_shutdown(void)
-{
- MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
- mseg_clear_cache(mk);
+ if (!ma->is_cache_check_scheduled && ma->is_init_done) {
+ erts_set_aux_work_timeout(ma->ix,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ 1);
+ ma->is_cache_check_scheduled = 1;
}
- erts_mtx_unlock(&mseg_mutex);
}
static ERTS_INLINE void *
-mseg_create(MemKind* mk, Uint size)
+mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
{
void *seg;
ASSERT(size % page_size == 0);
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
seg = pmmap(size);
if ((unsigned long) seg & CHECK_POINTER_MASK) {
erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
@@ -371,28 +363,38 @@ mseg_create(MemKind* mk, Uint size)
#endif
}
- INC_CC(create);
+ INC_CC(ma, create);
return seg;
}
static ERTS_INLINE void
-mseg_destroy(MemKind* mk, void *seg, Uint size)
+mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
{
+#ifdef DEBUG
int res;
+#endif
#if HALFWORD_HEAP
- if (mk == &low_mem) {
- res = pmunmap((void *) seg, size);
+ if (mk == &ma->low_mem) {
+#ifdef DEBUG
+ res =
+#endif
+ pmunmap((void *) seg, size);
}
else
#endif
{
#ifdef ERTS_MSEG_FAKE_SEGMENTS
erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
+#ifdef DEBUG
res = 0;
+#endif
#elif HAVE_MMAP
- res = munmap((void *) seg, size);
+#ifdef DEBUG
+ res =
+#endif
+ munmap((void *) seg, size);
#else
# error "Missing mseg_destroy() implementation"
#endif
@@ -401,14 +403,14 @@ mseg_destroy(MemKind* mk, void *seg, Uint size)
ASSERT(size % page_size == 0);
ASSERT(res == 0);
- INC_CC(destroy);
+ INC_CC(ma, destroy);
}
#if HAVE_MSEG_RECREATE
static ERTS_INLINE void *
-mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
+mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
@@ -416,7 +418,7 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
ASSERT(new_size % page_size == 0);
#if HALFWORD_HEAP
- if (mk == &low_mem) {
+ if (mk == &ma->low_mem) {
new_seg = (void *) pmremap((void *) old_seg,
(size_t) old_size,
(size_t) new_size);
@@ -447,19 +449,37 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
#endif
}
- INC_CC(recreate);
+ INC_CC(ma, recreate);
return new_seg;
}
#endif /* #if HAVE_MSEG_RECREATE */
+#ifdef DEBUG
+#define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \
+do { \
+ if ((MA)->is_thread_safe) \
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx) \
+ || erts_smp_thr_progress_is_blocking() \
+ || ERTS_IS_CRASH_DUMPING); \
+ else \
+ ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id() \
+ || erts_smp_thr_progress_is_blocking() \
+ || ERTS_IS_CRASH_DUMPING); \
+} while (0)
+#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \
+ ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma)
+#else
+#define ERTS_DBG_MA_CHK_THR_ACCESS(MA)
+#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
+#endif
static ERTS_INLINE cache_desc_t *
alloc_cd(MemKind* mk)
{
cache_desc_t *cd = mk->free_cache_descs;
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (cd)
mk->free_cache_descs = cd->next;
return cd;
@@ -468,7 +488,7 @@ alloc_cd(MemKind* mk)
static ERTS_INLINE void
free_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
cd->next = mk->free_cache_descs;
mk->free_cache_descs = cd;
}
@@ -477,7 +497,7 @@ free_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
link_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (mk->cache)
mk->cache->prev = cd;
cd->next = mk->cache;
@@ -496,7 +516,7 @@ link_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
end_link_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (mk->cache_end)
mk->cache_end->next = cd;
cd->next = NULL;
@@ -515,7 +535,7 @@ end_link_cd(MemKind* mk, cache_desc_t *cd)
static ERTS_INLINE void
unlink_cd(MemKind* mk, cache_desc_t *cd)
{
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
if (cd->next)
cd->next->prev = cd->prev;
else
@@ -533,7 +553,7 @@ static ERTS_INLINE void
check_cache_limits(MemKind* mk)
{
cache_desc_t *cd;
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
mk->max_cached_seg_size = 0;
mk->min_cached_seg_size = ~((Uint) 0);
for (cd = mk->cache; cd; cd = cd->next) {
@@ -551,7 +571,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
int check_limits = force_check_limits;
Sint max_cached = ((Sint) mk->segments.current.watermark
- (Sint) mk->segments.current.no);
- ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
ASSERT(mk->cache_end);
cd = mk->cache_end;
@@ -562,7 +582,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk, cd->seg, cd->size);
+ mseg_destroy(mk->ma, mk, cd->seg, cd->size);
unlink_cd(mk,cd);
free_cd(mk,cd);
}
@@ -571,7 +591,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits)
check_cache_limits(mk);
}
-static void
+static Uint
check_one_cache(MemKind* mk)
{
if (mk->segments.current.watermark > mk->segments.current.no)
@@ -579,23 +599,37 @@ check_one_cache(MemKind* mk)
adjust_cache_size(mk, 0);
if (mk->cache_size)
- schedule_cache_check();
+ schedule_cache_check(mk->ma);
+ return mk->cache_size;
}
-static void check_cache(void* unused)
+static void do_cache_check(ErtsMsegAllctr_t *ma)
{
+ int empty_cache = 1;
MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- is_cache_check_scheduled = 0;
+ ERTS_MSEG_LOCK(ma);
- for (mk=mk_list; mk; mk=mk->next) {
- check_one_cache(mk);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
+ if (check_one_cache(mk))
+ empty_cache = 0;
}
- INC_CC(check_cache);
+ if (empty_cache) {
+ ma->is_cache_check_scheduled = 0;
+ erts_set_aux_work_timeout(ma->ix,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
+ 0);
+ }
+
+ INC_CC(ma, check_cache);
+
+ ERTS_MSEG_UNLOCK(ma);
+}
- erts_mtx_unlock(&mseg_mutex);
+void erts_mseg_cache_check(void)
+{
+ do_cache_check(ERTS_MSEG_ALLCTR_SS());
}
static void
@@ -611,42 +645,44 @@ mseg_clear_cache(MemKind* mk)
mk->segments.current.watermark = mk->segments.current.no;
- INC_CC(clear_cache);
+ INC_CC(mk->ma, clear_cache);
}
-static ERTS_INLINE MemKind* memkind(const ErtsMsegOpt_t *opt)
+static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
+ const ErtsMsegOpt_t *opt)
{
#if HALFWORD_HEAP
- return opt->low_mem ? &low_mem : &hi_mem;
+ return opt->low_mem ? &ma->low_mem : &ma->hi_mem;
#else
- return &the_mem;
+ return &ma->the_mem;
#endif
}
static void *
-mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
+ const ErtsMsegOpt_t *opt)
{
Uint max, min, diff_size, size;
cache_desc_t *cd, *cand_cd;
void *seg;
- MemKind* mk = memkind(opt);
+ MemKind* mk = memkind(ma, opt);
- INC_CC(alloc);
+ INC_CC(ma, alloc);
size = PAGE_CEILING(*size_p);
#if CAN_PARTLY_DESTROY
- if (size < min_seg_size)
- min_seg_size = size;
+ if (size < ma->min_seg_size)
+ ma->min_seg_size = size;
#endif
if (!opt->cache) {
create_seg:
adjust_cache_size(mk,0);
- seg = mseg_create(mk, size);
+ seg = mseg_create(ma, mk, size);
if (!seg) {
mseg_clear_cache(mk);
- seg = mseg_create(mk, size);
+ seg = mseg_create(ma, mk, size);
if (!seg)
size = 0;
}
@@ -667,10 +703,10 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
diff_size = mk->min_cached_seg_size - size;
- if (diff_size > abs_max_cache_bad_fit)
+ if (diff_size > ma->abs_max_cache_bad_fit)
goto create_seg;
- if (100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size))
+ if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size))
goto create_seg;
}
@@ -708,8 +744,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
diff_size = cand_cd->size - size;
- if (diff_size > abs_max_cache_bad_fit
- || 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
+ if (diff_size > ma->abs_max_cache_bad_fit
+ || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) {
if (mk->max_cached_seg_size < cand_cd->size)
mk->max_cached_seg_size = cand_cd->size;
if (mk->min_cached_seg_size > cand_cd->size)
@@ -740,18 +776,18 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
static void
-mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
+mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(opt);
+ MemKind* mk = memkind(ma, opt);
cache_desc_t *cd;
ERTS_MSEG_DEALLOC_STAT(mk,size);
- if (!opt->cache || max_cache_size == 0) {
+ if (!opt->cache || ma->max_cache_size == 0) {
if (erts_mtrace_enabled)
erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(mk, seg, size);
+ mseg_destroy(ma, mk, seg, size);
}
else {
int check_limits = 0;
@@ -769,7 +805,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
}
if (erts_mtrace_enabled)
erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk, cd->seg, cd->size);
+ mseg_destroy(ma, mk, cd->seg, cd->size);
unlink_cd(mk,cd);
free_cd(mk,cd);
}
@@ -790,33 +826,34 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
if (check_limits)
check_cache_limits(mk);
- schedule_cache_check();
+ schedule_cache_check(ma);
}
- INC_CC(dealloc);
+ INC_CC(ma, dealloc);
}
static void *
-mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
- const ErtsMsegOpt_t *opt)
+mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt)
{
- MemKind* mk = memkind(opt);
+ MemKind* mk;
void *new_seg;
Uint new_size;
if (!seg || !old_size) {
- new_seg = mseg_alloc(atype, new_size_p, opt);
- DEC_CC(alloc);
+ new_seg = mseg_alloc(ma, atype, new_size_p, opt);
+ DEC_CC(ma, alloc);
return new_seg;
}
if (!(*new_size_p)) {
- mseg_dealloc(atype, seg, old_size, opt);
- DEC_CC(dealloc);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
+ DEC_CC(ma, dealloc);
return NULL;
}
+ mk = memkind(ma, opt);
new_seg = seg;
new_size = PAGE_CEILING(*new_size_p);
@@ -826,8 +863,8 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
Uint shrink_sz = old_size - new_size;
#if CAN_PARTLY_DESTROY
- if (new_size < min_seg_size)
- min_seg_size = new_size;
+ if (new_size < ma->min_seg_size)
+ ma->min_seg_size = new_size;
#endif
if (shrink_sz < opt->abs_shrink_th
@@ -838,7 +875,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#if CAN_PARTLY_DESTROY
- if (shrink_sz > min_seg_size
+ if (shrink_sz > ma->min_seg_size
&& mk->free_cache_descs
&& opt->cache) {
cache_desc_t *cd;
@@ -857,7 +894,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
new_size);
erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size);
}
- schedule_cache_check();
+ schedule_cache_check(ma);
}
else {
if (erts_mtrace_enabled)
@@ -866,7 +903,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
SEGTYPE,
seg,
new_size);
- mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz);
+ mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
}
#elif HAVE_MSEG_RECREATE
@@ -875,14 +912,14 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
#else
- new_seg = mseg_alloc(atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
sys_memcpy(((char *) new_seg),
((char *) seg),
MIN(new_size, old_size));
- mseg_dealloc(atype, seg, old_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
@@ -892,34 +929,34 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
else {
if (!opt->preserv) {
- mseg_dealloc(atype, seg, old_size, opt);
- new_seg = mseg_alloc(atype, &new_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
}
else {
#if HAVE_MSEG_RECREATE
#if !CAN_PARTLY_DESTROY
do_recreate:
#endif
- new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size);
+ new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
new_size = old_size;
#else
- new_seg = mseg_alloc(atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
sys_memcpy(((char *) new_seg),
((char *) seg),
MIN(new_size, old_size));
- mseg_dealloc(atype, seg, old_size, opt);
+ mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
}
}
- INC_CC(realloc);
+ INC_CC(ma, realloc);
*new_size_p = new_size;
@@ -937,7 +974,6 @@ static struct {
Eterm amcbf;
Eterm rmcbf;
Eterm mcs;
- Eterm cci;
Eterm memkind;
Eterm name;
@@ -973,13 +1009,13 @@ static void ERTS_INLINE atom_init(Eterm *atom, char *name)
#define AM_INIT(AM) atom_init(&am.AM, #AM)
static void
-init_atoms(void)
+init_atoms(ErtsMsegAllctr_t *ma)
{
#ifdef DEBUG
Eterm *atom;
#endif
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
erts_mtx_lock(&init_atoms_mutex);
if (!atoms_initialized) {
@@ -997,7 +1033,6 @@ init_atoms(void)
AM_INIT(amcbf);
AM_INIT(rmcbf);
AM_INIT(mcs);
- AM_INIT(cci);
AM_INIT(status);
AM_INIT(cached_segments);
@@ -1025,7 +1060,7 @@ init_atoms(void)
#endif
}
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
atoms_initialized = 1;
erts_mtx_unlock(&init_atoms_mutex);
}
@@ -1082,7 +1117,8 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
}
static Eterm
-info_options(char *prefix,
+info_options(ErtsMsegAllctr_t *ma,
+ char *prefix,
int *print_to_p,
void *print_to_arg,
Uint **hpp,
@@ -1093,30 +1129,26 @@ info_options(char *prefix,
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "%samcbf: %beu\n", prefix, abs_max_cache_bad_fit);
- erts_print(to, arg, "%srmcbf: %beu\n", prefix, rel_max_cache_bad_fit);
- erts_print(to, arg, "%smcs: %beu\n", prefix, max_cache_size);
- erts_print(to, arg, "%scci: %beu\n", prefix, cache_check_interval);
+ erts_print(to, arg, "%samcbf: %beu\n", prefix, ma->abs_max_cache_bad_fit);
+ erts_print(to, arg, "%srmcbf: %beu\n", prefix, ma->rel_max_cache_bad_fit);
+ erts_print(to, arg, "%smcs: %beu\n", prefix, ma->max_cache_size);
}
if (hpp || szp) {
if (!atoms_initialized)
- init_atoms();
+ init_atoms(ma);
res = NIL;
add_2tup(hpp, szp, &res,
- am.cci,
- bld_uint(hpp, szp, cache_check_interval));
- add_2tup(hpp, szp, &res,
am.mcs,
- bld_uint(hpp, szp, max_cache_size));
+ bld_uint(hpp, szp, ma->max_cache_size));
add_2tup(hpp, szp, &res,
am.rmcbf,
- bld_uint(hpp, szp, rel_max_cache_bad_fit));
+ bld_uint(hpp, szp, ma->rel_max_cache_bad_fit));
add_2tup(hpp, szp, &res,
am.amcbf,
- bld_uint(hpp, szp, abs_max_cache_bad_fit));
+ bld_uint(hpp, szp, ma->abs_max_cache_bad_fit));
}
@@ -1124,18 +1156,18 @@ info_options(char *prefix,
}
static Eterm
-info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
if (print_to_p) {
-#define PRINT_CC(TO, TOA, CC) \
- if (calls.CC.giga_no == 0) \
- erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, calls.CC.no); \
- else \
+#define PRINT_CC(TO, TOA, CC) \
+ if (ma->calls.CC.giga_no == 0) \
+ erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, ma->calls.CC.no); \
+ else \
erts_print(TO, TOA, "mseg_%s calls: %b32u%09b32u\n", #CC, \
- calls.CC.giga_no, calls.CC.no)
+ ma->calls.CC.giga_no, ma->calls.CC.no)
int to = *print_to_p;
void *arg = print_to_arg;
@@ -1161,48 +1193,48 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
add_3tup(hpp, szp, &res,
am.mseg_check_cache,
- bld_unstable_uint(hpp, szp, calls.check_cache.giga_no),
- bld_unstable_uint(hpp, szp, calls.check_cache.no));
+ bld_unstable_uint(hpp, szp, ma->calls.check_cache.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.check_cache.no));
add_3tup(hpp, szp, &res,
am.mseg_clear_cache,
- bld_unstable_uint(hpp, szp, calls.clear_cache.giga_no),
- bld_unstable_uint(hpp, szp, calls.clear_cache.no));
+ bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no));
#if HAVE_MSEG_RECREATE
add_3tup(hpp, szp, &res,
am.mseg_recreate,
- bld_unstable_uint(hpp, szp, calls.recreate.giga_no),
- bld_unstable_uint(hpp, szp, calls.recreate.no));
+ bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.recreate.no));
#endif
add_3tup(hpp, szp, &res,
am.mseg_destroy,
- bld_unstable_uint(hpp, szp, calls.destroy.giga_no),
- bld_unstable_uint(hpp, szp, calls.destroy.no));
+ bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.destroy.no));
add_3tup(hpp, szp, &res,
am.mseg_create,
- bld_unstable_uint(hpp, szp, calls.create.giga_no),
- bld_unstable_uint(hpp, szp, calls.create.no));
+ bld_unstable_uint(hpp, szp, ma->calls.create.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.create.no));
add_3tup(hpp, szp, &res,
am.mseg_realloc,
- bld_unstable_uint(hpp, szp, calls.realloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.realloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.realloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.realloc.no));
add_3tup(hpp, szp, &res,
am.mseg_dealloc,
- bld_unstable_uint(hpp, szp, calls.dealloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.dealloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.dealloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.dealloc.no));
add_3tup(hpp, szp, &res,
am.mseg_alloc,
- bld_unstable_uint(hpp, szp, calls.alloc.giga_no),
- bld_unstable_uint(hpp, szp, calls.alloc.no));
+ bld_unstable_uint(hpp, szp, ma->calls.alloc.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.alloc.no));
}
return res;
}
static Eterm
-info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
+info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
int begin_new_max_period, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1258,7 +1290,7 @@ info_status(MemKind* mk, int *print_to_p, void *print_to_arg,
return res;
}
-static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
+static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
int begin_max_per, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1274,8 +1306,8 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
atoms[2] = am.calls;
values[0] = erts_bld_string(hpp, szp, mk->name);
}
- values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[2] = info_calls(print_to_p, print_to_arg, hpp, szp);
+ values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
if (hpp || szp)
res = bld_2tup_list(hpp, szp, 3, atoms, values);
@@ -1285,7 +1317,7 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg,
static Eterm
-info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1306,56 +1338,64 @@ info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
\* */
Eterm
-erts_mseg_info_options(int *print_to_p, void *print_to_arg,
+erts_mseg_info_options(int ix,
+ int *print_to_p, void *print_to_arg,
Uint **hpp, Uint *szp)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res;
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
- res = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- erts_mtx_unlock(&mseg_mutex);
+ res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
+
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
Eterm
-erts_mseg_info(int *print_to_p,
+erts_mseg_info(int ix,
+ int *print_to_p,
void *print_to_arg,
int begin_max_per,
Uint **hpp,
Uint *szp)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix);
Eterm res = THE_NON_VALUE;
Eterm atoms[4];
Eterm values[4];
Uint n = 0;
- erts_mtx_lock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
if (hpp || szp) {
if (!atoms_initialized)
- init_atoms();
+ init_atoms(ma);
atoms[0] = am.version;
atoms[1] = am.options;
atoms[2] = am.memkind;
atoms[3] = am.memkind;
}
- values[n++] = info_version(print_to_p, print_to_arg, hpp, szp);
- values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
#if HALFWORD_HEAP
- values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
#else
- values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp);
#endif
if (hpp || szp)
res = bld_2tup_list(hpp, szp, n, atoms, values);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
@@ -1363,10 +1403,12 @@ erts_mseg_info(int *print_to_p,
void *
erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *seg;
- erts_mtx_lock(&mseg_mutex);
- seg = mseg_alloc(atype, size_p, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ seg = mseg_alloc(ma, atype, size_p, opt);
+ ERTS_MSEG_UNLOCK(ma);
return seg;
}
@@ -1377,12 +1419,14 @@ erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
}
void
-erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size,
- const ErtsMsegOpt_t *opt)
+erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg,
+ Uint size, const ErtsMsegOpt_t *opt)
{
- erts_mtx_lock(&mseg_mutex);
- mseg_dealloc(atype, seg, size, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ mseg_dealloc(ma, atype, seg, size, opt);
+ ERTS_MSEG_UNLOCK(ma);
}
void
@@ -1392,44 +1436,60 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
}
void *
-erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size,
- Uint *new_size_p, const ErtsMsegOpt_t *opt)
+erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p,
+ const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *new_seg;
- erts_mtx_lock(&mseg_mutex);
- new_seg = mseg_realloc(atype, seg, old_size, new_size_p, opt);
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt);
+ ERTS_MSEG_UNLOCK(ma);
return new_seg;
}
void *
-erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
- Uint *new_size_p)
+erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
+ Uint old_size, Uint *new_size_p)
{
- return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &erts_mseg_default_opt);
+ return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
+ &erts_mseg_default_opt);
}
void
erts_mseg_clear_cache(void)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
MemKind* mk;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
+
+start:
+
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
mseg_clear_cache(mk);
}
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
+
+ if (ma->ix != 0) {
+ ma = ERTS_MSEG_ALLCTR_IX(0);
+ goto start;
+ }
}
Uint
-erts_mseg_no(void)
+erts_mseg_no(const ErtsMsegOpt_t *opt)
{
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
MemKind* mk;
Uint n = 0;
- erts_mtx_lock(&mseg_mutex);
- for (mk=mk_list; mk; mk=mk->next) {
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
n += mk->segments.current.no;
}
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_UNLOCK(ma);
return n;
}
@@ -1439,7 +1499,7 @@ erts_mseg_unit_size(void)
return page_size;
}
-static void mem_kind_init(MemKind* mk, const char* name)
+static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
{
unsigned i;
@@ -1450,10 +1510,10 @@ static void mem_kind_init(MemKind* mk, const char* name)
mk->cache_size = 0;
mk->cache_hits = 0;
- if (max_cache_size > 0) {
- for (i = 0; i < max_cache_size - 1; i++)
+ if (ma->max_cache_size > 0) {
+ for (i = 0; i < ma->max_cache_size - 1; i++)
mk->cache_descs[i].next = &mk->cache_descs[i + 1];
- mk->cache_descs[max_cache_size - 1].next = NULL;
+ mk->cache_descs[ma->max_cache_size - 1].next = NULL;
mk->free_cache_descs = &mk->cache_descs[0];
}
else
@@ -1467,30 +1527,38 @@ static void mem_kind_init(MemKind* mk, const char* name)
mk->segments.max_ever.no = 0;
mk->segments.max_ever.sz = 0;
+ mk->ma = ma;
mk->name = name;
- mk->next = mk_list;
- mk_list = mk;
+ mk->next = ma->mk_list;
+ ma->mk_list = mk;
}
+
+
void
erts_mseg_init(ErtsMsegInit_t *init)
{
- atoms_initialized = 0;
- is_init_done = 0;
+ int i;
+ UWord x;
- /* Options ... */
+#ifdef ERTS_SMP
+ no_mseg_allocators = init->nos + 1;
+#else
+ no_mseg_allocators = 1;
+#endif
- abs_max_cache_bad_fit = init->amcbf;
- rel_max_cache_bad_fit = init->rmcbf;
- max_cache_size = init->mcs;
- cache_check_interval = init->cci;
+ x = (UWord) malloc(sizeof(ErtsAlgndMsegAllctr_t)
+ *no_mseg_allocators
+ + (ERTS_CACHE_LINE_SIZE-1));
+ if (x & ERTS_CACHE_LINE_MASK)
+ x = (x & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+ ASSERT((x & ERTS_CACHE_LINE_MASK) == 0);
+ aligned_mseg_allctr = (ErtsAlgndMsegAllctr_t *) x;
- /* */
+ atoms_initialized = 0;
-#ifdef USE_THREADS
- thread_safe_init();
-#endif
+ erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
#if HAVE_MMAP && !defined(MAP_ANON)
mmap_fd = open("/dev/zero", O_RDWR);
@@ -1512,34 +1580,55 @@ erts_mseg_init(ErtsMsegInit_t *init)
page_shift++;
}
- sys_memzero((void *) &calls, sizeof(calls));
+ for (i = 0; i < no_mseg_allocators; i++) {
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i);
-#if CAN_PARTLY_DESTROY
- min_seg_size = ~((Uint) 0);
-#endif
+ ma->ix = i;
+
+ ma->is_init_done = 0;
+
+ if (i != 0)
+ ma->is_thread_safe = 0;
+ else {
+ ma->is_thread_safe = 1;
+ erts_mtx_init(&ma->mtx, "mseg");
+ }
+
+ ma->is_cache_check_scheduled = 0;
+
+ /* Options ... */
- if (max_cache_size > MAX_CACHE_SIZE)
- max_cache_size = MAX_CACHE_SIZE;
+ ma->abs_max_cache_bad_fit = init->amcbf;
+ ma->rel_max_cache_bad_fit = init->rmcbf;
+ ma->max_cache_size = init->mcs;
+
+ if (ma->max_cache_size > MAX_CACHE_SIZE)
+ ma->max_cache_size = MAX_CACHE_SIZE;
+
+ ma->mk_list = NULL;
#if HALFWORD_HEAP
- mem_kind_init(&low_mem, "low memory");
- mem_kind_init(&hi_mem, "high memory");
+ mem_kind_init(ma, &ma->low_mem, "low memory");
+ mem_kind_init(ma, &ma->hi_mem, "high memory");
#else
- mem_kind_init(&the_mem, "all memory");
+ mem_kind_init(ma, &ma->the_mem, "all memory");
#endif
- is_cache_check_scheduled = 0;
-#ifdef ERTS_THREADS_NO_SMP
- is_cache_check_requested = 0;
+ sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls));
+
+#if CAN_PARTLY_DESTROY
+ ma->min_seg_size = ~((Uint) 0);
#endif
+ }
}
-static ERTS_INLINE Uint tot_cache_size(void)
+static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma)
{
MemKind* mk;
Uint sz = 0;
- for (mk=mk_list; mk; mk=mk->next) {
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ for (mk=ma->mk_list; mk; mk=mk->next) {
sz += mk->cache_size;
}
return sz;
@@ -1552,25 +1641,13 @@ static ERTS_INLINE Uint tot_cache_size(void)
void
erts_mseg_late_init(void)
{
-#ifdef ERTS_THREADS_NO_SMP
- int handle =
- erts_register_async_ready_callback(
- check_schedule_cache_check);
-#endif
- erts_mtx_lock(&mseg_mutex);
- is_init_done = 1;
-#ifdef ERTS_THREADS_NO_SMP
- async_handle = handle;
-#endif
- if (tot_cache_size())
- schedule_cache_check();
- erts_mtx_unlock(&mseg_mutex);
-}
-
-void
-erts_mseg_exit(void)
-{
- mseg_shutdown();
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+ ma->is_init_done = 1;
+ if (tot_cache_size(ma))
+ schedule_cache_check(ma);
+ ERTS_MSEG_UNLOCK(ma);
}
#endif /* #if HAVE_ERTS_MSEG */
@@ -1599,12 +1676,13 @@ erts_mseg_test(unsigned long op,
erts_mseg_clear_cache();
return (unsigned long) 0;
case 0x405:
- return (unsigned long) erts_mseg_no();
+ return (unsigned long) erts_mseg_no(&erts_mseg_default_opt);
case 0x406: {
+ ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(0);
unsigned long res;
- erts_mtx_lock(&mseg_mutex);
- res = (unsigned long) tot_cache_size();
- erts_mtx_unlock(&mseg_mutex);
+ ERTS_MSEG_LOCK(ma);
+ res = (unsigned long) tot_cache_size(ma);
+ ERTS_MSEG_UNLOCK(ma);
return res;
}
#else /* #if HAVE_ERTS_MSEG */
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 8f116030a8..741080fb78 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -44,7 +44,7 @@ typedef struct {
Uint amcbf;
Uint rmcbf;
Uint mcs;
- Uint cci;
+ Uint nos;
} ErtsMsegInit_t;
#define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
@@ -60,6 +60,7 @@ typedef struct {
int preserv;
UWord abs_shrink_th;
UWord rel_shrink_th;
+ int sched_spec;
#if HALFWORD_HEAP
int low_mem;
#endif
@@ -75,14 +76,14 @@ void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *);
void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *,
const ErtsMsegOpt_t *);
void erts_mseg_clear_cache(void);
-Uint erts_mseg_no(void);
+void erts_mseg_cache_check(void);
+Uint erts_mseg_no( const ErtsMsegOpt_t *);
Uint erts_mseg_unit_size(void);
void erts_mseg_init(ErtsMsegInit_t *init);
void erts_mseg_late_init(void); /* Have to be called after all allocators,
threads and timers have been initialized. */
-void erts_mseg_exit(void);
-Eterm erts_mseg_info_options(int *, void*, Uint **, Uint *);
-Eterm erts_mseg_info(int *, void*, int, Uint **, Uint *);
+Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *);
+Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *);
#endif /* #if HAVE_ERTS_MSEG */
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index f5c785d683..26858052c4 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -68,6 +68,7 @@
# endif
# endif
#endif
+#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "erl_alloc.h"
@@ -114,7 +115,7 @@
#endif
#define ERTS_POLL_USE_WAKEUP_PIPE \
- (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP))
+ (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(USE_THREADS))
#ifdef ERTS_SMP
@@ -124,11 +125,11 @@
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_atomic32_xchg(&(PS)->polled, (erts_aint32_t) 1))
+ ((int) erts_atomic32_xchg_nob(&(PS)->polled, (erts_aint32_t) 1))
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_atomic32_set(&(PS)->polled, (erts_aint32_t) 0)
+ erts_atomic32_set_nob(&(PS)->polled, (erts_aint32_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
- ((int) erts_atomic32_read(&(PS)->polled))
+ ((int) erts_atomic32_read_nob(&(PS)->polled))
#else
@@ -142,11 +143,11 @@
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
- ((int) erts_smp_atomic32_read(&(PS)->have_update_requests))
+ ((int) erts_smp_atomic32_read_nob(&(PS)->have_update_requests))
#else
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS)
@@ -261,7 +262,6 @@ struct ErtsPollSet_ {
#ifdef ERTS_SMP
erts_atomic32_t polled;
erts_smp_mtx_t mtx;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fds[2];
@@ -269,10 +269,8 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_FALLBACK
int fallback_used;
#endif
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_t wakeup_state;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- volatile int wakeup_state;
#endif
erts_smp_atomic32_t timeout;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
@@ -345,21 +343,16 @@ static void print_misc_debug_info(void);
static ERTS_INLINE void
reset_wakeup_state(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
- ERTS_THR_MEMORY_BARRIER;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->wakeup_state = 0;
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_atomic32_set_mb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#endif
}
static ERTS_INLINE int
is_woken(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- return ps->wakeup_state != ERTS_POLL_NOT_WOKEN;
#else
return 0;
#endif
@@ -368,13 +361,9 @@ is_woken(ErtsPollSet ps)
static ERTS_INLINE int
is_interrupted_reset(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- return (erts_atomic32_xchg(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ return (erts_atomic32_xchg_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
== ERTS_POLL_WOKEN_INTR);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR;
- ps->wakeup_state = ERTS_POLL_NOT_WOKEN;
- return res;
#else
return 0;
#endif
@@ -383,16 +372,13 @@ is_interrupted_reset(ErtsPollSet ps)
static ERTS_INLINE void
woke_up(ErtsPollSet ps)
{
-#ifdef ERTS_SMP
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
if (wakeup_state == ERTS_POLL_NOT_WOKEN)
- (void) erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_WOKEN,
- ERTS_POLL_NOT_WOKEN);
- ASSERT(erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN)
- ps->wakeup_state = ERTS_POLL_WOKEN;
+ (void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
#endif
}
@@ -403,28 +389,27 @@ woke_up(ErtsPollSet ps)
#if ERTS_POLL_USE_WAKEUP_PIPE
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps, int interrupted)
+wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe)
{
int wake;
-#ifdef ERTS_SMP
- erts_aint32_t wakeup_state;
- if (!interrupted)
- wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
- ERTS_POLL_WOKEN,
- ERTS_POLL_NOT_WOKEN);
+ if (async_signal_safe)
+ wake = 1;
else {
- /*
- * We might unnecessarily write to the pipe, however,
- * that isn't problematic.
- */
- wakeup_state = erts_atomic32_read(&ps->wakeup_state);
- erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ erts_aint32_t wakeup_state;
+ if (!interrupted)
+ wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ else {
+ /*
+ * We might unnecessarily write to the pipe, however,
+ * that isn't problematic.
+ */
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
+ erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ }
+ wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
}
- wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN;
- ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_NOT_WOKEN;
-#endif
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
@@ -439,9 +424,17 @@ wake_poller(ErtsPollSet ps, int interrupted)
res = write(ps->wake_fds[1], "!", 1);
} while (res < 0 && errno == EINTR);
if (res <= 0 && errno != ERRNO_BLOCK) {
- fatal_error_async_signal_safe(__FILE__
- ":XXX:wake_poller(): "
- "Failed to write on wakeup pipe\n");
+ if (async_signal_safe)
+ fatal_error_async_signal_safe(__FILE__
+ ":XXX:wake_poller(): "
+ "Failed to write on wakeup pipe\n");
+ else
+ fatal_error("%s:%d:wake_poller(): "
+ "Failed to write to wakeup pipe fd=%d: "
+ "%s (%d)\n",
+ __FILE__, __LINE__,
+ ps->wake_fds[1],
+ erl_errno_id(errno), errno);
}
}
}
@@ -449,11 +442,18 @@ wake_poller(ErtsPollSet ps, int interrupted)
static ERTS_INLINE void
cleanup_wakeup_pipe(ErtsPollSet ps)
{
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ int intr = 0;
+#endif
int fd = ps->wake_fds[0];
int res;
do {
char buf[32];
res = read(fd, buf, sizeof(buf));
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (res > 0)
+ intr = 1;
+#endif
} while (res > 0 || (res < 0 && errno == EINTR));
if (res < 0 && errno != ERRNO_BLOCK) {
fatal_error("%s:%d:cleanup_wakeup_pipe(): "
@@ -463,6 +463,10 @@ cleanup_wakeup_pipe(ErtsPollSet ps)
fd,
erl_errno_id(errno), errno);
}
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (intr)
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+#endif
}
static void
@@ -839,7 +843,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
ASSERT(ps->fds_status[fd].used_events);
ps->fds_status[fd].used_events = 0;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
update_fallback_pollset(ps, fd);
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
break;
@@ -889,11 +893,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
if (!events) {
buf[buf_len].events = POLLREMOVE;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
buf[buf_len].events = events;
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
}
else {
if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
@@ -983,12 +987,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
}
if (used_events) {
if (!events) {
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
}
}
else {
if (events)
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
}
ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
@@ -1062,7 +1066,7 @@ update_pollset(ErtsPollSet ps, int fd)
epe.data.fd = epe_templ.data.fd;
res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
} while (res != 0 && errno == EINTR);
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
ps->fds_status[fd].used_events = 0;
}
@@ -1070,11 +1074,11 @@ update_pollset(ErtsPollSet ps, int fd)
/* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9
need a non-NULL event pointer even though it is ignored... */
op = EPOLL_CTL_DEL;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
}
else if (!ps->fds_status[fd].used_events) {
op = EPOLL_CTL_ADD;
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
}
else {
op = EPOLL_CTL_MOD;
@@ -1124,7 +1128,7 @@ update_pollset(ErtsPollSet ps, int fd)
/* Fall through ... */
case EPOLL_CTL_ADD: {
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
#if ERTS_POLL_USE_CONCURRENT_UPDATE
if (!*update_fallback) {
*update_fallback = 1;
@@ -1212,7 +1216,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
#if ERTS_POLL_USE_FALLBACK
ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
#endif
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
last_pix = --ps->no_poll_fds;
if (pix != last_pix) {
/* Move last pix to this pix */
@@ -1239,7 +1243,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
|| fd == ps->kp_fd);
#endif
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
if (pix >= ps->poll_fds_len)
grow_poll_fds(ps, pix);
@@ -1290,7 +1294,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
if (!ps->fds_status[fd].used_events) {
ASSERT(events);
- erts_smp_atomic_inc(&ps->no_of_user_fds);
+ erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds++;
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
@@ -1298,7 +1302,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
}
else if (!events) {
ASSERT(ps->fds_status[fd].used_events);
- erts_smp_atomic_dec(&ps->no_of_user_fds);
+ erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
ps->fds_status[fd].events = events;
#if ERTS_POLL_USE_FALLBACK
ps->no_select_fds--;
@@ -1497,7 +1501,7 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
#ifdef ERTS_SMP
if (final_do_wake)
- wake_poller(ps, 0);
+ wake_poller(ps, 0, 0);
#endif /* ERTS_SMP */
}
@@ -1520,7 +1524,7 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
#ifdef ERTS_SMP
if (*do_wake) {
- wake_poller(ps, 0);
+ wake_poller(ps, 0, 0);
}
#endif /* ERTS_SMP */
@@ -1893,10 +1897,10 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
}
static ERTS_INLINE int
-check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
+check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res)
{
- ASSERT(!*ps_locked);
- if (erts_smp_atomic_read(&ps->no_of_user_fds) == 0
+ int res;
+ if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0
&& tv->tv_usec == 0 && tv->tv_sec == 0) {
/* Nothing to poll and zero timeout; done... */
return 0;
@@ -1915,16 +1919,23 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
timeout = INT_MAX;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
- return epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ res = epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
struct timespec ts;
- ts.tv_sec = tv->tv_sec;
- ts.tv_nsec = tv->tv_usec*1000;
if (max_res > ps->res_events_len)
grow_res_events(ps, max_res);
- return kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec*1000;
+ res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
#endif /* ----------------------------------------- */
-
}
else /* use fallback (i.e. poll() or select()) */
#endif /* ERTS_POLL_USE_FALLBACK */
@@ -1937,8 +1948,8 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* the maximum number of file descriptors in the poll set.
*/
struct dvpoll poll_res;
- int nfds = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
-#ifdef ERTS_SMP
+ int nfds = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
+#if ERTS_POLL_USE_WAKEUP_PIPE
nfds++; /* Wakeup pipe */
#endif
if (timeout > INT_MAX)
@@ -1947,22 +1958,38 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
if (poll_res.dp_nfds > ps->res_events_len)
grow_res_events(ps, poll_res.dp_nfds);
poll_res.dp_fds = ps->res_events;
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
poll_res.dp_timeout = (int) timeout;
- return ioctl(ps->kp_fd, DP_POLL, &poll_res);
+ res = ioctl(ps->kp_fd, DP_POLL, &poll_res);
#elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
if (timeout > INT_MAX)
timeout = INT_MAX;
- return poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
+ res = poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
- int res;
+ SysTimeval to = *tv;
+
ps->res_input_fds = ps->input_fds;
ps->res_output_fds = ps->output_fds;
+
+#ifdef ERTS_SMP
+ if (to.tv_sec || to.tv_usec)
+ erts_thr_progress_prepare_wait(NULL);
+#endif
res = select(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
- tv);
+ &to);
#ifdef ERTS_SMP
+ if (to.tv_sec || to.tv_usec)
+ erts_thr_progress_finalize_wait(NULL);
if (res < 0
&& errno == EBADF
&& ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
@@ -1978,15 +2005,16 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
* have triggered, we fake an EAGAIN error and let the caller
* restart us.
*/
- SysTimeval zero_tv = {0, 0};
- *ps_locked = 1;
+ to.tv_sec = 0;
+ to.tv_usec = 0;
ERTS_POLLSET_LOCK(ps);
handle_update_requests(ps);
+ ERTS_POLLSET_UNLOCK(ps);
res = select(ps->max_fd + 1,
&ps->res_input_fds,
&ps->res_output_fds,
NULL,
- &zero_tv);
+ &to);
if (res == 0) {
errno = EAGAIN;
res = -1;
@@ -1996,6 +2024,11 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return res;
#endif /* ----------------------------------------- */
}
+#ifdef ERTS_SMP
+ if (timeout)
+ erts_thr_progress_finalize_wait(NULL);
+#endif
+ return res;
}
}
@@ -2007,7 +2040,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
{
int res, no_fds;
int ebadf = 0;
- int ps_locked;
+#ifdef ERTS_SMP
+ int ps_locked = 0;
+#endif
SysTimeval *tvp;
SysTimeval itv;
@@ -2049,8 +2084,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
}
#endif
- ps_locked = 0;
- res = check_fd_events(ps, tvp, no_fds, &ps_locked);
+ res = check_fd_events(ps, tvp, no_fds);
woke_up(ps);
@@ -2072,10 +2106,8 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
#ifdef ERTS_SMP
- if (!ps_locked) {
- ps_locked = 1;
- ERTS_POLLSET_LOCK(ps);
- }
+ ps_locked = 1;
+ ERTS_POLLSET_LOCK(ps);
#endif
no_fds = save_poll_result(ps, pr, no_fds, res, ebadf);
@@ -2111,19 +2143,26 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
void
ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
{
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- /*
- * NOTE: This function might be called from signal handlers in the
- * non-smp case; therefore, it has to be async-signal safe in
- * the non-smp case.
- */
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
if (!set)
reset_wakeup_state(ps);
else
- wake_poller(ps, 1);
+ wake_poller(ps, 1, 0);
#endif
}
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+void
+ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps)
+{
+ /*
+     * NOTE: This function is called from signal handlers; it,
+     * therefore, has to be async-signal safe.
+ */
+ wake_poller(ps, 1, 1);
+}
+#endif
+
/*
* erts_poll_interrupt_timed():
* If 'set' != 0, interrupt thread blocked in erts_poll_wait() if it
@@ -2132,21 +2171,21 @@ ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
void
ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
int set,
- long msec)
+ erts_short_time_t msec)
{
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
if (!set)
reset_wakeup_state(ps);
else {
if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
- wake_poller(ps, 1);
+ wake_poller(ps, 1, 0);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
else {
if (ERTS_POLLSET_IS_POLLED(ps))
- erts_smp_atomic_inc(&ps->no_avoided_wakeups);
- erts_smp_atomic_inc(&ps->no_avoided_interrupts);
+ erts_smp_atomic_inc_nob(&ps->no_avoided_wakeups);
+ erts_smp_atomic_inc_nob(&ps->no_avoided_interrupts);
}
- erts_smp_atomic_inc(&ps->no_interrupt_timed);
+ erts_smp_atomic_inc_nob(&ps->no_interrupt_timed);
#endif
}
#endif
@@ -2208,7 +2247,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = 0;
ps->fds_status = NULL;
ps->fds_status_len = 0;
- erts_smp_atomic_init(&ps->no_of_user_fds, 0);
+ erts_smp_atomic_init_nob(&ps->no_of_user_fds, 0);
#if ERTS_POLL_USE_KERNEL_POLL
ps->kp_fd = -1;
#if ERTS_POLL_USE_EPOLL
@@ -2260,16 +2299,14 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->update_requests.next = NULL;
ps->update_requests.len = 0;
ps->curr_upd_req_block = &ps->update_requests;
- erts_smp_atomic32_init(&ps->have_update_requests, 0);
+ erts_smp_atomic32_init_nob(&ps->have_update_requests, 0);
#endif
#ifdef ERTS_SMP
- erts_atomic32_init(&ps->polled, 0);
+ erts_atomic32_init_nob(&ps->polled, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
-#ifdef ERTS_SMP
- erts_atomic32_init(&ps->wakeup_state, (erts_aint32_t) 0);
-#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->wakeup_state = 0;
+#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
create_wakeup_pipe(ps);
@@ -2291,11 +2328,11 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = kp_fd + 1;
ps->kp_fd = kp_fd;
#endif
- erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
+ erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
- erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
- erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
- erts_smp_atomic_init(&ps->no_interrupt_timed, 0);
+ erts_smp_atomic_init_nob(&ps->no_avoided_wakeups, 0);
+ erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0);
+ erts_smp_atomic_init_nob(&ps->no_interrupt_timed, 0);
#endif
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
handle_update_requests(ps);
@@ -2303,7 +2340,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#if ERTS_POLL_USE_FALLBACK
ps->fallback_used = 0;
#endif
- erts_smp_atomic_set(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
+ erts_smp_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
erts_smp_spin_lock(&pollsets_lock);
ps->next = pollsets;
@@ -2449,8 +2486,8 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->memory_size = size;
- pip->poll_set_size = (int) erts_smp_atomic_read(&ps->no_of_user_fds);
-#ifdef ERTS_SMP
+ pip->poll_set_size = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
+#if ERTS_POLL_USE_WAKEUP_PIPE
pip->poll_set_size++; /* Wakeup pipe */
#endif
@@ -2507,9 +2544,9 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->max_fds = max_fds;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
- pip->no_avoided_wakeups = erts_smp_atomic_read(&ps->no_avoided_wakeups);
- pip->no_avoided_interrupts = erts_smp_atomic_read(&ps->no_avoided_interrupts);
- pip->no_interrupt_timed = erts_smp_atomic_read(&ps->no_interrupt_timed);
+ pip->no_avoided_wakeups = erts_smp_atomic_read_nob(&ps->no_avoided_wakeups);
+ pip->no_avoided_interrupts = erts_smp_atomic_read_nob(&ps->no_avoided_interrupts);
+ pip->no_interrupt_timed = erts_smp_atomic_read_nob(&ps->no_interrupt_timed);
#endif
ERTS_POLLSET_UNLOCK(ps);
@@ -2529,7 +2566,7 @@ fatal_error(char *format, ...)
{
va_list ap;
- if (ERTS_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
+ if (ERTS_SOMEONE_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
/*
* Crash dump writing and reception of sigusr1 (which will
* result in a crash dump) closes all file descriptors. This
@@ -2549,7 +2586,7 @@ fatal_error(char *format, ...)
static void
fatal_error_async_signal_safe(char *error_str)
{
- if (ERTS_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
+ if (ERTS_SOMEONE_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
/* See comment above in fatal_error() */
return;
}
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 725a77a152..8dde619105 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -216,11 +216,14 @@ typedef struct {
#endif
} ErtsPollInfo;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+void ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet);
+#endif
void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet,
int);
void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet,
int,
- long);
+ erts_short_time_t);
ErtsPollEvents ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet,
ErtsSysFdType,
ErtsPollEvents,
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index d8d51b192c..c8fcec8547 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -129,10 +129,12 @@
#define HAVE_ERTS_CHECK_IO_DEBUG
int erts_check_io_debug(void);
-
-#ifndef ENABLE_CHILD_WAITER_THREAD
+#ifndef ERTS_SMP
# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#endif
+
+#ifndef ENABLE_CHILD_WAITER_THREAD
# ifdef ERTS_SMP
# define ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
void erts_check_children(void);
@@ -144,6 +146,7 @@ typedef void *GETENV_STATE;
/*
** For the erl_timer_sup module.
*/
+typedef time_t erts_time_t;
typedef struct timeval SysTimeval;
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index bafbbb0f6c..f94e0f2296 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -52,6 +52,7 @@
#define ERTS_WANT_GOT_SIGUSR1
#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
#include "sys.h"
+#include "erl_thr_progress.h"
#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
#define __DARWIN__ 1
@@ -127,7 +128,6 @@ static ErtsSysReportExit *report_exit_list;
static ErtsSysReportExit *report_exit_transit_list;
#endif
-extern int check_async_ready(void);
extern int driver_interrupt(int, int);
extern void do_break(void);
@@ -167,12 +167,12 @@ static int debug_log = 0;
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 \
- erts_smp_atomic32_set(&erts_got_sigusr1, 1)
+ erts_smp_atomic32_set_mb(&erts_got_sigusr1, 1)
#define ERTS_UNSET_GOT_SIGUSR1 \
- erts_smp_atomic32_set(&erts_got_sigusr1, 0)
+ erts_smp_atomic32_set_mb(&erts_got_sigusr1, 0)
static erts_smp_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
- ((int) erts_smp_atomic32_xchg(&have_prepared_crash_dump, 1))
+ ((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1))
#else
volatile int erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
@@ -242,9 +242,9 @@ static int max_files = -1;
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -263,8 +263,9 @@ int erts_use_kernel_poll = 0;
struct {
int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+ void (*check_io_as_interrupt)(void);
void (*check_io_interrupt)(int);
- void (*check_io_interrupt_tmd)(int, long);
+ void (*check_io_interrupt_tmd)(int, erts_short_time_t);
void (*check_io)(int);
Uint (*size)(void);
Eterm (*info)(void *);
@@ -302,6 +303,9 @@ init_check_io(void)
if (erts_use_kernel_poll) {
io_func.select = driver_select_kp;
io_func.event = driver_event_kp;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+ io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp;
+#endif
io_func.check_io_interrupt = erts_check_io_interrupt_kp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
io_func.check_io = erts_check_io_kp;
@@ -314,6 +318,9 @@ init_check_io(void)
else {
io_func.select = driver_select_nkp;
io_func.event = driver_event_nkp;
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+ io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp;
+#endif
io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
io_func.check_io = erts_check_io_nkp;
@@ -325,6 +332,11 @@ init_check_io(void)
}
}
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)()
+#else
+#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_interrupt)(1)
+#endif
#define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
#define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
#define ERTS_CHK_IO (*io_func.check_io)
@@ -339,6 +351,11 @@ init_check_io(void)
max_files = erts_check_io_max_files();
}
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_async_sig_interrupt()
+#else
+#define ERTS_CHK_IO_AS_INTR() erts_check_io_interrupt(1)
+#endif
#define ERTS_CHK_IO_INTR erts_check_io_interrupt
#define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
#define ERTS_CHK_IO erts_check_io
@@ -346,15 +363,15 @@ init_check_io(void)
#endif
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt(int set)
{
ERTS_CHK_IO_INTR(set);
}
+#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, long msec)
+erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
{
ERTS_CHK_IO_INTR_TMD(set, msec);
}
@@ -364,7 +381,7 @@ Uint
erts_sys_misc_mem_sz(void)
{
Uint res = ERTS_CHK_IO_SZ();
- res += erts_smp_atomic_read(&sys_misc_mem_sz);
+ res += erts_smp_atomic_read_mb(&sys_misc_mem_sz);
return res;
}
@@ -509,9 +526,9 @@ erts_sys_pre_init(void)
#endif
}
#ifdef ERTS_SMP
- erts_smp_atomic32_init(&erts_break_requested, 0);
- erts_smp_atomic32_init(&erts_got_sigusr1, 0);
- erts_smp_atomic32_init(&have_prepared_crash_dump, 0);
+ erts_smp_atomic32_init_nob(&erts_break_requested, 0);
+ erts_smp_atomic32_init_nob(&erts_got_sigusr1, 0);
+ erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
#else
erts_break_requested = 0;
erts_got_sigusr1 = 0;
@@ -521,13 +538,12 @@ erts_sys_pre_init(void)
children_died = 0;
#endif
#endif /* USE_THREADS */
- erts_smp_atomic_init(&sys_misc_mem_sz, 0);
+ erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
void
erl_sys_init(void)
{
- erts_smp_rwmtx_init(&environ_rwmtx, "environ");
#if !DISABLE_VFORK
{
int res;
@@ -553,7 +569,7 @@ erl_sys_init(void)
+ sizeof(CHILD_SETUP_PROG_NAME)
+ 1);
child_setup_prog = erts_alloc(ERTS_ALC_T_CS_PROG_PATH, csp_path_sz);
- erts_smp_atomic_add(&sys_misc_mem_sz, csp_path_sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, csp_path_sz);
sprintf(child_setup_prog,
"%s%c%s",
bindir,
@@ -732,7 +748,7 @@ break_requested(void)
erl_exit(ERTS_INTR_EXIT, "");
ERTS_SET_BREAK_REQUESTED;
- ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
}
/* set up signal handlers for break and quit */
@@ -932,18 +948,13 @@ void
os_flavor(char* namebuf, /* Where to return the name. */
unsigned size) /* Size of name buffer. */
{
- static int called = 0;
- static struct utsname uts; /* Information about the system. */
-
- if (!called) {
- char* s;
+ struct utsname uts; /* Information about the system. */
+ char* s;
- (void) uname(&uts);
- called = 1;
- for (s = uts.sysname; *s; s++) {
- if (isupper((int) *s)) {
- *s = tolower((int) *s);
- }
+ (void) uname(&uts);
+ for (s = uts.sysname; *s; s++) {
+ if (isupper((int) *s)) {
+ *s = tolower((int) *s);
}
}
strcpy(namebuf, uts.sysname);
@@ -1022,14 +1033,15 @@ static struct driver_data {
/* Driver interfaces */
static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
-static int fd_control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT fd_control(ErlDrvData, unsigned int, char *, ErlDrvSizeT,
+ char **, ErlDrvSizeT);
static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
static int spawn_init(void);
static void fd_stop(ErlDrvData);
static void stop(ErlDrvData);
static void ready_input(ErlDrvData, ErlDrvEvent);
static void ready_output(ErlDrvData, ErlDrvEvent);
-static void output(ErlDrvData, char*, int);
+static void output(ErlDrvData, char*, ErlDrvSizeT);
static void outputv(ErlDrvData, ErlIOVec*);
static void stop_select(ErlDrvEvent, void*);
@@ -1108,31 +1120,6 @@ struct erl_drv_entry vanilla_driver_entry = {
stop_select
};
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static int async_drv_init(void);
-static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
-static void async_drv_stop(ErlDrvData);
-static void async_drv_input(ErlDrvData, ErlDrvEvent);
-
-/* INTERNAL use only */
-
-struct erl_drv_entry async_driver_entry = {
- async_drv_init,
- async_drv_start,
- async_drv_stop,
- NULL,
- async_drv_input,
- NULL,
- "async",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL
-};
-#endif
-
/* Handle SIGCHLD signals. */
#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
static RETSIGTYPE onchld(void)
@@ -1146,7 +1133,7 @@ static RETSIGTYPE onchld(int signum)
smp_sig_notify('C');
#else
children_died = 1;
- ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+ ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
#endif
}
@@ -1216,8 +1203,8 @@ static int spawn_init()
sys_sigset(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- erts_smp_atomic_add(&sys_misc_mem_sz,
- max_files * sizeof(struct driver_data));
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files * sizeof(struct driver_data));
for (i = 0; i < max_files; i++)
driver_data[i].pid = -1;
@@ -1740,10 +1727,10 @@ static int fd_get_window_size(int fd, Uint32 *width, Uint32 *height)
return -1;
}
-static int fd_control(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+static ErlDrvSSizeT fd_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
int fd = (int)(long)drv_data;
char resbuff[2*sizeof(Uint32)];
@@ -1925,8 +1912,8 @@ static void clear_fd_data(int fd)
{
if (fd_data[fd].sz > 0) {
erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= fd_data[fd].sz);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*fd_data[fd].sz);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= fd_data[fd].sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*fd_data[fd].sz);
}
fd_data[fd].buf = NULL;
fd_data[fd].sz = 0;
@@ -2015,18 +2002,20 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
int ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
- int n;
- int sz;
+ ssize_t n;
+ ErlDrvSizeT sz;
char lb[4];
char* lbp;
- int len = ev->size;
+ ErlDrvSizeT len = ev->size;
/* (len > ((unsigned long)-1 >> (4-pb)*8)) */
+ /* if (pb >= 0 && (len & (((ErlDrvSizeT)1 << (pb*8))) - 1) != len) {*/
if (((pb == 2) && (len > 0xffff)) || (pb == 1 && len > 0xff)) {
driver_failure_posix(ix, EINVAL);
return; /* -1; */
}
- put_int32(len, lb);
+ /* Handles 0 <= pb <= 4 only */
+ put_int32((Uint32) len, lb);
lbp = lb + (4-pb);
ev->iov[0].iov_base = lbp;
@@ -2057,14 +2046,14 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
}
-static void output(ErlDrvData e, char* buf, int len)
+static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
{
int fd = (int)(long)e;
int ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
- int n;
- int sz;
+ ssize_t n;
+ ErlDrvSizeT sz;
char lb[4];
char* lbp;
struct iovec iv[2];
@@ -2261,7 +2250,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
port_inp_failure(port_num, ready_fd, -1);
}
else {
- erts_smp_atomic_add(&sys_misc_mem_sz, h);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, h);
sys_memcpy(buf, cpos, bytes_left);
fd_data[ready_fd].buf = buf;
fd_data[ready_fd].sz = h;
@@ -2317,87 +2306,6 @@ static void stop_select(ErlDrvEvent fd, void* _)
close((int)fd);
}
-/*
-** Async opertation support
-*/
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-static void
-sys_async_ready_failed(int fd, int r, int err)
-{
- char buf[120];
- sprintf(buf, "sys_async_ready(): Fatal error: fd=%d, r=%d, errno=%d\n",
- fd, r, err);
- erts_silence_warn_unused_result(write(2, buf, strlen(buf)));
- abort();
-}
-
-/* called from threads !! */
-void sys_async_ready(int fd)
-{
- int r;
- while (1) {
- r = write(fd, "0", 1); /* signal main thread fd MUST be async_fd[1] */
- if (r == 1) {
- DEBUGF(("sys_async_ready(): r = 1\r\n"));
- break;
- }
- if (r < 0 && errno == EINTR) {
- DEBUGF(("sys_async_ready(): r = %d\r\n", r));
- continue;
- }
- sys_async_ready_failed(fd, r, errno);
- }
-}
-
-static int async_drv_init(void)
-{
- async_fd[0] = -1;
- async_fd[1] = -1;
- return 0;
-}
-
-static ErlDrvData async_drv_start(ErlDrvPort port_num,
- char* name, SysDriverOpts* opts)
-{
- if (async_fd[0] != -1)
- return ERL_DRV_ERROR_GENERAL;
- if (pipe(async_fd) < 0)
- return ERL_DRV_ERROR_GENERAL;
-
- DEBUGF(("async_drv_start: %d\r\n", port_num));
-
- SET_NONBLOCKING(async_fd[0]);
- driver_select(port_num, async_fd[0], ERL_DRV_READ, 1);
-
- if (init_async(async_fd[1]) < 0)
- return ERL_DRV_ERROR_GENERAL;
- return (ErlDrvData)port_num;
-}
-
-static void async_drv_stop(ErlDrvData e)
-{
- int port_num = (int)(long)e;
-
- DEBUGF(("async_drv_stop: %d\r\n", port_num));
-
- exit_async();
-
- driver_select(port_num, async_fd[0], ERL_DRV_READ, 0);
-
- close(async_fd[0]);
- close(async_fd[1]);
- async_fd[0] = async_fd[1] = -1;
-}
-
-
-static void async_drv_input(ErlDrvData e, ErlDrvEvent fd)
-{
- char *buf[32];
- DEBUGF(("async_drv_input\r\n"));
- while (read((int) fd, (void *) buf, 32) > 0); /* fd MUST be async_fd[0] */
- check_async_ready(); /* invoke all async_ready */
-}
-#endif
void erts_do_break_handling(void)
{
@@ -2409,11 +2317,7 @@ void erts_do_break_handling(void)
* therefore, make sure that all threads but this one are blocked before
* proceeding!
*/
- erts_smp_block_system(0);
- /*
- * NOTE: since we allow gc we are not allowed to lock
- * (any) process main locks while blocking system...
- */
+ erts_smp_thr_progress_block();
/* during break we revert to initial settings */
/* this is done differently for oldshell */
@@ -2441,7 +2345,7 @@ void erts_do_break_handling(void)
tcsetattr(0,TCSANOW,&temp_mode);
}
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
/* Fills in the systems representation of the jam/beam process identifier.
@@ -2465,7 +2369,7 @@ erts_sys_putenv(char *buffer, int sep_ix)
#else
Uint sz = strlen(buffer)+1;
env = erts_alloc(ERTS_ALC_T_PUTENV_STR, sz);
- erts_smp_atomic_add(&sys_misc_mem_sz, sz);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, sz);
strcpy(env,buffer);
#endif
erts_smp_rwmtx_rwlock(&environ_rwmtx);
@@ -2475,12 +2379,10 @@ erts_sys_putenv(char *buffer, int sep_ix)
}
int
-erts_sys_getenv(char *key, char *value, size_t *size)
+erts_sys_getenv__(char *key, char *value, size_t *size)
{
- char *orig_value;
int res;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
- orig_value = getenv(key);
+ char *orig_value = getenv(key);
if (!orig_value)
res = -1;
else {
@@ -2495,6 +2397,15 @@ erts_sys_getenv(char *key, char *value, size_t *size)
res = 0;
}
}
+ return res;
+}
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
erts_smp_rwmtx_runlock(&environ_rwmtx);
return res;
}
@@ -2504,33 +2415,8 @@ sys_init_io(void)
{
fd_data = (struct fd_data *)
erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
- erts_smp_atomic_add(&sys_misc_mem_sz,
- max_files * sizeof(struct fd_data));
-
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
- if (init_async(-1) < 0)
- erl_exit(1, "Failed to initialize async-threads\n");
-#else
- {
- /* This is speical stuff, starting a driver from the
- * system routines, but is a nice way of handling stuff
- * the erlang way
- */
- SysDriverOpts dopts;
- int ret;
-
- sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
- add_driver_entry(&async_driver_entry);
- ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
- DEBUGF(("open_driver = %d\n", ret));
- if (ret < 0)
- erl_exit(1, "Failed to open async driver\n");
- erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
- }
-#endif
-#endif
-
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files * sizeof(struct fd_data));
}
#if (0) /* unused? */
@@ -2757,15 +2643,7 @@ initiate_report_exit_status(ErtsSysReportExit *rep, int status)
rep->next = report_exit_transit_list;
rep->status = status;
report_exit_transit_list = rep;
- /*
- * We need the scheduler thread to call check_children().
- * If the scheduler thread is sleeping in a poll with a
- * timeout, we need to wake the scheduler thread. We use the
- * functionality of the async driver to do this, instead of
- * implementing yet another driver doing the same thing. A
- * little bit ugly, but it works...
- */
- sys_async_ready(async_fd[1]);
+ erts_sys_schedule_interrupt(1);
}
static int check_children(void)
@@ -2852,20 +2730,11 @@ erl_sys_schedule(int runnable)
{
#ifdef ERTS_SMP
ERTS_CHK_IO(!runnable);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
#else
- ERTS_CHK_IO_INTR(0);
- if (runnable) {
- ERTS_CHK_IO(0); /* Poll for I/O */
- check_async_ready(); /* Check async completions */
- } else {
- int wait_for_io = !check_async_ready();
- if (wait_for_io)
- wait_for_io = !check_children();
- ERTS_CHK_IO(wait_for_io);
- }
- (void) check_children();
+ ERTS_CHK_IO(runnable ? 0 : !check_children());
#endif
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+ (void) check_children();
}
@@ -2893,8 +2762,8 @@ smp_sig_notify(char c)
static void *
signal_dispatcher_thread_func(void *unused)
{
- int initialized = 0;
#if !CHLDWTHR
+ int initialized = 0;
int notify_check_children = 0;
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2922,20 +2791,20 @@ signal_dispatcher_thread_func(void *unused)
* to other threads.
*
* NOTE 2: The signal dispatcher thread is not a blockable
- * thread (i.e., it hasn't called
- * erts_register_blockable_thread()). This is
- * intentional. We want to be able to interrupt
- * writing of a crash dump by hitting C-c twice.
- * Since it isn't a blockable thread it is important
- * that it doesn't change the state of any data that
- * a blocking thread expects to have exclusive access
- * to (unless the signal dispatcher itself explicitly
- * is blocking all blockable threads).
+ * thread (i.e., not a thread managed by the
+ * erl_thr_progress module). This is intentional.
+ * We want to be able to interrupt writing of a crash
+ * dump by hitting C-c twice. Since it isn't a
+ * blockable thread it is important that it doesn't
+ * change the state of any data that a blocking thread
+ * expects to have exclusive access to (unless the
+ * signal dispatcher itself explicitly is blocking all
+ * blockable threads).
*/
switch (buf[i]) {
case 0: /* Emulator initialized */
- initialized = 1;
#if !CHLDWTHR
+ initialized = 1;
if (!notify_check_children)
#endif
break;
@@ -2970,7 +2839,7 @@ signal_dispatcher_thread_func(void *unused)
buf[i]);
}
}
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
return NULL;
}
@@ -3090,6 +2959,8 @@ erl_sys_args(int* argc, char** argv)
{
int i, j;
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+
i = 1;
ASSERT(argc && argv);
@@ -3151,4 +3022,5 @@ erl_sys_args(int* argc, char** argv)
argv[j++] = argv[i];
}
*argc = j;
+
}
diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys.h b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
index ae46403600..3d53238ea6 100644
--- a/erts/emulator/sys/vxworks/erl_vxworks_sys.h
+++ b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -158,6 +158,7 @@ typedef struct _vxworks_tms {
typedef long long SysHrTime;
+typedef time_t erts_time_t;
typedef struct timeval SysTimeval;
extern int sys_init_hrtime(void);
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
index c6e7b65f32..739b026fb1 100644
--- a/erts/emulator/sys/vxworks/sys.c
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -238,6 +238,12 @@ erl_sys_args(int* argc, char** argv)
ASSERT(max_files <= erts_vxworks_max_files);
}
+void
+erts_sys_schedule_interrupt(int set)
+{
+ erts_check_io_interrupt(set);
+}
+
/*
* Called from schedule() when it runs out of runnable processes,
* or when Erlang code has performed INPUT_REDUCTIONS reduction
@@ -246,7 +252,6 @@ erl_sys_args(int* argc, char** argv)
void
erl_sys_schedule(int runnable)
{
- erts_check_io_interrupt(0);
erts_check_io(!runnable);
}
@@ -309,7 +314,7 @@ static void request_break(void)
fprintf(stderr,"break!\n");
#endif
erts_break_requested = 1;
- erts_check_io_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
+ erts_check_io_async_sig_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
}
static void do_quit(void)
@@ -560,7 +565,7 @@ static void fd_stop(ErlDrvData);
static void stop(ErlDrvData);
static void ready_input(ErlDrvData fd, ErlDrvEvent ready_fd);
static void ready_output(ErlDrvData fd, ErlDrvEvent ready_fd);
-static void output(ErlDrvData fd, char *buf, int len);
+static void output(ErlDrvData fd, char *buf, ErlDrvSizeT len);
static void stop_select(ErlDrvEvent, void*);
struct erl_drv_entry spawn_driver_entry = {
@@ -1182,7 +1187,7 @@ static int sched_write(int port_num,int fd, char *buf, int len, int pb)
}
/* Fd is the value returned as drv_data by the start func */
-static void output(ErlDrvData drv_data, char *buf, int len)
+static void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
int buf_done, port_num, wval, pb, ofd;
byte lb[4];
@@ -1515,6 +1520,12 @@ erts_sys_getenv(char *key, char *value, size_t *size)
return res;
}
+int
+erts_sys_getenv__(char *key, char *value, size_t *size)
+{
+ return erts_sys_getenv(key, value, size);
+}
+
void
sys_init_io(void)
{
@@ -2025,9 +2036,6 @@ int erl_memory_show(int p0, int p1, int p2, int p3, int p4, int p5,
erts_printf("The memory block used by elib is save_malloc'ed "
"at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
}
-#ifdef NO_FIX_ALLOC
- erts_printf("Fix_alloc is disabled in this build\n");
-#endif
erts_printf("Statistics from elib_malloc:\n");
ELIB_LOCK;
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 074e2e247f..7a1d129cd5 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -309,9 +309,9 @@ struct ErtsPollSet_ {
#ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
#else
extern volatile int erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -371,19 +371,19 @@ do { \
static ERTS_INLINE int
is_io_ready(ErtsPollSet ps)
{
- return erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_WOKEN_IO_READY;
+ return erts_atomic32_read_nob(&ps->wakeup_state) == ERTS_POLL_WOKEN_IO_READY;
}
static ERTS_INLINE void
woke_up(ErtsPollSet ps)
{
- if (erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_NOT_WOKEN)
- erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_WOKEN_TIMEDOUT,
- ERTS_POLL_NOT_WOKEN);
+ if (erts_atomic32_read_nob(&ps->wakeup_state) == ERTS_POLL_NOT_WOKEN)
+ erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_TIMEDOUT,
+ ERTS_POLL_NOT_WOKEN);
#ifdef DEBUG
{
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
switch (wakeup_state) {
case ERTS_POLL_WOKEN_IO_READY:
case ERTS_POLL_WOKEN_INTR:
@@ -401,7 +401,7 @@ static ERTS_INLINE int
wakeup_cause(ErtsPollSet ps)
{
int res;
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
switch (wakeup_state) {
case ERTS_POLL_WOKEN_IO_READY:
res = 0;
@@ -439,11 +439,11 @@ poll_wait_timeout(ErtsPollSet ps, SysTimeval *tvp)
* by ResetEvent().
*/
ERTS_THR_MEMORY_BARRIER;
- if (erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
+ if (erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
return (DWORD) 0;
- if (timeout > ERTS_AINT32_T_MAX) /* Also prevents DWORD overflow */
- timeout = ERTS_AINT32_T_MAX;
+ if (timeout > ((time_t) ERTS_AINT32_T_MAX))
+ timeout = ERTS_AINT32_T_MAX; /* Also prevents DWORD overflow */
erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
return (DWORD) timeout;
@@ -455,17 +455,17 @@ wake_poller(ErtsPollSet ps, int io_ready)
erts_aint32_t wakeup_state;
if (io_ready) {
/* We may set the event multiple times. This is, however, harmless. */
- wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
}
else {
ERTS_THR_MEMORY_BARRIER;
- wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
&& wakeup_state != ERTS_POLL_WOKEN_INTR) {
- erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_WOKEN_INTR,
- wakeup_state);
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_INTR,
+ wakeup_state);
if (act == wakeup_state) {
wakeup_state = act;
break;
@@ -488,13 +488,13 @@ wake_poller(ErtsPollSet ps, int io_ready)
static ERTS_INLINE void
reset_io_ready(ErtsPollSet ps)
{
- erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
}
static ERTS_INLINE void
restore_io_ready(ErtsPollSet ps)
{
- erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
+ erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
}
/*
@@ -511,12 +511,12 @@ static ERTS_INLINE void
reset_interrupt(ErtsPollSet ps)
{
/* We need to keep io-ready if set */
- erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state);
while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
&& wakeup_state != ERTS_POLL_NOT_WOKEN) {
- erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
- ERTS_POLL_NOT_WOKEN,
- wakeup_state);
+ erts_aint32_t act = erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
+ ERTS_POLL_NOT_WOKEN,
+ wakeup_state);
if (wakeup_state == act)
break;
wakeup_state = act;
@@ -692,7 +692,7 @@ static void *break_waiter(void *param)
case WAIT_OBJECT_0:
ResetEvent(harr[0]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+ erts_atomic32_set_nob(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
ERTS_THR_MEMORY_BARRIER;
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
@@ -700,7 +700,7 @@ static void *break_waiter(void *param)
case (WAIT_OBJECT_0+1):
ResetEvent(harr[1]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+ erts_atomic32_set_nob(&break_waiter_state,BREAK_WAITER_GOT_HALT);
ERTS_THR_MEMORY_BARRIER;
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
@@ -1012,7 +1012,7 @@ void erts_poll_interrupt(ErtsPollSet ps, int set /* bool */)
void erts_poll_interrupt_timed(ErtsPollSet ps,
int set /* bool */,
- long msec)
+ erts_short_time_t msec)
{
HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
if (!set)
@@ -1153,23 +1153,29 @@ int erts_poll_wait(ErtsPollSet ps,
/*HARDDEBUGF(("timeout = %ld",(long) timeout));*/
- if (timeout > 0 && !erts_atomic32_read(&break_waiter_state)) {
+ if (timeout > 0 && !erts_atomic32_read_nob(&break_waiter_state)) {
HANDLE harr[2] = {ps->event_io_ready, break_happened_event};
int num_h = 2;
HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout));
ERTS_POLLSET_UNLOCK(ps);
+#ifdef ERTS_SMP
+ erts_thr_progress_prepare_wait(NULL);
+#endif
WaitForMultipleObjects(num_h, harr, FALSE, timeout);
+#ifdef ERTS_SMP
+ erts_thr_progress_finalize_wait(NULL);
+#endif
ERTS_POLLSET_LOCK(ps);
HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout));
woke_up(ps);
}
ERTS_UNSET_BREAK_REQUESTED;
- if(erts_atomic32_read(&break_waiter_state)) {
+ if(erts_atomic32_read_nob(&break_waiter_state)) {
erts_mtx_lock(&break_waiter_lock);
- break_state = erts_atomic32_read(&break_waiter_state);
- erts_atomic32_set(&break_waiter_state,0);
+ break_state = erts_atomic32_read_nob(&break_waiter_state);
+ erts_atomic32_set_nob(&break_waiter_state,0);
ResetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
switch (break_state) {
@@ -1236,7 +1242,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic32_set(&ps->timeout, ERTS_AINT32_T_MAX);
+ erts_smp_atomic32_set_nob(&ps->timeout, ERTS_AINT32_T_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1316,11 +1322,11 @@ ErtsPollSet erts_poll_create_pollset(void)
ps->standby_wait_event = CreateManualEvent(FALSE);
ps->restore_events = 0;
- erts_atomic32_init(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+ erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#ifdef ERTS_SMP
erts_smp_mtx_init(&ps->mtx, "pollset");
#endif
- erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
+ erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
@@ -1372,7 +1378,7 @@ void erts_poll_init(void)
erts_mtx_init(&break_waiter_lock,"break_waiter_lock");
break_happened_event = CreateManualEvent(FALSE);
- erts_atomic32_init(&break_waiter_state, 0);
+ erts_atomic32_init_nob(&break_waiter_state, 0);
erts_thr_create(&thread, &break_waiter, NULL, NULL);
ERTS_UNSET_BREAK_REQUESTED;
diff --git a/erts/emulator/sys/win32/erl_win32_sys_ddll.c b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
index a19f49af10..d00eed932b 100644
--- a/erts/emulator/sys/win32/erl_win32_sys_ddll.c
+++ b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,6 +25,9 @@
#include <windows.h>
#define GET_ERTS_ALC_TEST
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "global.h"
#include "erl_alloc.h"
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
index ecb06868d5..ec5141838a 100644
--- a/erts/emulator/sys/win32/erl_win_dyn_driver.h
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -39,11 +39,11 @@ WDD_TYPEDEF(int, driver_exit, (ErlDrvPort, int));
WDD_TYPEDEF(int, driver_failure_eof, (ErlDrvPort));
WDD_TYPEDEF(int, driver_select, (ErlDrvPort, ErlDrvEvent, int, int));
WDD_TYPEDEF(int, driver_event, (ErlDrvPort, ErlDrvEvent,ErlDrvEventData));
-WDD_TYPEDEF(int, driver_output, (ErlDrvPort, char *, int));
-WDD_TYPEDEF(int, driver_output2, (ErlDrvPort, char *, int,char *, int));
-WDD_TYPEDEF(int, driver_output_binary, (ErlDrvPort, char *, int,ErlDrvBinary*, int, int));
-WDD_TYPEDEF(int, driver_outputv, (ErlDrvPort, char*, int, ErlIOVec *,int));
-WDD_TYPEDEF(int, driver_vec_to_buf, (ErlIOVec *, char *, int));
+WDD_TYPEDEF(int, driver_output, (ErlDrvPort, char *, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_output2, (ErlDrvPort, char *, ErlDrvSizeT ,char *, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_output_binary, (ErlDrvPort, char *, ErlDrvSizeT, ErlDrvBinary*, ErlDrvSizeT, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_outputv, (ErlDrvPort, char*, ErlDrvSizeT, ErlIOVec *, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_vec_to_buf, (ErlIOVec *, char *, ErlDrvSizeT));
WDD_TYPEDEF(int, driver_set_timer, (ErlDrvPort, unsigned long));
WDD_TYPEDEF(int, driver_cancel_timer, (ErlDrvPort));
WDD_TYPEDEF(int, driver_read_timer, (ErlDrvPort, unsigned long *));
@@ -51,22 +51,22 @@ WDD_TYPEDEF(char *, erl_errno_id, (int));
WDD_TYPEDEF(void, set_busy_port, (ErlDrvPort, int));
WDD_TYPEDEF(void, set_port_control_flags, (ErlDrvPort, int));
WDD_TYPEDEF(int, get_port_flags, (ErlDrvPort));
-WDD_TYPEDEF(ErlDrvBinary *, driver_alloc_binary, (int));
-WDD_TYPEDEF(ErlDrvBinary *, driver_realloc_binary, (ErlDrvBinary *, int));
+WDD_TYPEDEF(ErlDrvBinary *, driver_alloc_binary, (ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvBinary *, driver_realloc_binary, (ErlDrvBinary *, ErlDrvSizeT));
WDD_TYPEDEF(void, driver_free_binary, (ErlDrvBinary *));
-WDD_TYPEDEF(void *, driver_alloc, (size_t));
-WDD_TYPEDEF(void *, driver_realloc, (void *, size_t));
+WDD_TYPEDEF(void *, driver_alloc, (ErlDrvSizeT));
+WDD_TYPEDEF(void *, driver_realloc, (void *, ErlDrvSizeT));
WDD_TYPEDEF(void, driver_free, (void *));
-WDD_TYPEDEF(int, driver_enq, (ErlDrvPort, char*, int));
-WDD_TYPEDEF(int, driver_pushq, (ErlDrvPort, char*, int));
-WDD_TYPEDEF(int, driver_deq, (ErlDrvPort, int));
-WDD_TYPEDEF(int, driver_sizeq, (ErlDrvPort));
-WDD_TYPEDEF(int, driver_enq_bin, (ErlDrvPort, ErlDrvBinary *, int,int));
-WDD_TYPEDEF(int, driver_pushq_bin, (ErlDrvPort, ErlDrvBinary *, int,int));
-WDD_TYPEDEF(int, driver_peekqv, (ErlDrvPort, ErlIOVec *));
+WDD_TYPEDEF(int, driver_enq, (ErlDrvPort, char*, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_pushq, (ErlDrvPort, char*, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_deq, (ErlDrvPort, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_sizeq, (ErlDrvPort));
+WDD_TYPEDEF(int, driver_enq_bin, (ErlDrvPort, ErlDrvBinary *, ErlDrvSizeT, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_pushq_bin, (ErlDrvPort, ErlDrvBinary *, ErlDrvSizeT, ErlDrvSizeT));
+WDD_TYPEDEF(ErlDrvSizeT, driver_peekqv, (ErlDrvPort, ErlIOVec *));
WDD_TYPEDEF(SysIOVec *, driver_peekq, (ErlDrvPort, int *));
-WDD_TYPEDEF(int, driver_enqv, (ErlDrvPort, ErlIOVec *, int));
-WDD_TYPEDEF(int, driver_pushqv, (ErlDrvPort, ErlIOVec *, int));
+WDD_TYPEDEF(int, driver_enqv, (ErlDrvPort, ErlIOVec *, ErlDrvSizeT));
+WDD_TYPEDEF(int, driver_pushqv, (ErlDrvPort, ErlIOVec *, ErlDrvSizeT));
WDD_TYPEDEF(void, add_driver_entry, (ErlDrvEntry *));
WDD_TYPEDEF(int, remove_driver_entry, (ErlDrvEntry *));
WDD_TYPEDEF(ErlDrvTermData, driver_mk_atom, (char*));
@@ -83,10 +83,10 @@ WDD_TYPEDEF(void *, driver_dl_open, (char *));
WDD_TYPEDEF(void *, driver_dl_sym, (void *, char *));
WDD_TYPEDEF(int, driver_dl_close, (void *));
WDD_TYPEDEF(char *, driver_dl_error, (void));
-WDD_TYPEDEF(unsigned long, erts_alc_test, (unsigned long,
- unsigned long,
- unsigned long,
- unsigned long));
+WDD_TYPEDEF(ErlDrvUInt, erts_alc_test, (ErlDrvUInt,
+ ErlDrvUInt,
+ ErlDrvUInt,
+ ErlDrvUInt));
WDD_TYPEDEF(ErlDrvSInt, driver_binary_get_refc, (ErlDrvBinary *dbp));
WDD_TYPEDEF(ErlDrvSInt, driver_binary_inc_refc, (ErlDrvBinary *dbp));
WDD_TYPEDEF(ErlDrvSInt, driver_binary_dec_refc, (ErlDrvBinary *dbp));
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 92d8577537..03298a6c54 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -117,9 +117,30 @@ int erts_check_io_debug(void);
#define SYS_CLK_TCK 1000
#define SYS_CLOCK_RESOLUTION 1
+#if SIZEOF_TIME_T != 8
+# error "Unexpected sizeof(time_t)"
+#endif
+
+/*
+ * gcc uses a 4 byte time_t and vc++ uses an 8 byte time_t.
+ * Types seen in beam_emu.c *need* to have the same size
+ * as in the rest of the system...
+ */
+typedef __int64 erts_time_t;
+
+struct tm *sys_localtime_r(time_t *epochs, struct tm *ptm);
+struct tm *sys_gmtime_r(time_t *epochs, struct tm *ptm);
+time_t sys_mktime( struct tm *ptm);
+
+#define localtime_r sys_localtime_r
+#define HAVE_LOCALTIME_R 1
+#define gmtime_r sys_gmtime_r
+#define HAVE_GMTIME_R
+#define mktime sys_mktime
+
typedef struct {
- long tv_sec;
- long tv_usec;
+ erts_time_t tv_sec;
+ erts_time_t tv_usec;
} SysTimeval;
typedef struct {
@@ -169,10 +190,12 @@ void erts_sys_env_init(void);
extern volatile int erl_fp_exception;
#include <float.h>
-#if defined (__GNUC__)
+/* I suspect this test isn't right; it might depend on the version of GCC
+   rather than on whether it's a MINGW gcc, but I haven't been able to pinpoint
+   the exact point where _finite was added to the headers in cygwin... */
+#if defined (__GNUC__) && !defined(__MINGW32__)
int _finite(double x);
#endif
-#endif
/*#define NO_FPE_SIGNALS*/
#define erts_get_current_fp_exception() NULL
@@ -191,13 +214,6 @@ int _finite(double x);
#define erts_sys_block_fpe() 0
#define erts_sys_unblock_fpe(x) do{}while(0)
-#define SIZEOF_SHORT 2
-#define SIZEOF_INT 4
-#define SIZEOF_LONG 4
-#define SIZEOF_VOID_P 4
-#define SIZEOF_SIZE_T 4
-#define SIZEOF_OFF_T 4
-
/*
* Seems to be missing.
*/
@@ -210,3 +226,4 @@ typedef long ssize_t;
int init_async(int);
int exit_async(void);
#endif
+#endif
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index a2159d063c..b106f0932d 100644..100755
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -198,7 +198,7 @@ Uint
erts_sys_misc_mem_sz(void)
{
Uint res = (Uint) erts_check_io_size();
- res += (Uint) erts_smp_atomic_read(&sys_misc_mem_sz);
+ res += (Uint) erts_smp_atomic_read_mb(&sys_misc_mem_sz);
return res;
}
@@ -216,6 +216,9 @@ void sys_tty_reset(int exit_code)
void erl_sys_args(int* argc, char** argv)
{
char *event_name;
+
+ erts_sys_env_init();
+
nohup = get_and_remove_option(argc, argv, "-nohup");
#ifdef DEBUG
@@ -471,7 +474,7 @@ static int spawn_init(void);
static int fd_init(void);
static void fd_stop(ErlDrvData);
static void stop(ErlDrvData);
-static void output(ErlDrvData, char*, int);
+static void output(ErlDrvData, char*, ErlDrvSizeT);
static void ready_input(ErlDrvData, ErlDrvEvent);
static void ready_output(ErlDrvData, ErlDrvEvent);
static void stop_select(ErlDrvEvent, void*);
@@ -566,51 +569,6 @@ struct erl_drv_entry vanilla_driver_entry = {
stop_select
};
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-
-static int async_drv_init(void);
-static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
-static void async_drv_stop(ErlDrvData);
-static void async_drv_input(ErlDrvData, ErlDrvEvent);
-
-/* INTERNAL use only */
-
-void null_output(ErlDrvData drv_data, char* buf, int len)
-{
-}
-
-void null_ready_output(ErlDrvData drv_data, ErlDrvEvent event)
-{
-}
-
-struct erl_drv_entry async_driver_entry = {
- async_drv_init,
- async_drv_start,
- async_drv_stop,
- null_output,
- async_drv_input,
- null_ready_output,
- "async",
- NULL, /* finish */
- NULL, /* handle */
- NULL, /* control */
- NULL, /* timeout */
- NULL, /* outputv */
- NULL, /* ready_async */
- NULL, /* flush */
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- 0, /* ERL_DRV_FLAGs */
- NULL,
- NULL, /* process_exit */
- stop_select
-};
-
-#endif
-
/*
* Initialises a DriverData structure.
*
@@ -648,7 +606,7 @@ new_driver_data(int port_num, int packet_bytes, int wait_objs_required, int use_
erts_smp_mtx_unlock(&sys_driver_data_lock);
return NULL;
}
- erts_smp_atomic_add(&sys_misc_mem_sz, dp->inBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
dp->outBufSize = 0;
dp->outbuf = NULL;
dp->port_num = port_num;
@@ -733,8 +691,8 @@ release_driver_data(DriverData* dp)
#endif
if (dp->inbuf != NULL) {
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->inBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->inBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->inBufSize);
DRV_BUF_FREE(dp->inbuf);
dp->inBufSize = 0;
dp->inbuf = NULL;
@@ -742,8 +700,8 @@ release_driver_data(DriverData* dp)
ASSERT(dp->inBufSize == 0);
if (dp->outbuf != NULL) {
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
DRV_BUF_FREE(dp->outbuf);
dp->outBufSize = 0;
dp->outbuf = NULL;
@@ -1162,7 +1120,8 @@ spawn_init(void)
#endif
driver_data = (struct driver_data *)
erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- erts_smp_atomic_add(&sys_misc_mem_sz, max_files*sizeof(struct driver_data));
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ max_files*sizeof(struct driver_data));
for (i = 0; i < max_files; i++)
driver_data[i].port_num = PORT_FREE;
@@ -1675,17 +1634,6 @@ create_child_process
WaitForSingleObject(hProcess, 50);
}
- /*
- * When an application spawns a process repeatedly, a new thread
- * instance will be created for each process but the previous
- * instances may not be cleaned up. This results in a significant
- * virtual memory loss each time the process is spawned. If there
- * is a WaitForInputIdle() call between CreateProcess() and
- * CloseHandle(), the problem does not occur. PSS ID Number: Q124121
- */
-
- WaitForInputIdle(piProcInfo.hProcess, 5000);
-
return ok;
}
@@ -1698,7 +1646,7 @@ create_child_process
static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL overlapped_io)
{
SECURITY_ATTRIBUTES sa = {sizeof(SECURITY_ATTRIBUTES), NULL, TRUE};
- char pipe_name[128]; /* Name of pipe. */
+ char pipe_name[256]; /* Name of pipe. */
Uint calls;
/*
@@ -1735,9 +1683,9 @@ static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL o
* Otherwise, create named pipes.
*/
- calls = (Uint) erts_smp_atomic_inctest(&pipe_creation_counter);
- sprintf(pipe_name, "\\\\.\\pipe\\erlang44_%d_%d",
- getpid(), calls);
+ calls = (UWord) erts_smp_atomic_inc_read_nob(&pipe_creation_counter);
+ erts_snprintf(pipe_name, sizeof(pipe_name),
+ "\\\\.\\pipe\\erlang44_%d_%bpu", getpid(), calls);
DEBUGF(("Creating pipe %s\n", pipe_name));
sa.bInheritHandle = inheritRead;
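
The create_pipe() change replaces an unchecked sprintf into a 128-byte buffer with a bounded erts_snprintf into 256 bytes, and fetches the counter with an explicit atomic increment-and-read. A rough portable-C11 equivalent of the naming scheme (the helper is hypothetical; erts_snprintf's %bpu format is ERTS-specific, so plain %lu is used here):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong pipe_creation_counter;

    /* Sketch: build a per-process, per-call unique pipe name without ever
     * overrunning the destination buffer. */
    static void make_pipe_name(char *buf, size_t bufsz, unsigned long pid)
    {
        unsigned long calls = atomic_fetch_add(&pipe_creation_counter, 1) + 1;
        snprintf(buf, bufsz, "\\\\.\\pipe\\erlang44_%lu_%lu", pid, calls);
    }
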
@@ -2483,13 +2431,13 @@ threaded_exiter(LPVOID param)
*/
static void
-output(ErlDrvData drv_data, char* buf, int len)
+output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
/* long drv_data; /* The slot to use in the driver data table.
* For Windows NT, this is *NOT* a file handle.
* The handle is found in the driver data.
*/
-/* char *buf; /* Pointer to data to write to the port program. */
-/* int len; /* Number of bytes to write. */
+/* char *buf; /* Pointer to data to write to the port program. */
+/* ErlDrvSizeT len; /* Number of bytes to write. */
{
DriverData* dp;
int pb; /* The header size for this port. */
@@ -2529,7 +2477,7 @@ output(ErlDrvData drv_data, char* buf, int len)
}
dp->outBufSize = pb+len;
- erts_smp_atomic_add(&sys_misc_mem_sz, dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->outBufSize);
/*
* Store header bytes (if any).
@@ -2558,8 +2506,8 @@ output(ErlDrvData drv_data, char* buf, int len)
} else {
dp->out.ov.Offset += pb+len; /* For vanilla driver. */
/* XXX OffsetHigh should be changed too. */
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
DRV_BUF_FREE(dp->outbuf);
dp->outBufSize = 0;
dp->outbuf = NULL;
@@ -2673,9 +2621,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
error = ERROR_NOT_ENOUGH_MEMORY;
break; /* Break out of loop into error handler. */
}
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->inBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz,
- dp->totalNeeded - dp->inBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ dp->totalNeeded - dp->inBufSize);
dp->inBufSize = dp->totalNeeded;
dp->inbuf = new_buf;
}
@@ -2771,12 +2719,12 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event));
set_busy_port(dp->port_num, 0);
if (!(dp->outbuf)) {
- /* Happens because event sometimes get signalled during a succesful
+ /* Happens because event sometimes get signalled during a successful
write... */
return;
}
- ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
- erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
DRV_BUF_FREE(dp->outbuf);
dp->outBufSize = 0;
dp->outbuf = NULL;
@@ -2824,30 +2772,6 @@ sys_init_io(void)
We estimate the number to twice the amount of ports.
We really dont know on windows, do we? */
max_files = 2*erts_max_ports;
-
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
- if (init_async(-1) < 0)
- erl_exit(1, "Failed to initialize async-threads\n");
-#else
- {
- /* This is special stuff, starting a driver from the
- * system routines, but is a nice way of handling stuff
- * the erlang way
- */
- SysDriverOpts dopts;
- int ret;
-
- sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
- add_driver_entry(&async_driver_entry);
- ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
- DEBUGF(("open_driver = %d\n", ret));
- if (ret < 0)
- erl_exit(1, "Failed to open async driver\n");
- erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
- }
-#endif
-#endif
}
#ifdef ERTS_SMP
@@ -2926,8 +2850,8 @@ Preload* sys_preloaded(void)
(num_preloaded+1)*sizeof(Preload));
res_name = erts_alloc(ERTS_ALC_T_PRELOADED,
(num_preloaded+1)*sizeof(unsigned));
- erts_smp_atomic_add(&sys_misc_mem_sz,
- (num_preloaded+1)*sizeof(Preload)
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+ (num_preloaded+1)*sizeof(Preload)
+ (num_preloaded+1)*sizeof(unsigned));
for (i = 0; i < num_preloaded; i++) {
int n;
@@ -2939,7 +2863,7 @@ Preload* sys_preloaded(void)
n = GETWORD(data);
data += 2;
preloaded[i].name = erts_alloc(ERTS_ALC_T_PRELOADED, n+1);
- erts_smp_atomic_add(&sys_misc_mem_sz, n+1);
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, n+1);
sys_memcpy(preloaded[i].name, data, n);
preloaded[i].name[n] = '\0';
data += n;
@@ -3281,7 +3205,7 @@ erts_sys_pre_init(void)
#endif
}
#endif
- erts_smp_atomic_init(&sys_misc_mem_sz, 0);
+ erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
}
void noinherit_std_handle(DWORD type)
@@ -3297,8 +3221,6 @@ void erl_sys_init(void)
{
HANDLE handle;
- erts_sys_env_init();
-
noinherit_std_handle(STD_OUTPUT_HANDLE);
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
@@ -3310,7 +3232,7 @@ void erl_sys_init(void)
erts_smp_tsd_key_create(&win32_errstr_key);
InitializeCriticalSection(&htbc_lock);
#endif
- erts_smp_atomic_init(&pipe_creation_counter,0);
+ erts_smp_atomic_init_nob(&pipe_creation_counter,0);
/*
* Test if we have named pipes or not.
*/
@@ -3360,15 +3282,15 @@ void erl_sys_init(void)
SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX);
}
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt(int set)
{
erts_check_io_interrupt(set);
}
+#ifdef ERTS_SMP
void
-erts_sys_schedule_interrupt_timed(int set, long msec)
+erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec)
{
erts_check_io_interrupt_timed(set, msec);
}
@@ -3382,76 +3304,7 @@ erts_sys_schedule_interrupt_timed(int set, long msec)
void
erl_sys_schedule(int runnable)
{
-#ifdef ERTS_SMP
erts_check_io(!runnable);
- ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
-#else
- erts_check_io_interrupt(0);
- if (runnable) {
- erts_check_io(0); /* Poll for I/O */
- check_async_ready(); /* Check async completions */
- } else {
- erts_check_io(check_async_ready() ? 0 : 1);
- }
-#endif
-}
-
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-/*
- * Async operation support.
- */
-
-static ErlDrvEvent async_drv_event;
-
-void
-sys_async_ready(int fd)
-{
- SetEvent((HANDLE)async_drv_event);
-}
-
-static int
-async_drv_init(void)
-{
- async_drv_event = (ErlDrvEvent) NULL;
- return 0;
-}
-
-static ErlDrvData
-async_drv_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
-{
- if (async_drv_event != (ErlDrvEvent) NULL) {
- return ERL_DRV_ERROR_GENERAL;
- }
- if ((async_drv_event = (ErlDrvEvent)CreateAutoEvent(FALSE)) == (ErlDrvEvent) NULL) {
- return ERL_DRV_ERROR_GENERAL;
- }
-
- driver_select(port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 1);
- if (init_async(async_drv_event) < 0) {
- return ERL_DRV_ERROR_GENERAL;
- }
- return (ErlDrvData)port_num;
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
-static void
-async_drv_stop(ErlDrvData port_num)
-{
- exit_async();
- driver_select((ErlDrvPort)port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 0);
- /*CloseHandle((HANDLE)async_drv_event);*/
- async_drv_event = (ErlDrvEvent) NULL;
-}
-
-
-static void
-async_drv_input(ErlDrvData port_num, ErlDrvEvent e)
-{
- check_async_ready();
-
- /*
- * Our event is auto-resetting.
- */
-}
-
-#endif
-
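
Throughout sys.c the plain erts_smp_atomic_* calls are replaced by explicitly ordered variants: _nob ("no barrier") where sys_misc_mem_sz is only a bookkeeping counter, and _mb (full barrier) in erts_sys_misc_mem_sz() where the reader wants an up-to-date value. The distinction is roughly analogous to C11 memory orders; a loose analogy, not the ERTS API:

    #include <stdatomic.h>

    static atomic_long sys_misc_mem_sz;

    /* _nob-style update: relaxed ordering is enough for statistics. */
    static void account_alloc(long nbytes)
    {
        atomic_fetch_add_explicit(&sys_misc_mem_sz, nbytes,
                                  memory_order_relaxed);
    }

    /* _mb-style read: full ordering when the caller wants the latest
     * value regardless of which thread last updated it. */
    static long current_misc_mem(void)
    {
        return atomic_load_explicit(&sys_misc_mem_sz, memory_order_seq_cst);
    }
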
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
index 02c8433a10..064745d418 100644
--- a/erts/emulator/sys/win32/sys_env.c
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2002-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2002-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -55,19 +55,17 @@ erts_sys_putenv(char *key_value, int sep_ix)
}
int
-erts_sys_getenv(char *key, char *value, size_t *size)
+erts_sys_getenv__(char *key, char *value, size_t *size)
{
size_t req_size = 0;
int res = 0;
DWORD new_size;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
SetLastError(0);
new_size = GetEnvironmentVariable((LPCTSTR) key,
(LPTSTR) value,
(DWORD) *size);
res = !new_size && GetLastError() == ERROR_ENVVAR_NOT_FOUND ? -1 : 0;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
if (res < 0)
return res;
res = new_size > *size ? 1 : 0;
@@ -75,6 +73,16 @@ erts_sys_getenv(char *key, char *value, size_t *size)
return res;
}
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ res = erts_sys_getenv__(key, value, size);
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return res;
+}
+
struct win32_getenv_state {
char *env;
char *next;
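
The getenv rework in sys_env.c splits the lookup in two: erts_sys_getenv__() does the work without taking environ_rwmtx (for callers that already hold the lock), while erts_sys_getenv() is a thin wrapper that acquires the read lock around it. A generic sketch of the same locked/unlocked-pair pattern using POSIX rwlocks (names are illustrative, not the ERTS API):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_rwlock_t env_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Unlocked worker: the caller is responsible for synchronization.
     * Returns -1 if not found, 1 if the buffer is too small, 0 on success. */
    static int get_env__(const char *key, char *value, size_t *size)
    {
        const char *v = getenv(key);
        size_t need;
        if (!v)
            return -1;
        need = strlen(v) + 1;
        if (need > *size) {
            *size = need;
            return 1;
        }
        memcpy(value, v, need);
        return 0;
    }

    /* Locked wrapper: what ordinary callers should use. */
    static int get_env(const char *key, char *value, size_t *size)
    {
        int res;
        pthread_rwlock_rdlock(&env_lock);
        res = get_env__(key, value, size);
        pthread_rwlock_unlock(&env_lock);
        return res;
    }
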
diff --git a/erts/emulator/sys/win32/sys_float.c b/erts/emulator/sys/win32/sys_float.c
index 9e67ca7f48..6558ad2d99 100644
--- a/erts/emulator/sys/win32/sys_float.c
+++ b/erts/emulator/sys/win32/sys_float.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -18,6 +18,9 @@
*/
/* Float conversions */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "signal.h"
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index 943c338794..347c31053b 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -19,8 +19,12 @@
/*
* Purpose: Interrupt handling in windows.
*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_thr_progress.h"
#include "erl_driver.h"
#include "../../drivers/win32/win_con.h"
@@ -33,9 +37,9 @@
#ifdef ERTS_SMP
erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
+ erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -52,14 +56,14 @@ void erts_do_break_handling(void)
* therefore, make sure that all threads but this one are blocked before
* proceeding!
*/
- erts_smp_block_system(0);
+ erts_smp_thr_progress_block();
/* call the break handling function, reset the flag */
do_break();
ResetEvent(erts_sys_break_event);
ERTS_UNSET_BREAK_REQUESTED;
- erts_smp_release_system();
+ erts_smp_thr_progress_unblock();
}
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
index 50e43065b5..b5123dc45d 100644
--- a/erts/emulator/sys/win32/sys_time.c
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -20,6 +20,9 @@
* Purpose: System-dependent time functions.
*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "assert.h"
@@ -32,28 +35,336 @@
/******************* Routines for time measurement *********************/
#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+#define TICKS_PER_SECOND LL_LITERAL(10000000)
+#define SECONDS_PER_DAY LL_LITERAL(86400)
+
+#define ULI_TO_FILETIME(ft,ull) \
+ do { \
+ (ft).dwLowDateTime = (ull).LowPart; \
+ (ft).dwHighDateTime = (ull).HighPart; \
+ } while (0)
+
+#define FILETIME_TO_ULI(ull,ft) \
+ do { \
+ (ull).LowPart = (ft).dwLowDateTime; \
+ (ull).HighPart = (ft).dwHighDateTime; \
+ } while (0)
+
+#define EPOCH_TO_FILETIME(ft, epoch) \
+ do { \
+ ULARGE_INTEGER ull; \
+ ull.QuadPart = (((epoch) + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND); \
+ ULI_TO_FILETIME(ft,ull); \
+ } while(0)
+
+#define FILETIME_TO_EPOCH(epoch, ft) \
+ do { \
+ ULARGE_INTEGER ull; \
+ FILETIME_TO_ULI(ull,ft); \
+ (epoch) = ((ull.QuadPart / TICKS_PER_SECOND) - EPOCH_JULIAN_DIFF); \
+ } while(0)
+
static SysHrTime wrap = 0;
static DWORD last_tick_count = 0;
+/* Getting timezone information is a heavy operation, so we want to do this
+ only once */
+
+static TIME_ZONE_INFORMATION static_tzi;
+static int have_static_tzi = 0;
+
+static int days_in_month[2][13] = {
+ {0,31,28,31,30,31,30,31,31,30,31,30,31},
+ {0,31,29,31,30,31,30,31,31,30,31,30,31}};
+
int
sys_init_time(void)
{
+ if(GetTimeZoneInformation(&static_tzi) &&
+ static_tzi.StandardDate.wMonth != 0 &&
+ static_tzi.DaylightDate.wMonth != 0) {
+ have_static_tzi = 1;
+ }
return 1;
}
+/* Returns a DST switch time as a UTC FILETIME, given data from a
+   TIME_ZONE_INFORMATION; see sys_localtime_r for usage. */
+static void
+get_dst_switchtime(DWORD year,
+ SYSTEMTIME dstinfo, LONG bias,
+ FILETIME *utc_switchtime)
+{
+ DWORD occu;
+ DWORD weekday,wday_1st;
+ DWORD day, days_in;
+ FILETIME tmp,tmp2;
+ ULARGE_INTEGER ull;
+ int leap_year = 0;
+ if (dstinfo.wYear != 0) {
+	/* A year-specific transition: the data in the structure is already
+	   set for that particular year. If the year matches the parameter,
+	   generate the filetime directly; otherwise return a zero
+	   filetime. */
+ if (year != dstinfo.wYear) {
+ utc_switchtime->dwLowDateTime = utc_switchtime->dwHighDateTime = 0;
+ return;
+ }
+ } else {
+ occu = dstinfo.wDay;
+ weekday = dstinfo.wDayOfWeek;
+
+ dstinfo.wDayOfWeek = 0;
+ dstinfo.wDay = 1;
+ dstinfo.wYear = year;
+
+ SystemTimeToFileTime(&dstinfo,&tmp);
+ ull.LowPart = tmp.dwLowDateTime;
+ ull.HighPart = tmp.dwHighDateTime;
+
+ ull.QuadPart /= (TICKS_PER_SECOND*SECONDS_PER_DAY); /* Julian Day */
+ wday_1st = (DWORD) ((ull.QuadPart + LL_LITERAL(1)) % LL_LITERAL(7));
+ day = (weekday >= wday_1st) ?
+ weekday - wday_1st + 1 :
+ weekday - wday_1st + 8;
+ --occu;
+ if (((dstinfo.wYear % 4) == 0 && (dstinfo.wYear % 100) > 0) ||
+ ((dstinfo.wYear % 400) == 0)) {
+ leap_year = 1;
+ }
+ days_in = days_in_month[leap_year][dstinfo.wMonth];
+ while (occu > 0 && (day + 7 <= days_in)) {
+ --occu;
+ day += 7;
+ }
+ dstinfo.wDay = day;
+ }
+ SystemTimeToFileTime(&dstinfo,&tmp);
+ /* correct for bias */
+ ull.LowPart = tmp.dwLowDateTime;
+ ull.HighPart = tmp.dwHighDateTime;
+ ull.QuadPart += (((LONGLONG) bias) * LL_LITERAL(60) * TICKS_PER_SECOND);
+ utc_switchtime->dwLowDateTime = ull.LowPart;
+ utc_switchtime->dwHighDateTime = ull.HighPart;
+ return;
+}
+
+/* This function gives approximately the correct year from a FILETIME.
+   Around the actual new year it may return the wrong value, but that's OK,
+   as DST never switches around new year. */
+static DWORD
+approx_year(FILETIME ft)
+{
+ ULARGE_INTEGER ull;
+ FILETIME_TO_ULI(ull,ft);
+ ull.QuadPart /= LL_LITERAL(1000);
+ ull.QuadPart /= SECONDS_PER_DAY;
+ ull.QuadPart /= LL_LITERAL(3652425);
+ ull.QuadPart += 1601;
+ return (DWORD) ull.QuadPart;
+}
+
+struct tm *
+sys_localtime_r(time_t *epochs, struct tm *ptm)
+{
+ FILETIME ft,lft;
+ SYSTEMTIME st;
+
+ if ((((*epochs) + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND) < 0LL) {
+ return NULL;
+ }
+
+ EPOCH_TO_FILETIME(ft,*epochs);
+ ptm->tm_isdst = 0;
+ if (have_static_tzi) {
+ FILETIME dst_start, dst_stop;
+ ULARGE_INTEGER ull;
+ DWORD year = approx_year(ft);
+ get_dst_switchtime(year,static_tzi.DaylightDate,
+ static_tzi.Bias+static_tzi.StandardBias,&dst_start);
+ get_dst_switchtime(year,static_tzi.StandardDate,
+ static_tzi.Bias+static_tzi.StandardBias+
+ static_tzi.DaylightBias,
+ &dst_stop);
+ FILETIME_TO_ULI(ull,ft);
+
+ if (CompareFileTime(&ft,&dst_start) >= 0 &&
+ CompareFileTime(&ft,&dst_stop) < 0) {
+ ull.QuadPart -=
+ ((LONGLONG) static_tzi.Bias+static_tzi.StandardBias+
+ static_tzi.DaylightBias) *
+ LL_LITERAL(60) * TICKS_PER_SECOND;
+ ptm->tm_isdst = 1;
+ } else {
+ ull.QuadPart -=
+ ((LONGLONG) static_tzi.Bias+static_tzi.StandardBias)
+ * LL_LITERAL(60) * TICKS_PER_SECOND;
+ }
+ ULI_TO_FILETIME(ft,ull);
+ } else {
+ if (!FileTimeToLocalFileTime(&ft,&lft)) {
+ return NULL;
+ }
+ ft = lft;
+ }
+
+ if (!FileTimeToSystemTime(&ft,&st)) {
+ return NULL;
+ }
+
+ ptm->tm_year = (int) st.wYear - 1900;
+ ptm->tm_mon = (int) st.wMonth - 1;
+ ptm->tm_mday = (int) st.wDay;
+ ptm->tm_hour = (int) st.wHour;
+ ptm->tm_min = (int) st.wMinute;
+ ptm->tm_sec = (int) st.wSecond;
+ ptm->tm_wday = (int) st.wDayOfWeek;
+ {
+ int yday = ptm->tm_mday - 1;
+ int m = ptm->tm_mon;
+ int leap_year = 0;
+ if (((st.wYear % 4) == 0 && (st.wYear % 100) > 0) ||
+ ((st.wYear % 400) == 0)) {
+ leap_year = 1;
+ }
+ while (m > 0) {
+ yday +=days_in_month[leap_year][m];
+ --m;
+ }
+ ptm->tm_yday = yday;
+ }
+ return ptm;
+}
+
+struct tm *
+sys_gmtime_r(time_t *epochs, struct tm *ptm)
+{
+ FILETIME ft;
+ SYSTEMTIME st;
+
+ if ((((*epochs) + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND) < 0LL) {
+ return NULL;
+ }
+
+ EPOCH_TO_FILETIME(ft,*epochs);
+
+ if (!FileTimeToSystemTime(&ft,&st)) {
+ return NULL;
+ }
+
+ ptm->tm_year = (int) st.wYear - 1900;
+ ptm->tm_mon = (int) st.wMonth - 1;
+ ptm->tm_mday = (int) st.wDay;
+ ptm->tm_hour = (int) st.wHour;
+ ptm->tm_min = (int) st.wMinute;
+ ptm->tm_sec = (int) st.wSecond;
+ ptm->tm_wday = (int) st.wDayOfWeek;
+ ptm->tm_isdst = 0;
+ {
+ int yday = ptm->tm_mday - 1;
+ int m = ptm->tm_mon;
+ int leap_year = 0;
+ if (((st.wYear % 4) == 0 && (st.wYear % 100) > 0) ||
+ ((st.wYear % 400) == 0)) {
+ leap_year = 1;
+ }
+ while (m > 0) {
+ yday +=days_in_month[leap_year][m];
+ --m;
+ }
+ ptm->tm_yday = yday;
+ }
+
+ return ptm;
+}
+
+time_t
+sys_mktime(struct tm *ptm)
+{
+ FILETIME ft;
+ SYSTEMTIME st;
+ int dst = 0;
+ time_t epochs;
+
+ memset(&st,0,sizeof(st));
+    /* Convert relevant parts of struct tm to SYSTEMTIME */
+ st.wYear = (USHORT) (ptm->tm_year + 1900);
+ st.wMonth = (USHORT) (ptm->tm_mon + 1);
+ st.wDay = (USHORT) ptm->tm_mday;
+ st.wHour = (USHORT) ptm->tm_hour;
+ st.wMinute = (USHORT) ptm->tm_min;
+ st.wSecond = (USHORT) ptm->tm_sec;
+
+ SystemTimeToFileTime(&st,&ft);
+
+    /* ft is now some kind of local file time, but it may be wrong depending
+       on what is in the tm_isdst field. We need to manually convert it to
+       UTC before turning it into epoch seconds. */
+
+ if (have_static_tzi) {
+ FILETIME dst_start, dst_stop;
+ ULARGE_INTEGER ull_start,ull_stop,ull_ft;
+
+ FILETIME_TO_ULI(ull_ft,ft);
+
+ /* Correct everything except DST */
+ ull_ft.QuadPart += (static_tzi.Bias+static_tzi.StandardBias)
+ * LL_LITERAL(60) * TICKS_PER_SECOND;
+
+ /* Determine if DST is active */
+ if (ptm->tm_isdst >= 0) {
+ dst = ptm->tm_isdst;
+ } else if (static_tzi.DaylightDate.wMonth != 0){
+	    /* This is how the Windows mktime does it, meaning it does not
+	       take nonexistent local times into account */
+ get_dst_switchtime(st.wYear,static_tzi.DaylightDate,
+ static_tzi.Bias+static_tzi.StandardBias,
+ &dst_start);
+ get_dst_switchtime(st.wYear,static_tzi.StandardDate,
+ static_tzi.Bias+static_tzi.StandardBias+
+ static_tzi.DaylightBias,
+ &dst_stop);
+ FILETIME_TO_ULI(ull_start,dst_start);
+ FILETIME_TO_ULI(ull_stop,dst_stop);
+ if ((ull_ft.QuadPart >= ull_start.QuadPart) &&
+ (ull_ft.QuadPart < ull_stop.QuadPart)) {
+ /* We are in DST */
+ dst = 1;
+ }
+ }
+ /* Correct for DST */
+ if (dst) {
+ ull_ft.QuadPart += static_tzi.DaylightBias *
+ LL_LITERAL(60) * TICKS_PER_SECOND;
+ }
+ epochs = ((ull_ft.QuadPart / TICKS_PER_SECOND) - EPOCH_JULIAN_DIFF);
+ } else {
+ /* No DST, life is easy... */
+ FILETIME lft;
+ LocalFileTimeToFileTime(&ft,&lft);
+ FILETIME_TO_EPOCH(epochs,lft);
+ }
+ /* Normalize the struct tm */
+ sys_localtime_r(&epochs,ptm);
+ return epochs;
+}
+
void
sys_gettimeofday(SysTimeval *tv)
{
SYSTEMTIME t;
FILETIME ft;
- LONGLONG lft;
+ ULARGE_INTEGER ull;
GetSystemTime(&t);
SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- tv->tv_usec = (long) ((lft / LL_LITERAL(10)) % LL_LITERAL(1000000));
- tv->tv_sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+ FILETIME_TO_ULI(ull,ft);
+ tv->tv_usec = (long) ((ull.QuadPart / LL_LITERAL(10)) %
+ LL_LITERAL(1000000));
+ tv->tv_sec = (long) ((ull.QuadPart / LL_LITERAL(10000000)) -
+ EPOCH_JULIAN_DIFF);
}
SysHrTime
@@ -88,9 +399,3 @@ sys_times(SysTimes *buffer) {
buffer->tms_stime = (clock_t) (system & LL_LITERAL(0x7FFFFFFF));
return kernel_ticks;
}
-
-
-
-
-
-
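
All of the new sys_time.c code works on Windows FILETIME values, i.e. 100-nanosecond ticks since 1601-01-01, and converts to and from Unix epoch seconds with EPOCH_JULIAN_DIFF (11644473600 seconds) and TICKS_PER_SECOND (10^7), exactly as the EPOCH_TO_FILETIME/FILETIME_TO_EPOCH macros do. A standalone round trip of that core arithmetic, stripped of the Win32 types:

    #include <stdint.h>
    #include <assert.h>

    #define EPOCH_JULIAN_DIFF 11644473600LL  /* seconds 1601-01-01 .. 1970-01-01 */
    #define TICKS_PER_SECOND  10000000LL     /* FILETIME ticks are 100 ns */

    static uint64_t epoch_to_filetime_ticks(int64_t epoch_seconds)
    {
        return (uint64_t)((epoch_seconds + EPOCH_JULIAN_DIFF) * TICKS_PER_SECOND);
    }

    static int64_t filetime_ticks_to_epoch(uint64_t ticks)
    {
        return (int64_t)(ticks / TICKS_PER_SECOND) - EPOCH_JULIAN_DIFF;
    }

    int main(void)
    {
        int64_t t = 1325376000;  /* 2012-01-01 00:00:00 UTC */
        assert(filetime_ticks_to_epoch(epoch_to_filetime_ticks(t)) == t);
        return 0;
    }
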
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 4d0c87bf12..a3dcbc4cf3 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -61,7 +61,7 @@ MODULES= \
exception_SUITE \
float_SUITE \
fun_SUITE \
- fun_r12_SUITE \
+ fun_r13_SUITE \
gc_SUITE \
guard_SUITE \
hash_SUITE \
@@ -88,6 +88,7 @@ MODULES= \
send_term_SUITE \
sensitive_SUITE \
signal_SUITE \
+ smoke_test_SUITE \
statistics_SUITE \
system_info_SUITE \
system_profile_SUITE \
diff --git a/erts/emulator/test/a_SUITE_data/timer_driver.c b/erts/emulator/test/a_SUITE_data/timer_driver.c
index ef4dcdf501..44be94e0f0 100644
--- a/erts/emulator/test/a_SUITE_data/timer_driver.c
+++ b/erts/emulator/test/a_SUITE_data/timer_driver.c
@@ -17,7 +17,9 @@
#define CANCELLED 4
static ErlDrvData timer_start(ErlDrvPort, char*);
-static void timer_stop(ErlDrvData), timer_read(ErlDrvData, char*, int), timer(ErlDrvData);
+static void timer_stop(ErlDrvData),
+ timer_read(ErlDrvData, char*, ErlDrvSizeT),
+ timer(ErlDrvData);
static ErlDrvEntry timer_driver_entry =
{
@@ -33,6 +35,16 @@ static ErlDrvEntry timer_driver_entry =
NULL,
timer,
NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -47,8 +59,9 @@ static ErlDrvData timer_start(ErlDrvPort port, char *buf)
}
/* set the timer, this is monitored from erlang measuring the time */
-static void timer_read(ErlDrvData port, char *buf, int len)
+static void timer_read(ErlDrvData p, char *buf, ErlDrvSizeT len)
{
+ ErlDrvPort port = (ErlDrvPort) p;
char reply[1];
if (buf[0] == START_TIMER) {
@@ -62,8 +75,9 @@ static void timer_read(ErlDrvData port, char *buf, int len)
}
}
-static void timer_stop(ErlDrvData port)
+static void timer_stop(ErlDrvData p)
{
+ ErlDrvPort port = (ErlDrvPort) p;
driver_cancel_timer(port);
}
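
This driver, like busy_drv.c and testcase_driver.c further down, is updated in two ways: the output callback now takes ErlDrvSizeT instead of int, and the ErlDrvEntry initializer is padded out so the extended-marker and version fields land in the right positions. Since the padding is purely positional, a commented sketch of the field order may help when reading those hunks (the example_* callbacks are hypothetical stand-ins; erl_driver.h remains the authoritative definition):

    #include "erl_driver.h"

    /* Hypothetical no-op callbacks, only so the table below compiles. */
    static ErlDrvData example_start(ErlDrvPort port, char *command)
    {
        (void)command;
        return (ErlDrvData)port;
    }
    static void example_stop(ErlDrvData data) { (void)data; }
    static void example_output(ErlDrvData data, char *buf, ErlDrvSizeT len)
    {
        (void)data; (void)buf; (void)len;
    }

    static ErlDrvEntry example_driver_entry = {
        NULL,                            /* init */
        example_start,                   /* start */
        example_stop,                    /* stop */
        example_output,                  /* output(ErlDrvData, char*, ErlDrvSizeT) */
        NULL,                            /* ready_input */
        NULL,                            /* ready_output */
        "example_drv",                   /* driver_name */
        NULL,                            /* finish */
        NULL,                            /* handle (reserved) */
        NULL,                            /* control */
        NULL,                            /* timeout */
        NULL,                            /* outputv */
        NULL,                            /* ready_async */
        NULL,                            /* flush */
        NULL,                            /* call */
        NULL,                            /* event */
        ERL_DRV_EXTENDED_MARKER,         /* extended_marker */
        ERL_DRV_EXTENDED_MAJOR_VERSION,  /* major_version */
        ERL_DRV_EXTENDED_MINOR_VERSION,  /* minor_version */
        0,                               /* driver_flags */
        NULL,                            /* handle2 (reserved) */
        NULL,                            /* process_exit */
        NULL                             /* stop_select */
    };
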
diff --git a/erts/emulator/test/alloc_SUITE_data/allocator_test.h b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
index b869a4079c..cd4a91d34a 100644
--- a/erts/emulator/test/alloc_SUITE_data/allocator_test.h
+++ b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
@@ -19,7 +19,7 @@
#ifndef ALLOCATOR_TEST_H__
#define ALLOCATOR_TEST_H__
-typedef unsigned long Ulong;
+typedef ErlDrvUInt Ulong;
#ifndef __WIN32__
Ulong erts_alc_test(Ulong, Ulong, Ulong, Ulong);
@@ -82,15 +82,17 @@ typedef void* erts_cond;
#define NO_OF_BKTS ((Ulong) ALC_TEST0(0x102))
#define FIND_BKT(A, I) ((int) ALC_TEST2(0x103, (A), (I)))
-/* From erl_bestfit_alloc.c */
-#define IS_AOBF(A) ((Ulong) ALC_TEST1(0x200, (A)))
-#define RBT_ROOT(A) ((RBT_t *) ALC_TEST1(0x201, (A)))
-#define RBT_PARENT(T) ((RBT_t *) ALC_TEST1(0x202, (T)))
-#define RBT_LEFT(T) ((RBT_t *) ALC_TEST1(0x203, (T)))
-#define RBT_RIGHT(T) ((RBT_t *) ALC_TEST1(0x204, (T)))
-#define RBT_NEXT(T) ((RBTL_t *) ALC_TEST1(0x205, (T)))
-#define RBT_IS_BLACK(T) ((Ulong) ALC_TEST1(0x206, (T)))
-#define RBT_IS_TREE(T) ((Ulong) ALC_TEST1(0x207, (T)))
+/* From erl_bestfit_alloc.c and erl_ao_firstfit_alloc.c */
+#define IS_AOBF(A) ((Ulong) ALC_TEST1(RBT_OP(0), (A)))
+#define RBT_ROOT(A) ((RBT_t *) ALC_TEST1(RBT_OP(1), (A)))
+#define RBT_PARENT(T) ((RBT_t *) ALC_TEST1(RBT_OP(2), (T)))
+#define RBT_LEFT(T) ((RBT_t *) ALC_TEST1(RBT_OP(3), (T)))
+#define RBT_RIGHT(T) ((RBT_t *) ALC_TEST1(RBT_OP(4), (T)))
+#define RBT_NEXT(T) ((RBTL_t *) ALC_TEST1(RBT_OP(5), (T)))
+#define RBT_IS_BLACK(T) ((Ulong) ALC_TEST1(RBT_OP(6), (T)))
+#define RBT_IS_TREE(T) ((Ulong) ALC_TEST1(RBT_OP(7), (T)))
+#define IS_AOFF(A) ((Ulong) ALC_TEST1(RBT_OP(8), (A)))
+#define RBT_MAX_SZ(T) ((Ulong) ALC_TEST1(RBT_OP(9), (T)))
/* From erl_mseg.c */
#define HAVE_MSEG() ((int) ALC_TEST0(0x400))
diff --git a/erts/emulator/test/alloc_SUITE_data/coalesce.c b/erts/emulator/test/alloc_SUITE_data/coalesce.c
index c84da97d35..6f35d3279b 100644
--- a/erts/emulator/test/alloc_SUITE_data/coalesce.c
+++ b/erts/emulator/test/alloc_SUITE_data/coalesce.c
@@ -267,7 +267,7 @@ void
testcase_run(TestCaseState_t *tcs)
{
char *argv_org[] = {"-tmmbcs1024", "-tsbct2048", "-trmbcmt100", "-tas", NULL, NULL};
- char *alg[] = {"af", "gf", "bf", "aobf", NULL};
+ char *alg[] = {"af", "gf", "bf", "aobf", "aoff", NULL};
int i;
for (i = 0; alg[i]; i++) {
diff --git a/erts/emulator/test/alloc_SUITE_data/rbtree.c b/erts/emulator/test/alloc_SUITE_data/rbtree.c
index c97e0aac1a..4e7f821baf 100644
--- a/erts/emulator/test/alloc_SUITE_data/rbtree.c
+++ b/erts/emulator/test/alloc_SUITE_data/rbtree.c
@@ -34,6 +34,14 @@ typedef struct {
#define PRINT_TREE
#endif
+/* Ugly hack to steer the test code towards the right allocator */
+#define RBT_OP(CMD) (current_rbt_type_op_base + (CMD))
+static enum {
+ BESTFIT_OP_BASE = 0x200,
+ AO_FIRSTFIT_OP_BASE = 0x500
+}current_rbt_type_op_base;
+
+
#ifdef PRINT_TREE
#define INDENT_STEP 5
@@ -65,12 +73,11 @@ print_tree_aux(TestCaseState_t *tcs, RBT_t *x, int indent)
static void
-print_tree(TestCaseState_t *tcs, RBT_t *root, int aobf)
+print_tree(TestCaseState_t *tcs, RBT_t *root)
{
- char *type = aobf ? "Size-Adress" : "Size";
- testcase_printf(tcs, " --- %s tree begin ---\r\n", type);
+ testcase_printf(tcs, " --- Tree begin ---\r\n");
print_tree_aux(tcs, root, 0);
- testcase_printf(tcs, " --- %s tree end ---\r\n", type);
+ testcase_printf(tcs, " --- Tree end ---\r\n");
}
#endif
@@ -78,7 +85,8 @@ print_tree(TestCaseState_t *tcs, RBT_t *root, int aobf)
static RBT_t *
check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size)
{
- int i, max_i, address_order;
+ enum { BF, AOBF, AOFF }type;
+ int i, max_i;
char stk[128];
RBT_t *root, *x, *y, *res;
Ulong x_sz, y_sz, is_x_black;
@@ -86,11 +94,14 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size)
res = NULL;
- address_order = IS_AOBF(alc);
+ if (IS_AOBF(alc)) type = AOBF;
+ else if (IS_AOFF(alc)) type = AOFF;
+ else type = BF;
+
root = RBT_ROOT(alc);
#ifdef PRINT_TREE
- print_tree(tcs, root, address_order);
+ print_tree(tcs, root);
#endif
max_i = i = -1;
@@ -165,12 +176,18 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size)
if (y) {
y_sz = BLK_SZ(y);
ASSERT(tcs, RBT_PARENT(y) == x);
- if (address_order) {
+ switch (type) {
+ case AOBF:
ASSERT(tcs, y_sz < x_sz || (y_sz == x_sz && y < x));
- }
- else {
+ break;
+ case BF:
ASSERT(tcs, RBT_IS_TREE(y));
ASSERT(tcs, y_sz < x_sz);
+ break;
+ case AOFF:
+ ASSERT(tcs, y < x);
+ ASSERT(tcs, RBT_MAX_SZ(y) <= RBT_MAX_SZ(x));
+ break;
}
}
@@ -178,16 +195,22 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size)
if (y) {
y_sz = BLK_SZ(y);
ASSERT(tcs, RBT_PARENT(y) == x);
- if (address_order) {
+ switch (type) {
+ case AOBF:
ASSERT(tcs, y_sz > x_sz || (y_sz == x_sz && y > x));
- }
- else {
+ break;
+ case BF:
ASSERT(tcs, RBT_IS_TREE(y));
ASSERT(tcs, y_sz > x_sz);
+ break;
+ case AOFF:
+ ASSERT(tcs, y > x);
+ ASSERT(tcs, RBT_MAX_SZ(y) <= RBT_MAX_SZ(x));
+ break;
}
}
- if (!address_order) {
+ if (type == BF) {
Ulong l_sz;
RBTL_t *l = RBT_NEXT(x);
for (l = RBT_NEXT(x); l; l = RBT_NEXT(l)) {
@@ -202,13 +225,20 @@ check_tree(TestCaseState_t *tcs, Allctr_t *alc, Ulong size)
res = x;
else {
y_sz = BLK_SZ(res);
- if (address_order) {
+ switch (type) {
+ case AOBF:
if (x_sz < y_sz || (x_sz == y_sz && x < res))
res = x;
- }
- else {
- if (!res || x_sz < y_sz)
+ break;
+ case BF:
+ if (x_sz < y_sz)
res = x;
+ break;
+ case AOFF:
+ if (x < res) {
+ res = x;
+ }
+ break;
}
}
}
@@ -257,7 +287,7 @@ static void
test_it(TestCaseState_t *tcs)
{
int i;
- Allctr_t a = ((rbtree_test_data *) tcs->extra)->allocator;
+ Allctr_t* a = ((rbtree_test_data *) tcs->extra)->allocator;
void **blk = ((rbtree_test_data *) tcs->extra)->blk;
void **fence = ((rbtree_test_data *) tcs->extra)->fence;
Ulong min_blk_sz;
@@ -338,6 +368,7 @@ testcase_run(TestCaseState_t *tcs)
{
char *argv1[] = {"-tasbf", NULL};
char *argv2[] = {"-tasaobf", NULL};
+ char *argv3[] = {"-tasaoff", NULL};
Allctr_t *a;
rbtree_test_data *td;
@@ -355,6 +386,7 @@ testcase_run(TestCaseState_t *tcs)
testcase_printf(tcs, "Starting test of best fit...\n");
+ current_rbt_type_op_base = BESTFIT_OP_BASE;
td->allocator = a = START_ALC("rbtree_bf_", 0, argv1);
ASSERT(tcs, a);
@@ -371,6 +403,7 @@ testcase_run(TestCaseState_t *tcs)
testcase_printf(tcs, "Starting test of address order best fit...\n");
+ current_rbt_type_op_base = BESTFIT_OP_BASE;
td->allocator = a = START_ALC("rbtree_aobf_", 0, argv2);
ASSERT(tcs, a);
@@ -383,4 +416,19 @@ testcase_run(TestCaseState_t *tcs)
testcase_printf(tcs, "Address order best fit test succeeded!\n");
+ /* Address order first fit... */
+
+ testcase_printf(tcs, "Starting test of address order first fit...\n");
+
+ current_rbt_type_op_base = AO_FIRSTFIT_OP_BASE;
+ td->allocator = a = START_ALC("rbtree_aoff_", 0, argv3);
+
+ ASSERT(tcs, a);
+
+ test_it(tcs);
+
+ STOP_ALC(a);
+ td->allocator = NULL;
+
+ testcase_printf(tcs, "Address order first fit test succeeded!\n");
}
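
The rbtree test now drives two different allocators through the same RBT_* macros: the erts_alc_test() op codes are computed from a per-allocator base (0x200 for the bestfit trees, 0x500 for the address-order firstfit tree), selected via current_rbt_type_op_base before each run. A trivial standalone illustration of that indirection (with printf standing in for the real erts_alc_test dispatch):

    #include <stdio.h>

    enum { BESTFIT_OP_BASE = 0x200, AO_FIRSTFIT_OP_BASE = 0x500 };
    static int current_rbt_type_op_base = BESTFIT_OP_BASE;

    /* Same idea as in rbtree.c: one macro, two op-code ranges. */
    #define RBT_OP(CMD) (current_rbt_type_op_base + (CMD))

    int main(void)
    {
        current_rbt_type_op_base = BESTFIT_OP_BASE;
        printf("RBT_ROOT op for bestfit:     0x%x\n", RBT_OP(1)); /* 0x201 */

        current_rbt_type_op_base = AO_FIRSTFIT_OP_BASE;
        printf("RBT_ROOT op for AO firstfit: 0x%x\n", RBT_OP(1)); /* 0x501 */
        return 0;
    }
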
diff --git a/erts/emulator/test/alloc_SUITE_data/testcase_driver.c b/erts/emulator/test/alloc_SUITE_data/testcase_driver.c
index 1e98844838..66971654a2 100644
--- a/erts/emulator/test/alloc_SUITE_data/testcase_driver.c
+++ b/erts/emulator/test/alloc_SUITE_data/testcase_driver.c
@@ -50,13 +50,32 @@ typedef struct {
ErlDrvData testcase_drv_start(ErlDrvPort port, char *command);
void testcase_drv_stop(ErlDrvData drv_data);
-void testcase_drv_run(ErlDrvData drv_data, char *buf, int len);
+void testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
static ErlDrvEntry testcase_drv_entry = {
NULL,
testcase_drv_start,
testcase_drv_stop,
- testcase_drv_run
+ testcase_drv_run,
+ NULL,
+ NULL,
+ "testcase_drv",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
};
@@ -92,7 +111,7 @@ testcase_drv_stop(ErlDrvData drv_data)
}
void
-testcase_drv_run(ErlDrvData drv_data, char *buf, int len)
+testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) drv_data;
ErlDrvTermData result_atom;
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index c7617d3b90..a21b055596 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -28,7 +28,7 @@
types/1,
t_list_to_existing_atom/1,os_env/1,otp_7526/1,
binary_to_atom/1,binary_to_existing_atom/1,
- atom_to_binary/1,min_max/1]).
+ atom_to_binary/1,min_max/1, erlang_halt/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -36,7 +36,7 @@ all() ->
[types, t_list_to_existing_atom, os_env, otp_7526,
display,
atom_to_binary, binary_to_atom, binary_to_existing_atom,
- min_max].
+ min_max, erlang_halt].
groups() ->
[].
@@ -438,7 +438,55 @@ min_max(Config) when is_list(Config) ->
ok.
+
+
+erlang_halt(Config) when is_list(Config) ->
+ try erlang:halt(undefined) of
+ _-> ?t:fail({erlang,halt,{undefined}})
+ catch error:badarg -> ok end,
+ try halt(undefined) of
+ _-> ?t:fail({halt,{undefined}})
+ catch error:badarg -> ok end,
+ try erlang:halt(undefined, []) of
+ _-> ?t:fail({erlang,halt,{undefined,[]}})
+ catch error:badarg -> ok end,
+ try halt(undefined, []) of
+ _-> ?t:fail({halt,{undefined,[]}})
+ catch error:badarg -> ok end,
+ try halt(0, undefined) of
+ _-> ?t:fail({halt,{0,undefined}})
+ catch error:badarg -> ok end,
+ try halt(0, [undefined]) of
+ _-> ?t:fail({halt,{0,[undefined]}})
+ catch error:badarg -> ok end,
+ try halt(0, [{undefined,true}]) of
+ _-> ?t:fail({halt,{0,[{undefined,true}]}})
+ catch error:badarg -> ok end,
+ try halt(0, [{flush,undefined}]) of
+ _-> ?t:fail({halt,{0,[{flush,undefined}]}})
+ catch error:badarg -> ok end,
+ try halt(0, [{flush,true,undefined}]) of
+ _-> ?t:fail({halt,{0,[{flush,true,undefined}]}})
+ catch error:badarg -> ok end,
+ H = hostname(),
+ {ok,N1} = slave:start(H, halt_node1),
+ {badrpc,nodedown} = rpc:call(N1, erlang, halt, []),
+ {ok,N2} = slave:start(H, halt_node2),
+ {badrpc,nodedown} = rpc:call(N2, erlang, halt, [0]),
+ {ok,N3} = slave:start(H, halt_node3),
+ {badrpc,nodedown} = rpc:call(N3, erlang, halt, [0,[]]),
+ ok.
+
+
+
%% Helpers
id(I) -> I.
+hostname() ->
+ hostname(atom_to_list(node())).
+
+hostname([$@ | Hostname]) ->
+ list_to_atom(Hostname);
+hostname([_C | Cs]) ->
+ hostname(Cs).
diff --git a/erts/emulator/test/big_SUITE.erl b/erts/emulator/test/big_SUITE.erl
index 3487917677..413bd3bcae 100644
--- a/erts/emulator/test/big_SUITE.erl
+++ b/erts/emulator/test/big_SUITE.erl
@@ -26,7 +26,7 @@
shift_limit_1/1, powmod/1, system_limit/1, otp_6692/1]).
%% Internal exports.
--export([eval/1, funcall/2]).
+-export([eval/1]).
-export([init/3]).
-export([fac/1, fib/1, pow/2, gcd/2, lcm/2]).
@@ -162,12 +162,15 @@ multi_match([Node|Ns], Expr, Rs) ->
multi_match([], _, Rs) -> Rs.
eval(Expr) ->
- Fun = {?MODULE,funcall},
- {value,V,_} = erl_eval:expr(Expr, [], Fun), %Applied arithmetic BIFs.
- V = eval(Expr, Fun), %Real arithmetic instructions.
- V.
+ LFH = fun(Name, As) -> apply(?MODULE, Name, As) end,
+
+ %% Applied arithmetic BIFs.
+ {value,V,_} = erl_eval:expr(Expr, [], {value,LFH}),
-funcall(F, As) -> apply(?MODULE, F, As).
+ %% Real arithmetic instructions.
+ V = eval(Expr, LFH),
+
+ V.
%% Like a subset of erl_eval:expr/3, but uses real arithmetic instructions instead of
%% applying them (it does make a difference).
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 4e82381fba..d9fc876482 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -33,7 +33,6 @@
%% erlang:external_size/1
%% size(Binary)
%% iolist_size/1
-%% concat_binary/1
%% split_binary/2
%% hash(Binary, N)
%% phash(Binary, N)
@@ -47,7 +46,7 @@
init_per_testcase/2, end_per_testcase/2,
copy_terms/1, conversions/1, deep_lists/1, deep_bitstr_lists/1,
bad_list_to_binary/1, bad_binary_to_list/1,
- t_split_binary/1, bad_split/1, t_concat_binary/1,
+ t_split_binary/1, bad_split/1,
terms/1, terms_float/1, external_size/1, t_iolist_size/1,
t_hash/1,
bad_size/1,
@@ -68,7 +67,7 @@ suite() -> [{ct_hooks,[ts_install_cth]},
all() ->
[copy_terms, conversions, deep_lists, deep_bitstr_lists,
- t_split_binary, bad_split, t_concat_binary,
+ t_split_binary, bad_split,
bad_list_to_binary, bad_binary_to_list, terms,
terms_float, external_size, t_iolist_size,
bad_binary_to_term_2, safe_binary_to_term2,
@@ -93,7 +92,6 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Config.
@@ -381,41 +379,6 @@ bad_split(Config) when is_list(Config) ->
bad_split(Bin, Pos) ->
{'EXIT',{badarg,_}} = (catch split_binary(Bin, Pos)).
-%% Tests concat_binary/2 and size/1.
-
-t_concat_binary(suite) -> [];
-t_concat_binary(Config) when is_list(Config) ->
- test_concat([]),
-
- test_concat([[]]),
- test_concat([[], []]),
- test_concat([[], [], []]),
-
- test_concat([[1], []]),
- test_concat([[], [2]]),
- test_concat([[], [3], []]),
-
- test_concat([[1, 2, 3], [4, 5, 6, 7]]),
- test_concat([[1, 2, 3], [4, 5, 6, 7], [9, 10]]),
-
- test_concat([lists:seq(0, 255), lists:duplicate(1024, $@),
- lists:duplicate(2048, $a),
- lists:duplicate(4000, $b)]),
- ok.
-
-test_concat(Lists) ->
- test_concat(Lists, 0, [], []).
-
-test_concat([List|Rest], Size, Combined, Binaries) ->
- ?line Bin = list_to_binary(List),
- ?line test_concat(Rest, Size+length(List), Combined++List, [Bin|Binaries]);
-test_concat([], Size, Combined, Binaries0) ->
- ?line Binaries = lists:reverse(Binaries0),
- ?line Bin = concat_binary(Binaries),
- ?line Size = size(Bin),
- ?line Size = iolist_size(Bin),
- ?line Combined = binary_to_list(Bin).
-
t_hash(doc) -> "Test hash/2 with different type of binaries.";
t_hash(Config) when is_list(Config) ->
test_hash([]),
@@ -478,6 +441,11 @@ terms(Config) when is_list(Config) ->
Sz when is_integer(Sz), size(Bin) =< Sz ->
ok
end,
+ Bin1 = term_to_binary(Term, [{minor_version, 1}]),
+ case erlang:external_size(Bin1, [{minor_version, 1}]) of
+ Sz1 when is_integer(Sz1), size(Bin1) =< Sz1 ->
+ ok
+ end,
Term = binary_to_term(Bin),
Term = binary_to_term(Bin, [safe]),
Unaligned = make_unaligned_sub_binary(Bin),
@@ -510,7 +478,12 @@ terms_float(Config) when is_list(Config) ->
Term = binary_to_term(Bin0),
Bin1 = term_to_binary(Term, [{minor_version,1}]),
Term = binary_to_term(Bin1),
- true = size(Bin1) < size(Bin0)
+ true = size(Bin1) < size(Bin0),
+ Size0 = erlang:external_size(Term),
+ Size00 = erlang:external_size(Term, [{minor_version, 0}]),
+ Size1 = erlang:external_size(Term, [{minor_version, 1}]),
+ true = (Size0 =:= Size00),
+ true = Size1 < Size0
end).
external_size(Config) when is_list(Config) ->
@@ -526,7 +499,9 @@ external_size(Config) when is_list(Config) ->
io:format(" Aligned size: ~p\n", [Sz1]),
io:format("Unaligned size: ~p\n", [Sz2]),
?line ?t:fail()
- end.
+ end,
+    ?line true = (erlang:external_size(Bin) =:= erlang:external_size(Bin, [{minor_version, 1}])),
+    ?line true = (erlang:external_size(Unaligned) =:= erlang:external_size(Unaligned, [{minor_version, 1}])).
external_size_1(Term, Size0, Limit) when Size0 < Limit ->
case erlang:external_size(Term) of
diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
index 1959803385..7fdf36711b 100644
--- a/erts/emulator/test/bs_construct_SUITE.erl
+++ b/erts/emulator/test/bs_construct_SUITE.erl
@@ -553,6 +553,11 @@ huge_float_check({'EXIT',{badarg,_}}) -> ok.
huge_binary(Config) when is_list(Config) ->
?line 16777216 = size(<<0:(id(1 bsl 26)),(-1):(id(1 bsl 26))>>),
+ ?line garbage_collect(),
+ ?line id(<<0:((1 bsl 32)-1)>>),
+ ?line garbage_collect(),
+ ?line id(<<0:(id((1 bsl 32)-1))>>),
+ ?line garbage_collect(),
ok.
system_limit(Config) when is_list(Config) ->
@@ -565,6 +570,10 @@ system_limit(Config) when is_list(Config) ->
?line {'EXIT',{system_limit,_}} =
(catch <<(id(<<>>))/binary,0:(id(1 bsl 100))>>),
+ %% Would fail to load.
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:(1 bsl 67)>>),
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:((1 bsl 64)+1)>>),
+
case WordSize of
4 ->
system_limit_32();
@@ -581,6 +590,14 @@ system_limit_32() ->
?line {'EXIT',{system_limit,_}} = (catch <<0:(id(8)),42:536870912/unit:8>>),
?line {'EXIT',{system_limit,_}} =
(catch <<0:(id(8)),42:(id(536870912))/unit:8>>),
+
+ %% The size would be silently truncated, resulting in a crash.
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:(1 bsl 35)>>),
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:((1 bsl 32)+1)>>),
+
+ %% Would fail to load.
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:(1 bsl 43)>>),
+ ?line {'EXIT',{system_limit,_}} = (catch <<0:((1 bsl 40)+1)>>),
ok.
badarg(Config) when is_list(Config) ->
diff --git a/erts/emulator/test/bs_match_misc_SUITE.erl b/erts/emulator/test/bs_match_misc_SUITE.erl
index b022f96740..15427661f3 100644
--- a/erts/emulator/test/bs_match_misc_SUITE.erl
+++ b/erts/emulator/test/bs_match_misc_SUITE.erl
@@ -23,7 +23,7 @@
bound_var/1,bound_tail/1,t_float/1,little_float/1,sean/1,
kenneth/1,encode_binary/1,native/1,happi/1,
size_var/1,wiger/1,x0_context/1,huge_float_field/1,
- writable_binary_matched/1,otp_7198/1]).
+ writable_binary_matched/1,otp_7198/1,unordered_bindings/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -33,7 +33,7 @@ all() ->
[bound_var, bound_tail, t_float, little_float, sean,
kenneth, encode_binary, native, happi, size_var, wiger,
x0_context, huge_float_field, writable_binary_matched,
- otp_7198].
+ otp_7198, unordered_bindings].
groups() ->
[].
@@ -553,5 +553,15 @@ otp_7198_scan(<<C, Rest/binary>>, TokAcc) when
otp_7198_scan(Rest, [{'KEYWORD', C} | TokAcc])
end.
+unordered_bindings(Config) when is_list(Config) ->
+ {<<1,2,3,4>>,<<42,42>>,<<3,3,3>>} =
+ unordered_bindings(4, 2, 3, <<1,2,3,4, 42,42, 3,3,3, 3>>),
+ ok.
+
+unordered_bindings(CompressedLength, HashSize, PadLength, T) ->
+ <<Content:CompressedLength/binary,Mac:HashSize/binary,
+ Padding:PadLength/binary,PadLength>> = T,
+ {Content,Mac,Padding}.
+
id(I) -> I.
diff --git a/erts/emulator/test/bs_utf_SUITE.erl b/erts/emulator/test/bs_utf_SUITE.erl
index 72c656c400..4ab7d674a6 100644
--- a/erts/emulator/test/bs_utf_SUITE.erl
+++ b/erts/emulator/test/bs_utf_SUITE.erl
@@ -64,8 +64,7 @@ end_per_group(_GroupName, Config) ->
utf8_roundtrip(Config) when is_list(Config) ->
?line utf8_roundtrip(0, 16#D7FF),
- ?line utf8_roundtrip(16#E000, 16#FFFD),
- ?line utf8_roundtrip(16#10000, 16#10FFFF),
+ ?line utf8_roundtrip(16#E000, 16#10FFFF),
ok.
utf8_roundtrip(First, Last) when First =< Last ->
@@ -91,8 +90,7 @@ utf16_roundtrip(Config) when is_list(Config) ->
do_utf16_roundtrip(Fun) ->
do_utf16_roundtrip(0, 16#D7FF, Fun),
- do_utf16_roundtrip(16#E000, 16#FFFD, Fun),
- do_utf16_roundtrip(16#10000, 16#10FFFF, Fun).
+ do_utf16_roundtrip(16#E000, 16#10FFFF, Fun).
do_utf16_roundtrip(First, Last, Fun) when First =< Last ->
Fun(First),
@@ -129,8 +127,7 @@ utf32_roundtrip(Config) when is_list(Config) ->
do_utf32_roundtrip(Fun) ->
do_utf32_roundtrip(0, 16#D7FF, Fun),
- do_utf32_roundtrip(16#E000, 16#FFFD, Fun),
- do_utf32_roundtrip(16#10000, 16#10FFFF, Fun).
+ do_utf32_roundtrip(16#E000, 16#10FFFF, Fun).
do_utf32_roundtrip(First, Last, Fun) when First =< Last ->
Fun(First),
@@ -158,7 +155,6 @@ utf32_little_roundtrip(Char) ->
utf8_illegal_sequences(Config) when is_list(Config) ->
?line fail_range(16#10FFFF+1, 16#10FFFF+512), %Too large.
?line fail_range(16#D800, 16#DFFF), %Reserved for UTF-16.
- ?line fail_range(16#FFFE, 16#FFFF), %Non-characters.
%% Illegal first character.
?line [fail(<<I,16#8F,16#8F,16#8F>>) || I <- lists:seq(16#80, 16#BF)],
@@ -251,7 +247,6 @@ fail_1(_) -> ok.
utf16_illegal_sequences(Config) when is_list(Config) ->
?line utf16_fail_range(16#10FFFF+1, 16#10FFFF+512), %Too large.
?line utf16_fail_range(16#D800, 16#DFFF), %Reserved for UTF-16.
- ?line utf16_fail_range(16#FFFE, 16#FFFF), %Non-characters.
?line lonely_hi_surrogate(16#D800, 16#DFFF),
?line leading_lo_surrogate(16#DC00, 16#DFFF),
@@ -300,7 +295,6 @@ leading_lo_surrogate(_, _, _) -> ok.
utf32_illegal_sequences(Config) when is_list(Config) ->
?line utf32_fail_range(16#10FFFF+1, 16#10FFFF+512), %Too large.
?line utf32_fail_range(16#D800, 16#DFFF), %Reserved for UTF-16.
- ?line utf32_fail_range(16#FFFE, 16#FFFF), %Non-characters.
?line utf32_fail_range(-100, -1),
ok.
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 8365e1c540..3a29fd4d68 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -20,7 +20,7 @@
-module(busy_port_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2,
+ init_per_group/2,end_per_group/2,end_per_testcase/2,
io_to_busy/1, message_order/1, send_3/1,
system_monitor/1, no_trap_exit/1,
no_trap_exit_unlinked/1, trap_exit/1, multiple_writers/1,
@@ -53,6 +53,20 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+end_per_testcase(_Case, Config) when is_list(Config) ->
+ case whereis(busy_drv_server) of
+ undefined ->
+ ok;
+ Pid when is_pid(Pid) ->
+ Ref = monitor(process, Pid),
+ unlink(Pid),
+ exit(Pid, kill),
+ receive
+ {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end,
+ Config.
%% Tests I/O operations to a busy port, to make sure a suspended send
%% operation is correctly restarted. This used to crash Beam.
@@ -495,12 +509,12 @@ hs_busy_pcmd(Prt, Opts, StartFun, EndFun) ->
P = spawn_link(fun () ->
erlang:yield(),
Tester ! {self(), doing_port_command},
- Start = os:timestamp(),
+ Start = now(),
Res = try {return,
port_command(Prt, [], Opts)}
catch Exception:Error -> {Exception, Error}
end,
- End = os:timestamp(),
+ End = now(),
Time = round(timer:now_diff(End, Start)/1000),
Tester ! {self(), port_command_result, Res, Time}
end),
diff --git a/erts/emulator/test/busy_port_SUITE_data/busy_drv.c b/erts/emulator/test/busy_port_SUITE_data/busy_drv.c
index 1273d610ba..75106d3757 100644
--- a/erts/emulator/test/busy_port_SUITE_data/busy_drv.c
+++ b/erts/emulator/test/busy_port_SUITE_data/busy_drv.c
@@ -11,7 +11,8 @@
#define YES 1
static ErlDrvData busy_start(ErlDrvPort, char*);
-static void busy_stop(ErlDrvData), busy_from_erlang(ErlDrvData, char*, int);
+static void busy_stop(ErlDrvData),
+ busy_from_erlang(ErlDrvData, char*, ErlDrvSizeT);
ErlDrvEntry busy_driver_entry =
{
@@ -23,6 +24,20 @@ ErlDrvEntry busy_driver_entry =
NULL,
"busy_drv",
NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -73,7 +88,7 @@ static void busy_stop(ErlDrvData port)
}
static void
-busy_from_erlang(ErlDrvData port, char* buf, int count)
+busy_from_erlang(ErlDrvData port, char* buf, ErlDrvSizeT count)
{
if ((ErlDrvPort)port == slave_port) {
set_busy_port(slave_port, next_slave_state);
diff --git a/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c b/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c
index 35919da2d0..9f6bd310c6 100644
--- a/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c
+++ b/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -21,9 +21,9 @@
#include "erl_driver.h"
ErlDrvData start(ErlDrvPort port, char *command);
-void output(ErlDrvData drv_data, char *buf, int len);
-int control(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen);
+void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
+ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
static ErlDrvEntry busy_drv_entry = {
NULL /* init */,
@@ -61,7 +61,7 @@ ErlDrvData start(ErlDrvPort port, char *command)
return (ErlDrvData) port;
}
-void output(ErlDrvData drv_data, char *buf, int len)
+void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
int res;
ErlDrvPort port = (ErlDrvPort) drv_data;
@@ -76,8 +76,8 @@ void output(ErlDrvData drv_data, char *buf, int len)
driver_failure_atom(port, "driver_output_term failed");
}
-int control(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen)
+ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
{
switch (command) {
case 'B': /* busy */
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index 93fdc157f7..9d80b01748 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -165,10 +165,14 @@ worker_loop() ->
worker_foo(_Arg) ->
ok.
-basic(doc) ->
- "Basic test of the call tracing (we trace one process).";
-basic(suite) -> [];
-basic(Config) when is_list(Config) ->
+%% Basic test of the call tracing (we trace one process).
+basic(_Config) ->
+ case test_server:is_native(lists) of
+ true -> {skip,"lists is native"};
+ false -> basic()
+ end.
+
+basic() ->
?line start_tracer(),
?line trace_info(self(), flags),
?line trace_info(self(), tracer),
@@ -263,9 +267,15 @@ foo() -> foo0.
foo(X) -> X+1.
foo(X, Y) -> X+Y.
-flags(doc) -> "Test flags (arity, timestamp) for call_trace/3. "
- "Also, test the '{tracer,Pid}' option.";
-flags(Config) when is_list(Config) ->
+%% Test flags (arity, timestamp) for call_trace/3.
+%% Also, test the '{tracer,Pid}' option.
+flags(_Config) ->
+ case test_server:is_native(filename) of
+ true -> {skip,"filename is native"};
+ false -> flags()
+ end.
+
+flags() ->
?line Tracer = start_tracer_loop(),
?line trace_pid(self(), true, [call,{tracer,Tracer}]),
@@ -428,9 +438,14 @@ pam_foo(A, B) ->
{ok,A,B}.
-change_pam(doc) -> "Test changing PAM programs for a function.";
-change_pam(suite) -> [];
-change_pam(Config) when is_list(Config) ->
+%% Test changing PAM programs for a function.
+change_pam(_Config) ->
+ case test_server:is_native(lists) of
+ true -> {skip,"lists is native"};
+ false -> change_pam()
+ end.
+
+change_pam() ->
?line start_tracer(),
?line Self = self(),
@@ -468,10 +483,11 @@ change_pam_trace(Prog) ->
{match_spec,Prog} = trace_info({erlang,process_info,2}, match_spec),
ok.
-return_trace(doc) -> "Test the new return trace.";
-return_trace(suite) -> [];
-return_trace(Config) when is_list(Config) ->
- return_trace().
+return_trace(_Config) ->
+ case test_server:is_native(lists) of
+ true -> {skip,"lists is native"};
+ false -> return_trace()
+ end.
return_trace() ->
X = {save,me},
@@ -521,7 +537,7 @@ return_trace() ->
?line {match_spec,Prog2} = trace_info({erlang,atom_to_list,1}, match_spec),
?line lists:seq(2, 7),
- ?line atom_to_list(non_literal(nisse)),
+ ?line _ = atom_to_list(non_literal(nisse)),
?line expect({trace,Self,return_from,{lists,seq,2},[2,3,4,5,6,7]}),
?line expect({trace,Self,return_from,{erlang,atom_to_list,1},"nisse"}),
@@ -539,10 +555,11 @@ return_trace() ->
nasty() ->
exit(good_bye).
-exception_trace(doc) -> "Test the new exception trace.";
-exception_trace(suite) -> [];
-exception_trace(Config) when is_list(Config) ->
- exception_trace().
+exception_trace(_Config) ->
+ case test_server:is_native(lists) of
+ true -> {skip,"lists is native"};
+ false -> exception_trace()
+ end.
exception_trace() ->
X = {save,me},
@@ -600,7 +617,7 @@ exception_trace() ->
trace_info({erlang,atom_to_list,1}, match_spec),
?line lists:seq(2, 7),
- ?line atom_to_list(non_literal(nisse)),
+ ?line _ = atom_to_list(non_literal(nisse)),
?line expect({trace,Self,return_from,{lists,seq,2},[2,3,4,5,6,7]}),
?line expect({trace,Self,return_from,{erlang,atom_to_list,1},"nisse"}),
@@ -934,6 +951,10 @@ exception_nocatch(Config) when is_list(Config) ->
exception_nocatch().
exception_nocatch() ->
+ Deep4LocThrow = get_deep_4_loc({throw,[42]}),
+ Deep4LocError = get_deep_4_loc({error,[42]}),
+ Deep4LocBadmatch = get_deep_4_loc({'=',[a,b]}),
+
Prog = [{'_',[],[{exception_trace}]}],
?line 1 = erlang:trace_pattern({?MODULE,deep_1,'_'}, Prog),
?line 1 = erlang:trace_pattern({?MODULE,deep_2,'_'}, Prog),
@@ -959,8 +980,9 @@ exception_nocatch() ->
{trace,t2,exception_from,{erlang,throw,1},
{error,{nocatch,Q2}}}],
exception_from, {error,{nocatch,Q2}}),
- ?line expect({trace,T2,exit,{{nocatch,Q2},[{erlang,throw,[Q2]},
- {?MODULE,deep_4,1}]}}),
+ ?line expect({trace,T2,exit,{{nocatch,Q2},[{erlang,throw,[Q2],[]},
+ {?MODULE,deep_4,1,
+ Deep4LocThrow}]}}),
?line Q3 = {dump,[dump,{dump}]},
?line T3 =
exception_nocatch(?LINE, error, [Q3], 4,
@@ -968,18 +990,29 @@ exception_nocatch() ->
{trace,t3,exception_from,{erlang,error,1},
{error,Q3}}],
exception_from, {error,Q3}),
- ?line expect({trace,T3,exit,{Q3,[{erlang,error,[Q3]},
- {?MODULE,deep_4,1}]}}),
+ ?line expect({trace,T3,exit,{Q3,[{erlang,error,[Q3],[]},
+ {?MODULE,deep_4,1,Deep4LocError}]}}),
?line T4 =
exception_nocatch(?LINE, '=', [17,4711], 5, [],
exception_from, {error,{badmatch,4711}}),
- ?line expect({trace,T4,exit,{{badmatch,4711},[{?MODULE,deep_4,1}]}}),
+ ?line expect({trace,T4,exit,{{badmatch,4711},
+ [{?MODULE,deep_4,1,Deep4LocBadmatch}]}}),
%%
?line erlang:trace_pattern({?MODULE,'_','_'}, false),
?line erlang:trace_pattern({erlang,'_','_'}, false),
?line expect(),
?line ok.
+get_deep_4_loc(Arg) ->
+ try
+ deep_4(Arg),
+ ?t:fail(should_not_return_to_here)
+ catch
+ _:_ ->
+ [{?MODULE,deep_4,1,Loc0}|_] = erlang:get_stacktrace(),
+ Loc0
+ end.
+
exception_nocatch(Line, B, Q, N, Extra, Tag, R) ->
?line io:format("== Subtest: ~w", [Line]),
?line Go = make_ref(),
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index a062cea117..25ce94096f 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -20,28 +20,34 @@
-module(code_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
- new_binary_types/1,t_check_process_code/1,t_check_process_code_ets/1,
+ versions/1,new_binary_types/1,
+ t_check_process_code/1,t_check_old_code/1,
+ t_check_process_code_ets/1,
external_fun/1,get_chunk/1,module_md5/1,make_stub/1,
- make_stub_many_funs/1,constant_pools/1,
- false_dependency/1,coverage/1]).
+ make_stub_many_funs/1,constant_pools/1,constant_refc_binaries/1,
+ false_dependency/1,coverage/1,fun_confusion/1]).
+-define(line_trace, 1).
-include_lib("test_server/include/test_server.hrl").
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [new_binary_types, t_check_process_code,
- t_check_process_code_ets, external_fun, get_chunk,
+ [versions, new_binary_types, t_check_process_code,
+ t_check_process_code_ets, t_check_old_code, external_fun, get_chunk,
module_md5, make_stub, make_stub_many_funs,
- constant_pools, false_dependency, coverage].
+ constant_pools, constant_refc_binaries, false_dependency,
+ coverage, fun_confusion].
groups() ->
[].
init_per_suite(Config) ->
+ erts_debug:set_internal_state(available_internal_state, true),
Config.
end_per_suite(_Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
ok.
init_per_group(_GroupName, Config) ->
@@ -50,6 +56,60 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+%% Make sure that at most two versions of a module (current and old)
+%% can be loaded at the same time.
+versions(Config) when is_list(Config) ->
+ V1 = compile_version(1, Config),
+ V2 = compile_version(2, Config),
+ V3 = compile_version(3, Config),
+
+ {ok,P1,1} = load_version(V1, 1),
+ {ok,P2,2} = load_version(V2, 2),
+ {error,not_purged} = load_version(V2, 2),
+ {error,not_purged} = load_version(V3, 3),
+
+ 1 = check_version(P1),
+ 2 = check_version(P2),
+ 2 = versions:version(),
+
+ %% Kill processes, unload code.
+ P1 ! P2 ! done,
+ _ = monitor(process, P1),
+ _ = monitor(process, P2),
+ receive
+ {'DOWN',_,process,P1,normal} -> ok
+ end,
+ receive
+ {'DOWN',_,process,P2,normal} -> ok
+ end,
+ true = erlang:purge_module(versions),
+ true = erlang:delete_module(versions),
+ true = erlang:purge_module(versions),
+ ok.
+
+compile_version(Version, Config) ->
+ Data = ?config(data_dir, Config),
+ File = filename:join(Data, "versions"),
+ {ok,versions,Bin} = compile:file(File, [{d,'VERSION',Version},
+ binary,report]),
+ Bin.
+
+load_version(Code, Ver) ->
+ case erlang:load_module(versions, Code) of
+ {module,versions} ->
+ Pid = spawn_link(versions, loop, []),
+ Ver = versions:version(),
+ Ver = check_version(Pid),
+ {ok,Pid,Ver};
+ Error ->
+ Error
+ end.
+
+check_version(Pid) ->
+ Pid ! {self(),version},
+ receive
+ {Pid,version,Version} ->
+ Version
+ end.
new_binary_types(Config) when is_list(Config) ->
?line Data = ?config(data_dir, Config),
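The new versions/1 case above exercises the "current plus old" rule of code loading: a module may have at most two loaded versions, and a third erlang:load_module/2 fails with {error,not_purged} until the old code is purged. A condensed sketch of that sequence, assuming V1, V2 and V3 are beams of the same versions module compiled as in compile_version/2:

    %% Sketch only; mirrors the sequence asserted by versions/1.
    two_version_rule(V1, V2, V3) ->
        {module,versions} = erlang:load_module(versions, V1),   %% current: V1
        {module,versions} = erlang:load_module(versions, V2),   %% V1 becomes old
        {error,not_purged} = erlang:load_module(versions, V3),  %% no third slot
        true = erlang:purge_module(versions),                   %% drop old V1
        {module,versions} = erlang:load_module(versions, V3),   %% V2 becomes old
        ok.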
@@ -248,9 +308,36 @@ fun_refc(F) ->
Count.
+%% Test the erlang:check_old_code/1 BIF.
+t_check_old_code(Config) when is_list(Config) ->
+ ?line Data = ?config(data_dir, Config),
+ ?line File = filename:join(Data, "my_code_test"),
+
+ ?line erlang:purge_module(my_code_test),
+ ?line erlang:delete_module(my_code_test),
+ ?line catch erlang:purge_module(my_code_test),
+
+ ?line false = erlang:check_old_code(my_code_test),
+
+ ?line {ok,my_code_test,Code} = compile:file(File, [binary]),
+ ?line {module,my_code_test} = code:load_binary(my_code_test, File, Code),
+
+ ?line false = erlang:check_old_code(my_code_test),
+ ?line {module,my_code_test} = code:load_binary(my_code_test, File, Code),
+ ?line true = erlang:check_old_code(my_code_test),
+
+ ?line true = erlang:purge_module(my_code_test),
+ ?line true = erlang:delete_module(my_code_test),
+ ?line true = erlang:purge_module(my_code_test),
+
+ ?line {'EXIT',_} = (catch erlang:check_old_code([])),
+
+ ok.
+
external_fun(Config) when is_list(Config) ->
?line false = erlang:function_exported(another_code_test, x, 1),
- ?line ExtFun = erlang:make_fun(id(another_code_test), x, 1),
+ AnotherCodeTest = id(another_code_test),
+ ExtFun = fun AnotherCodeTest:x/1,
?line {'EXIT',{undef,_}} = (catch ExtFun(answer)),
?line false = erlang:function_exported(another_code_test, x, 1),
?line false = lists:member(another_code_test, erlang:loaded()),
@@ -375,7 +462,7 @@ make_stub_many_funs(Config) when is_list(Config) ->
constant_pools(Config) when is_list(Config) ->
?line Data = ?config(data_dir, Config),
?line File = filename:join(Data, "literals"),
- ?line {ok,literals,Code} = compile:file(File, [report,binary,constant_pool]),
+ ?line {ok,literals,Code} = compile:file(File, [report,binary]),
?line {module,literals} = erlang:load_module(literals,
make_sub_binary(Code)),
@@ -446,6 +533,131 @@ create_old_heap() ->
create_old_heap()
end.
+constant_refc_binaries(Config) when is_list(Config) ->
+ wait_for_memory_deallocations(),
+ Bef = memory_binary(),
+ io:format("Binary data (bytes) before test: ~p\n", [Bef]),
+
+    %% Compile the literals module.
+ Data = ?config(data_dir, Config),
+ File = filename:join(Data, "literals"),
+ {ok,literals,Code} = compile:file(File, [report,binary]),
+
+ %% Load the code and make sure that the binary is a refc binary.
+ {module,literals} = erlang:load_module(literals, Code),
+ Bin = literals:binary(),
+ Sz = byte_size(Bin),
+ Check = erlang:md5(Bin),
+ io:format("Size of literal refc binary: ~p\n", [Sz]),
+ {refc_binary,Sz,_,_} = erts_debug:get_internal_state({binary_info,Bin}),
+ true = erlang:delete_module(literals),
+ false = erlang:check_process_code(self(), literals),
+ true = erlang:purge_module(literals),
+
+ %% Now try to provoke a memory leak.
+ provoke_mem_leak(10, Code, Check),
+
+ %% Calculate the change in allocated binary data.
+ erlang:garbage_collect(),
+ wait_for_memory_deallocations(),
+ Aft = memory_binary(),
+ io:format("Binary data (bytes) after test: ~p", [Aft]),
+ Diff = Aft - Bef,
+ if
+ Diff < 0 ->
+ io:format("~p less bytes", [abs(Diff)]);
+ Diff > 0 ->
+ io:format("~p more bytes", [Diff]);
+ true ->
+ ok
+ end,
+
+ %% Test for leaks. We must accept some natural variations in
+ %% the size of allocated binaries.
+ if
+ Diff > 64*1024 ->
+ ?t:fail(binary_leak);
+ true ->
+ ok
+ end.
+
+memory_binary() ->
+ try
+ erlang:memory(binary)
+ catch
+ error:notsup ->
+ 0
+ end.
+
+provoke_mem_leak(0, _, _) -> ok;
+provoke_mem_leak(N, Code, Check) ->
+ {module,literals} = erlang:load_module(literals, Code),
+
+ %% Create several processes with references to the literal binary.
+ Self = self(),
+ Pids = [spawn_link(fun() ->
+ create_binaries(Self, NumRefs, Check)
+ end) || NumRefs <- lists:seq(1, 10)],
+ [receive {started,Pid} -> ok end || Pid <- Pids],
+
+ %% Make the code old and remove references to the constant pool
+ %% in all processes.
+ true = erlang:delete_module(literals),
+ Ms = [spawn_monitor(fun() ->
+ false = erlang:check_process_code(Pid, literals)
+ end) || Pid <- Pids],
+ [receive
+ {'DOWN',R,process,P,normal} ->
+ ok
+ end || {P,R} <- Ms],
+
+ %% Purge the code.
+ true = erlang:purge_module(literals),
+
+ %% Tell the processes that the code has been purged.
+ [begin
+ monitor(process, Pid),
+ Pid ! purged
+ end || Pid <- Pids],
+
+ %% Wait for all processes to terminate.
+ [receive
+ {'DOWN',_,process,Pid,normal} ->
+ ok
+ end || Pid <- Pids],
+
+ %% We now expect that the binary has been deallocated.
+ provoke_mem_leak(N-1, Code, Check).
+
+create_binaries(Parent, NumRefs, Check) ->
+ Bin = literals:binary(),
+ Bins = lists:duplicate(NumRefs, Bin),
+ {bits,Bits} = literals:bits(),
+ Parent ! {started,self()},
+ receive
+ purged ->
+ %% The code has been purged. Now make sure that
+ %% the binaries haven't been corrupted.
+ Check = erlang:md5(Bin),
+ [Bin = B || B <- Bins],
+ <<42:13,Bin/binary>> = Bits,
+
+ %% Remove all references to the binaries
+ %% Doing it explicitly like this ensures that
+ %% the binaries are gone when the parent process
+ %% receives the 'DOWN' message.
+ erlang:garbage_collect()
+ end.
+
+wait_for_memory_deallocations() ->
+ try
+ erts_debug:set_internal_state(wait, deallocations)
+ catch
+ error:undef ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ wait_for_memory_deallocations()
+ end.
+
%% OTP-7559: c_p->cp could contain garbage and create a false dependency
%% to a module in a process. (Thanks to Richard Carlsson.)
false_dependency(Config) when is_list(Config) ->
@@ -527,6 +739,30 @@ coverage(Config) when is_list(Config) ->
?line {'EXIT',{badarg,_}} = (catch erlang:module_loaded(42)),
ok.
+fun_confusion(Config) when is_list(Config) ->
+ Data = ?config(data_dir, Config),
+ Src = filename:join(Data, "fun_confusion"),
+ Mod = fun_confusion,
+
+ %% Load first version of module.
+ compile_load(Mod, Src, 1),
+ F1 = Mod:f(),
+ 1 = F1(),
+
+ %% Load second version of module.
+ compile_load(Mod, Src, 2),
+ F2 = Mod:f(),
+
+ %% F1 should refer to the old code, not the newly loaded code.
+ 1 = F1(),
+ 2 = F2(),
+ ok.
+
+compile_load(Mod, Src, Ver) ->
+ {ok,Mod,Code1} = compile:file(Src, [binary,{d,version,Ver}]),
+ {module,Mod} = code:load_binary(Mod, "fun_confusion.beam", Code1),
+ ok.
+
%% Utilities.
make_sub_binary(Bin) when is_binary(Bin) ->
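Both constant_pools/1 and the new constant_refc_binaries/1 depend on how literal binaries are stored: binaries larger than 64 bytes are reference-counted ("refc") binaries kept outside the process heap, so a module literal the size of literals:binary/0 must stay alive after the module is purged for as long as some process still references it. A small sketch of the classification, reusing the same debug BIF as the test (requires available_internal_state to have been enabled, as in init_per_suite/1; how the non-refc case is reported is an assumption here):

    %% Sketch: tell refc binaries apart from small (heap) binaries.
    binary_kind(Bin) when is_binary(Bin) ->
        case erts_debug:get_internal_state({binary_info,Bin}) of
            {refc_binary,Size,_,_} -> {refc,Size};  %% off-heap, ref-counted
            Other -> {not_refc,Other}               %% heap or sub binary info
        end.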
diff --git a/erts/emulator/test/code_SUITE_data/fun_confusion.erl b/erts/emulator/test/code_SUITE_data/fun_confusion.erl
new file mode 100644
index 0000000000..16000861df
--- /dev/null
+++ b/erts/emulator/test/code_SUITE_data/fun_confusion.erl
@@ -0,0 +1,31 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(fun_confusion).
+
+-export([f/0]).
+
+f() ->
+ fun() -> version() end.
+
+version() ->
+ %% Changing the value returned here should change
+ %% the identity of the fun in f/0.
+ ?version.
+
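A short usage sketch of the module above: a fun returned by f/0 stays bound to the code version that created it, so after a code swap the old fun must keep returning the old ?version while a freshly created fun sees the new one; this is exactly what fun_confusion/1 in code_SUITE asserts. Assuming the file is compiled twice with different version defines:

    %% Sketch of the expected behaviour (shell style), not part of the suite.
    {ok,fun_confusion,B1} = compile:file("fun_confusion", [binary,{d,version,1}]),
    {module,fun_confusion} = code:load_binary(fun_confusion, "fun_confusion.beam", B1),
    F1 = fun_confusion:f(),
    1 = F1(),
    {ok,fun_confusion,B2} = compile:file("fun_confusion", [binary,{d,version,2}]),
    {module,fun_confusion} = code:load_binary(fun_confusion, "fun_confusion.beam", B2),
    1 = F1(),                    %% the old fun still runs the old code
    2 = (fun_confusion:f())().   %% a new fun picks up the new version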
diff --git a/erts/emulator/test/code_SUITE_data/literals.erl b/erts/emulator/test/code_SUITE_data/literals.erl
index 9f99b1a780..658427095e 100644
--- a/erts/emulator/test/code_SUITE_data/literals.erl
+++ b/erts/emulator/test/code_SUITE_data/literals.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -18,7 +18,7 @@
%%
-module(literals).
--export([a/0,b/0,huge_bignum/0]).
+-export([a/0,b/0,huge_bignum/0,binary/0,unused_binaries/0,bits/0]).
a() ->
{a,42.0,[7,38877938333399637266518333334747]}.
@@ -81,3 +81,22 @@ b() ->
huge_bignum() ->
36#9987333333392789234879423987243987423432879423879234897423879423874328794323248423872348742323487423987423879243872347824374238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR0737376766798779987333333392789234879423987243987423432879423879234897423879423874328794323248423872348742323487423987423879243872347824374238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987779JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR0737376766798779987333333392789234879423987243987423432879423879234897423879423874328794323248423872348742323487423987423879243872347824374238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR07373767667987769707660766789076874238792437842374283926276478623462342363243SDKJFSDLEFHDSHJFE48H3838973879JFSDKJLFASLKJVBJKLEJKLDYEIOEHFEOU39873487SFHJSLDFASUIDFHSDHFEYR0R987YDFHDHFDLKHFSIDFHSIDFSIFDHSIFHWIHR073737676679877.
+
+-define(TIMES_FOUR(X), X,X,X,X).
+-define(BYTES_256, 0:256,1:256,2:256,3:256, 4:256,5:256,6:256,7:256).
+-define(KB_1, ?TIMES_FOUR(?BYTES_256)).
+-define(KB_4, ?TIMES_FOUR(?KB_1)).
+-define(KB_16, ?TIMES_FOUR(?KB_4)).
+-define(KB_64, ?TIMES_FOUR(?KB_16)).
+-define(KB_128, ?TIMES_FOUR(?KB_64)).
+-define(MB_1, ?TIMES_FOUR(?KB_128)).
+
+binary() ->
+ %% Too big to be a heap binary.
+ <<?MB_1>>.
+
+unused_binaries() ->
+ {<<?KB_128>>,<<?BYTES_256>>}.
+
+bits() ->
+ {bits,<<42:13,?MB_1>>}.
diff --git a/erts/emulator/test/code_SUITE_data/versions.erl b/erts/emulator/test/code_SUITE_data/versions.erl
new file mode 100644
index 0000000000..7a6fd8847d
--- /dev/null
+++ b/erts/emulator/test/code_SUITE_data/versions.erl
@@ -0,0 +1,33 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(versions).
+-export([loop/0,version/0]).
+
+loop() ->
+ receive
+ {Pid,version} ->
+ Pid ! {self(),version,version()},
+ loop();
+ done ->
+ ok
+ end.
+
+version() ->
+ ?VERSION.
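For completeness, the request/reply protocol served by loop/0 above is the one code_SUITE:check_version/1 speaks; a client round trip looks like this (sketch only):

    Pid = spawn(versions, loop, []),
    Pid ! {self(),version},
    Version = receive {Pid,version,V} -> V end,
    Pid ! done.    %% makes loop/0 return and the process exit normally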
diff --git a/erts/emulator/test/ddll_SUITE_data/dummy_drv.c b/erts/emulator/test/ddll_SUITE_data/dummy_drv.c
index e0d5067743..86f2abf1b1 100644
--- a/erts/emulator/test/ddll_SUITE_data/dummy_drv.c
+++ b/erts/emulator/test/ddll_SUITE_data/dummy_drv.c
@@ -7,7 +7,7 @@
static ErlDrvPort erlang_port;
static ErlDrvData dummy_start(ErlDrvPort, char*);
-static void dummy_read(ErlDrvData port, char *buf, int count);
+static void dummy_read(ErlDrvData port, char *buf, ErlDrvSizeT count);
static void dummy_stop(ErlDrvData), easy_read(ErlDrvData, char*, int);
static ErlDrvEntry dummy_driver_entry = {
@@ -18,6 +18,21 @@ static ErlDrvEntry dummy_driver_entry = {
NULL,
NULL,
"dummy_drv",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -37,7 +52,7 @@ static ErlDrvData dummy_start(ErlDrvPort port,char *buf)
return (ErlDrvData)port;
}
-static void dummy_read(ErlDrvData port, char *buf, int count)
+static void dummy_read(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
driver_output(erlang_port, buf, count);
}
diff --git a/erts/emulator/test/ddll_SUITE_data/echo_drv.c b/erts/emulator/test/ddll_SUITE_data/echo_drv.c
index edf78a979d..2b3510c641 100644
--- a/erts/emulator/test/ddll_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/ddll_SUITE_data/echo_drv.c
@@ -3,9 +3,10 @@
static ErlDrvPort erlang_port;
static ErlDrvData echo_start(ErlDrvPort, char *);
-static void from_erlang(ErlDrvData, char*, int);
-static int echo_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags);
+static void from_erlang(ErlDrvData, char*, ErlDrvSizeT);
+static ErlDrvSSizeT echo_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen, unsigned *ret_flags);
static ErlDrvEntry echo_driver_entry = {
NULL, /* Init */
echo_start,
@@ -21,7 +22,15 @@ static ErlDrvEntry echo_driver_entry = {
NULL,
NULL,
NULL,
- echo_call
+ echo_call,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
};
DRIVER_INIT(echo_drv)
@@ -36,14 +45,15 @@ echo_start(ErlDrvPort port, char *buf)
}
static void
-from_erlang(ErlDrvData data, char *buf, int count)
+from_erlang(ErlDrvData data, char *buf, ErlDrvSizeT count)
{
driver_output((ErlDrvPort) data, buf, count);
}
-static int
-echo_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags)
+static ErlDrvSSizeT
+echo_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen,
+ unsigned *ret_flags)
{
*rbuf = buf;
*ret_flags |= DRIVER_CALL_KEEP_BUFFER;
diff --git a/erts/emulator/test/ddll_SUITE_data/echo_drv_fail_init.c b/erts/emulator/test/ddll_SUITE_data/echo_drv_fail_init.c
index 3b2a44d907..26aa03a012 100644
--- a/erts/emulator/test/ddll_SUITE_data/echo_drv_fail_init.c
+++ b/erts/emulator/test/ddll_SUITE_data/echo_drv_fail_init.c
@@ -3,9 +3,10 @@
static ErlDrvPort erlang_port;
static ErlDrvData echo_start(ErlDrvPort, char *);
-static void from_erlang(ErlDrvData, char*, int);
-static int echo_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags);
+static void from_erlang(ErlDrvData, char*, ErlDrvSizeT);
+static ErlDrvSSizeT echo_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen, unsigned *ret_flags);
static int echo_failing_init(void);
static ErlDrvEntry echo_driver_entry = {
@@ -23,7 +24,15 @@ static ErlDrvEntry echo_driver_entry = {
NULL,
NULL,
NULL,
- echo_call
+ echo_call,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
};
DRIVER_INIT(echo_drv)
@@ -43,14 +52,15 @@ echo_start(ErlDrvPort port, char *buf)
}
static void
-from_erlang(ErlDrvData data, char *buf, int count)
+from_erlang(ErlDrvData data, char *buf, ErlDrvSizeT count)
{
driver_output((ErlDrvPort) data, buf, count);
}
-static int
-echo_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags)
+static ErlDrvSSizeT
+echo_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen,
+ unsigned *ret_flags)
{
*rbuf = buf;
*ret_flags |= DRIVER_CALL_KEEP_BUFFER;
diff --git a/erts/emulator/test/ddll_SUITE_data/initfail_drv.c b/erts/emulator/test/ddll_SUITE_data/initfail_drv.c
index b676ff5121..ad241b9c4f 100644
--- a/erts/emulator/test/ddll_SUITE_data/initfail_drv.c
+++ b/erts/emulator/test/ddll_SUITE_data/initfail_drv.c
@@ -3,7 +3,7 @@
static ErlDrvPort erlang_port;
static ErlDrvData easy_start(ErlDrvPort, char*);
-static void easy_stop(ErlDrvData), easy_read(ErlDrvData, char*, int);
+static void easy_stop(ErlDrvData), easy_read(ErlDrvData, char*, ErlDrvSizeT);
static ErlDrvEntry easy_driver_entry =
{
@@ -14,6 +14,21 @@ static ErlDrvEntry easy_driver_entry =
NULL,
NULL,
"easy",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -34,7 +49,7 @@ static ErlDrvData easy_start(ErlDrvPort port, char *buf)
return (ErlDrvData)port;
}
-static void easy_read(ErlDrvData port, char *buf, int count)
+static void easy_read(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
driver_output(erlang_port, buf, count);
}
diff --git a/erts/emulator/test/ddll_SUITE_data/lock_drv.c b/erts/emulator/test/ddll_SUITE_data/lock_drv.c
index 2ec8fa3a29..d2605c5bfc 100644
--- a/erts/emulator/test/ddll_SUITE_data/lock_drv.c
+++ b/erts/emulator/test/ddll_SUITE_data/lock_drv.c
@@ -3,9 +3,10 @@
static ErlDrvPort erlang_port;
static ErlDrvData echo_start(ErlDrvPort, char *);
-static void from_erlang(ErlDrvData, char*, int);
-static int echo_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags);
+static void from_erlang(ErlDrvData, char*, ErlDrvSizeT);
+static ErlDrvSSizeT echo_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen, unsigned *ret_flags);
static ErlDrvEntry echo_driver_entry = {
NULL, /* Init */
echo_start,
@@ -21,7 +22,15 @@ static ErlDrvEntry echo_driver_entry = {
NULL,
NULL,
NULL,
- echo_call
+ echo_call,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
};
DRIVER_INIT(echo_drv)
@@ -37,14 +46,15 @@ echo_start(ErlDrvPort port, char *buf)
}
static void
-from_erlang(ErlDrvData data, char *buf, int count)
+from_erlang(ErlDrvData data, char *buf, ErlDrvSizeT count)
{
driver_output((ErlDrvPort) data, buf, count);
}
-static int
-echo_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags)
+static ErlDrvSSizeT
+echo_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen,
+ unsigned *ret_flags)
{
ErlDrvPort port = (ErlDrvPort) drv_data;
driver_lock_driver(port);
diff --git a/erts/emulator/test/ddll_SUITE_data/noinit_drv.c b/erts/emulator/test/ddll_SUITE_data/noinit_drv.c
index 931386a305..5abf5c4dc6 100644
--- a/erts/emulator/test/ddll_SUITE_data/noinit_drv.c
+++ b/erts/emulator/test/ddll_SUITE_data/noinit_drv.c
@@ -3,7 +3,7 @@
static ErlDrvPort erlang_port;
static ErlDrvData easy_start(ErlDrvPort, char*);
-static void easy_stop(ErlDrvData), easy_read(ErlDrvData, char*, int);
+static void easy_stop(ErlDrvData), easy_read(ErlDrvData, char*, ErlDrvSizeT);
static ErlDrvEntry easy_driver_entry =
{
@@ -14,6 +14,21 @@ static ErlDrvEntry easy_driver_entry =
NULL,
NULL,
"easy",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -28,7 +43,7 @@ DRIVER_INIT(noinit_drv)
/*
* Provoke an error when loading the module.
*/
-int no_driver_init(void *handle)
+ErlDrvEntry* no_driver_init(void *handle)
#endif
{
erlang_port = (ErlDrvPort)-1;
@@ -46,7 +61,7 @@ static ErlDrvData easy_start(ErlDrvPort port,char *buf)
return (ErlDrvData)port;
}
-static void easy_read(ErlDrvData port, char *buf, int count)
+static void easy_read(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
driver_output(erlang_port, buf, count);
}
diff --git a/erts/emulator/test/ddll_SUITE_data/wrongname_drv.c b/erts/emulator/test/ddll_SUITE_data/wrongname_drv.c
index 3a35820ee7..ac7efa30de 100644
--- a/erts/emulator/test/ddll_SUITE_data/wrongname_drv.c
+++ b/erts/emulator/test/ddll_SUITE_data/wrongname_drv.c
@@ -7,7 +7,7 @@
static ErlDrvPort erlang_port;
static ErlDrvData easy_start(ErlDrvPort, char*);
-static void easy_stop(ErlDrvData), easy_read(ErlDrvData, char*, int);
+static void easy_stop(ErlDrvData), easy_read(ErlDrvData, char*, ErlDrvSizeT);
static ErlDrvEntry easy_driver_entry =
{
@@ -18,6 +18,21 @@ static ErlDrvEntry easy_driver_entry =
NULL,
NULL,
"easy",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -38,7 +53,7 @@ static ErlDrvData easy_start(ErlDrvPort port,char *buf)
return (ErlDrvData)port;
}
-static void easy_read(ErlDrvData port, char *buf, int count)
+static void easy_read(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
driver_output(erlang_port, buf, count);
}
diff --git a/erts/emulator/test/decode_packet_SUITE.erl b/erts/emulator/test/decode_packet_SUITE.erl
index c0499554eb..4acbe8c6e0 100644
--- a/erts/emulator/test/decode_packet_SUITE.erl
+++ b/erts/emulator/test/decode_packet_SUITE.erl
@@ -26,12 +26,14 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
- basic/1, packet_size/1, neg/1, http/1, line/1, ssl/1, otp_8536/1]).
+ basic/1, packet_size/1, neg/1, http/1, line/1, ssl/1, otp_8536/1,
+ otp_9389/1, otp_9389_line/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [basic, packet_size, neg, http, line, ssl, otp_8536].
+ [basic, packet_size, neg, http, line, ssl, otp_8536,
+ otp_9389, otp_9389_line].
groups() ->
[].
@@ -251,6 +253,28 @@ packet_size(Config) when is_list(Config) ->
?line {error,_} = decode_pkt(4,<<Size:32,Packet/binary>>)
end,
lists:seq(-10,-1)),
+
+ %% Test OTP-9389, long HTTP header lines.
+ Opts = [{packet_size, 128}],
+ Pkt = list_to_binary(["GET / HTTP/1.1\r\nHost: localhost\r\nLink: /",
+ string:chars($Y, 64), "\r\n\r\n"]),
+ <<Pkt1:50/binary, Pkt2/binary>> = Pkt,
+ ?line {ok, {http_request,'GET',{abs_path,"/"},{1,1}}, Rest1} =
+ erlang:decode_packet(http, Pkt1, Opts),
+ ?line {ok, {http_header,_,'Host',_,"localhost"}, Rest2} =
+ erlang:decode_packet(httph, Rest1, Opts),
+ ?line {more, undefined} = erlang:decode_packet(httph, Rest2, Opts),
+ ?line {ok, {http_header,_,"Link",_,_}, _} =
+ erlang:decode_packet(httph, list_to_binary([Rest2, Pkt2]), Opts),
+
+ Pkt3 = list_to_binary(["GET / HTTP/1.1\r\nHost: localhost\r\nLink: /",
+ string:chars($Y, 129), "\r\n\r\n"]),
+ ?line {ok, {http_request,'GET',{abs_path,"/"},{1,1}}, Rest3} =
+ erlang:decode_packet(http, Pkt3, Opts),
+ ?line {ok, {http_header,_,'Host',_,"localhost"}, Rest4} =
+ erlang:decode_packet(httph, Rest3, Opts),
+ ?line {error, invalid} = erlang:decode_packet(httph, Rest4, Opts),
+
ok.
@@ -557,3 +581,35 @@ decode_pkt(Type,Bin,Opts) ->
%%io:format(" -> ~p\n",[Res]),
Res.
+otp_9389(doc) -> ["Verify line_length works correctly for HTTP headers"];
+otp_9389(suite) -> [];
+otp_9389(Config) when is_list(Config) ->
+ Opts = [{packet_size, 16384}, {line_length, 3000}],
+ Pkt = list_to_binary(["GET / HTTP/1.1\r\nHost: localhost\r\nLink: /",
+ string:chars($X, 8192),
+ "\r\nContent-Length: 0\r\n\r\n"]),
+ <<Pkt1:5000/binary, Pkt2/binary>> = Pkt,
+ {ok, {http_request,'GET',{abs_path,"/"},{1,1}}, Rest1} =
+ erlang:decode_packet(http, Pkt1, Opts),
+ {ok, {http_header,_,'Host',_,"localhost"}, Rest2} =
+ erlang:decode_packet(httph, Rest1, Opts),
+ {more, undefined} = erlang:decode_packet(httph, Rest2, Opts),
+ {ok, {http_header,_,"Link",_,Link}, Rest3} =
+ erlang:decode_packet(httph, list_to_binary([Rest2, Pkt2]), Opts),
+ true = (length(Link) > 8000),
+ {ok, {http_header,_,'Content-Length',_,"0"}, <<"\r\n">>} =
+ erlang:decode_packet(httph, Rest3, Opts),
+ ok.
+
+otp_9389_line(doc) -> ["Verify packet_size works correctly for line mode"];
+otp_9389_line(suite) -> [];
+otp_9389_line(Config) when is_list(Config) ->
+ Opts = [{packet_size, 20}],
+ Line1 = <<"0123456789012345678\n">>,
+ Line2 = <<"0123456789\n">>,
+ Line3 = <<"01234567890123456789\n">>,
+ Pkt = list_to_binary([Line1, Line2, Line3]),
+ ?line {ok, Line1, Rest1} = erlang:decode_packet(line, Pkt, Opts),
+ ?line {ok, Line2, Rest2} = erlang:decode_packet(line, Rest1, Opts),
+ ?line {error, invalid} = erlang:decode_packet(line, Rest2, Opts),
+ ok.
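The line-mode case boils down to a simple rule: with {packet_size,N}, a line of at most N bytes (terminating newline included) is returned whole, and a longer line is rejected as {error,invalid} rather than silently truncated. A shell-style sketch of that rule, mirroring otp_9389_line/1 above:

    Opts = [{packet_size, 20}],
    {ok, <<"short line\n">>, <<>>} =
        erlang:decode_packet(line, <<"short line\n">>, Opts),
    {error, invalid} =
        erlang:decode_packet(line, <<"this line is much too long to fit\n">>, Opts).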
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index 4bebae51cc..08308629fe 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -18,7 +18,7 @@
%%
-module(distribution_SUITE).
--compile(r12).
+-compile(r13).
%% Tests distribution and the tcp driver.
@@ -37,7 +37,7 @@
dist_auto_connect_never/1, dist_auto_connect_once/1,
dist_parallel_send/1,
atom_roundtrip/1,
- atom_roundtrip_r12b/1,
+ atom_roundtrip_r13b/1,
contended_atom_cache_entry/1,
bad_dist_structure/1,
bad_dist_ext_receive/1,
@@ -62,7 +62,7 @@ all() ->
link_to_dead_new_node, applied_monitor_node,
ref_port_roundtrip, nil_roundtrip, stop_dist,
{group, trap_bif}, {group, dist_auto_connect},
- dist_parallel_send, atom_roundtrip, atom_roundtrip_r12b,
+ dist_parallel_send, atom_roundtrip, atom_roundtrip_r13b,
contended_atom_cache_entry, bad_dist_structure, {group, bad_dist_ext}].
groups() ->
@@ -173,15 +173,20 @@ bulk_sendsend(Terms, BinSize) ->
Ratio = if MonitorCount2 == 0 -> MonitorCount1 / 1.0;
true -> MonitorCount1 / MonitorCount2
end,
- %% A somewhat arbitrary ratio, but hopefully one that will accomodate
- %% a wide range of CPU speeds.
- true = (Ratio > 8.0),
- {comment,
- integer_to_list(Rate1) ++ " K/s, " ++
- integer_to_list(Rate2) ++ " K/s, " ++
- integer_to_list(MonitorCount1) ++ " monitor msgs, " ++
- integer_to_list(MonitorCount2) ++ " monitor msgs, " ++
- float_to_list(Ratio) ++ " monitor ratio"}.
+ Comment = integer_to_list(Rate1) ++ " K/s, " ++
+ integer_to_list(Rate2) ++ " K/s, " ++
+ integer_to_list(MonitorCount1) ++ " monitor msgs, " ++
+ integer_to_list(MonitorCount2) ++ " monitor msgs, " ++
+ float_to_list(Ratio) ++ " monitor ratio",
+ if
+ %% A somewhat arbitrary ratio, but hopefully one that will
+ %% accommodate a wide range of CPU speeds.
+ Ratio > 8.0 ->
+ {comment,Comment};
+ true ->
+ io:put_chars(Comment),
+ ?line ?t:fail(ratio_too_low)
+ end.
bulk_sendsend2(Terms, BinSize, BusyBufSize) ->
?line Dog = test_server:timetrap(test_server:seconds(30)),
@@ -331,7 +336,7 @@ receiver2(Num, TotSize) ->
link_to_busy(doc) -> "Test that link/1 to a busy distribution port works.";
link_to_busy(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(30)),
+ ?line Dog = test_server:timetrap(test_server:seconds(60)),
?line {ok, Node} = start_node(link_to_busy),
?line Recv = spawn(Node, erlang, apply, [fun sink/1, [link_to_busy_sink]]),
@@ -378,7 +383,7 @@ tail_applied_linker(Pid) ->
exit_to_busy(doc) -> "Test that exit/2 to a busy distribution port works.";
exit_to_busy(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(30)),
+ ?line Dog = test_server:timetrap(test_server:seconds(60)),
?line {ok, Node} = start_node(exit_to_busy),
Tracer = case os:getenv("TRACE_BUSY_DIST_PORT") of
@@ -1095,17 +1100,17 @@ atom_roundtrip(Config) when is_list(Config) ->
?line stop_node(Node),
?line ok.
-atom_roundtrip_r12b(Config) when is_list(Config) ->
- case ?t:is_release_available("r12b") of
+atom_roundtrip_r13b(Config) when is_list(Config) ->
+ case ?t:is_release_available("r13b") of
true ->
?line AtomData = atom_data(),
?line verify_atom_data(AtomData),
- ?line {ok, Node} = start_node(Config, [], "r12b"),
+ ?line {ok, Node} = start_node(Config, [], "r13b"),
?line do_atom_roundtrip(Node, AtomData),
?line stop_node(Node),
?line ok;
false ->
- ?line {skip,"No OTP R12B available"}
+ ?line {skip,"No OTP R13B available"}
end.
do_atom_roundtrip(Node, AtomData) ->
@@ -1597,8 +1602,8 @@ bad_dist_ext_control(Config) when is_list(Config) ->
?line stop_node(Victim).
bad_dist_ext_connection_id(Config) when is_list(Config) ->
- ?line {ok, Offender} = start_node(bad_dist_ext_receive_offender),
- ?line {ok, Victim} = start_node(bad_dist_ext_receive_victim),
+ ?line {ok, Offender} = start_node(bad_dist_ext_connection_id_offender),
+ ?line {ok, Victim} = start_node(bad_dist_ext_connection_id_victim),
?line start_node_monitors([Offender,Victim]),
?line Parent = self(),
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index f6cf01ce16..643357263c 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -47,8 +47,8 @@
fd_change/1,
steal_control/1,
otp_6602/1,
- 'driver_system_info_ver1.0'/1,
- 'driver_system_info_ver1.1'/1,
+ driver_system_info_base_ver/1,
+ driver_system_info_prev_ver/1,
driver_system_info_current_ver/1,
driver_monitor/1,
@@ -75,7 +75,9 @@
smp_select/1,
driver_select_use/1,
thread_mseg_alloc_cache_clean/1,
- otp_9302/1]).
+ otp_9302/1,
+ thr_free_drv/1,
+ async_blast/1]).
-export([bin_prefix/2]).
@@ -133,8 +135,8 @@ all() ->
[outputv_errors, outputv_echo, queue_echo, {group, timer},
driver_unloaded, io_ready_exit, use_fallback_pollset,
bad_fd_in_pollset, driver_event, fd_change,
- steal_control, otp_6602, 'driver_system_info_ver1.0',
- 'driver_system_info_ver1.1',
+ steal_control, otp_6602, driver_system_info_base_ver,
+ driver_system_info_prev_ver,
driver_system_info_current_ver, driver_monitor,
{group, ioq_exit}, zero_extended_marker_garb_drv,
invalid_extended_marker_drv, larger_major_vsn_drv,
@@ -143,7 +145,9 @@ all() ->
otp_6879, caller, many_events, missing_callbacks,
smp_select, driver_select_use,
thread_mseg_alloc_cache_clean,
- otp_9302].
+ otp_9302,
+ thr_free_drv,
+ async_blast].
groups() ->
[{timer, [],
@@ -1079,19 +1083,19 @@ otp_6602(Config) when is_list(Config) ->
-define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES2).
-'driver_system_info_ver1.0'(doc) ->
+'driver_system_info_base_ver'(doc) ->
[];
-'driver_system_info_ver1.0'(suite) ->
+'driver_system_info_base_ver'(suite) ->
[];
-'driver_system_info_ver1.0'(Config) when is_list(Config) ->
- ?line driver_system_info_test(Config, sys_info_1_0_drv).
+'driver_system_info_base_ver'(Config) when is_list(Config) ->
+ ?line driver_system_info_test(Config, sys_info_base_drv).
-'driver_system_info_ver1.1'(doc) ->
+'driver_system_info_prev_ver'(doc) ->
[];
-'driver_system_info_ver1.1'(suite) ->
+'driver_system_info_prev_ver'(suite) ->
[];
-'driver_system_info_ver1.1'(Config) when is_list(Config) ->
- ?line driver_system_info_test(Config, sys_info_1_1_drv).
+'driver_system_info_prev_ver'(Config) when is_list(Config) ->
+ ?line driver_system_info_test(Config, sys_info_prev_drv).
driver_system_info_current_ver(doc) ->
[];
@@ -1590,7 +1594,7 @@ otp_6879(Config) when is_list(Config) ->
end
end,
Procs),
- %% Also try it when input exeeds default buffer (256 bytes)
+ %% Also try it when input exceeds default buffer (256 bytes)
?line Data = lists:seq(1, 1000),
?line case open_port({spawn, Drv}, []) of
Port when is_port(Port) ->
@@ -1804,13 +1808,13 @@ thread_mseg_alloc_cache_clean(Config) when is_list(Config) ->
?line {skipped, "driver_alloc() using too large single block threshold"};
{_, _, 0} ->
?line {skipped, "driver_alloc() using too low single block threshold"};
- {true, MsegAllocInfo, SBCT} ->
+ {true, _MsegAllocInfo, SBCT} ->
?line DrvName = 'thr_alloc_drv',
?line Path = ?config(data_dir, Config),
?line erl_ddll:start(),
?line ok = load_driver(Path, DrvName),
?line Port = open_port({spawn, DrvName}, []),
- ?line CCI = mseg_alloc_cci(MsegAllocInfo),
+ ?line CCI = 1000,
?line ?t:format("CCI = ~p~n", [CCI]),
?line CCC = mseg_alloc_ccc(),
?line ?t:format("CCC = ~p~n", [CCC]),
@@ -1831,7 +1835,7 @@ mseg_alloc_cci(MsegAllocInfo) ->
?line CCI.
mseg_alloc_ccc() ->
- mseg_alloc_ccc(erlang:system_info({allocator,mseg_alloc})).
+ mseg_alloc_ccc(mseg_inst_info(0)).
mseg_alloc_ccc(MsegAllocInfo) ->
?line {value,{memkind, MKL}} = lists:keysearch(memkind,1,MsegAllocInfo),
@@ -1841,7 +1845,7 @@ mseg_alloc_ccc(MsegAllocInfo) ->
?line GigaCCC*1000000000 + CCC.
mseg_alloc_cached_segments() ->
- mseg_alloc_cached_segments(erlang:system_info({allocator,mseg_alloc})).
+ mseg_alloc_cached_segments(mseg_inst_info(0)).
mseg_alloc_cached_segments(MsegAllocInfo) ->
MemName = case is_halfword_vm() of
@@ -1859,6 +1863,13 @@ mseg_alloc_cached_segments(MsegAllocInfo) ->
= lists:keysearch(cached_segments, 1, SL),
?line CS.
+mseg_inst_info(I) ->
+ {value, {instance, I, Value}}
+ = lists:keysearch(I,
+ 2,
+ erlang:system_info({allocator,mseg_alloc})),
+ Value.
+
is_halfword_vm() ->
case {erlang:system_info({wordsize, internal}),
erlang:system_info({wordsize, external})} of
@@ -1902,18 +1913,105 @@ otp_9302(Config) when is_list(Config) ->
?line port_command(Port, ""),
?line {msg, block} = get_port_msg(Port, infinity),
?line {msg, job} = get_port_msg(Port, infinity),
- ?line case erlang:system_info(thread_pool_size) of
- 0 ->
- {msg, cancel} = get_port_msg(Port, infinity);
- _ ->
- ok
- end,
- ?line {msg, job} = get_port_msg(Port, infinity),
+ ?line C = case erlang:system_info(thread_pool_size) of
+ 0 ->
+ ?line {msg, cancel} = get_port_msg(Port, infinity),
+ ?line {msg, job} = get_port_msg(Port, infinity),
+ ?line false;
+ _ ->
+ case get_port_msg(Port, infinity) of
			  {msg, cancel} -> %% Cancel always fails in Rel >= 15
+ ?line {msg, job} = get_port_msg(Port, infinity),
+ ?line false;
+ {msg, job} ->
+ ?line ok,
+ ?line true
+ end
+ end,
?line {msg, end_of_jobs} = get_port_msg(Port, infinity),
?line no_msg = get_port_msg(Port, 2000),
?line port_close(Port),
+ ?line case C of
+ true ->
+ ?line {comment, "Async job cancelled"};
+ false ->
+ ?line {comment, "Async job not cancelled"}
+ end.
+
+thr_free_drv(Config) when is_list(Config) ->
+ ?line Path = ?config(data_dir, Config),
+ ?line erl_ddll:start(),
+ ?line ok = load_driver(Path, thr_free_drv),
+ ?line MemBefore = driver_alloc_size(),
+% io:format("SID=~p", [erlang:system_info(scheduler_id)]),
+ ?line Port = open_port({spawn, thr_free_drv}, []),
+ ?line MemPeek = driver_alloc_size(),
+ ?line true = is_port(Port),
+ ?line ok = thr_free_drv_control(Port, 0),
+ ?line port_close(Port),
+ ?line MemAfter = driver_alloc_size(),
+ ?line io:format("MemPeek=~p~n", [MemPeek]),
+ ?line io:format("MemBefore=~p, MemAfter=~p~n", [MemBefore, MemAfter]),
+ ?line MemBefore = MemAfter,
+ ?line case MemPeek of
+ undefined -> ok;
+ _ ->
+ ?line true = MemPeek > MemBefore
+ end,
?line ok.
+thr_free_drv_control(Port, N) ->
+ case erlang:port_control(Port, 0, "") of
+ "done" ->
+ ok;
+ "more" ->
+ erlang:yield(),
+% io:format("N=~p, SID=~p", [N, erlang:system_info(scheduler_id)]),
+ thr_free_drv_control(Port, N+1)
+ end.
+
+async_blast(Config) when is_list(Config) ->
+ ?line Path = ?config(data_dir, Config),
+ ?line erl_ddll:start(),
+ ?line ok = load_driver(Path, async_blast_drv),
+ ?line SchedOnln = erlang:system_info(schedulers_online),
+ ?line MemBefore = driver_alloc_size(),
+ ?line Start = os:timestamp(),
+ ?line Blast = fun () ->
+ Port = open_port({spawn, async_blast_drv}, []),
+ true = is_port(Port),
+ port_command(Port, ""),
+ receive
+ {Port, done} ->
+ ok
+ end,
+ port_close(Port)
+ end,
+ ?line Ps = lists:map(fun (N) ->
+ spawn_opt(Blast,
+ [{scheduler,
+ (N rem SchedOnln)+ 1},
+ monitor])
+ end,
+ lists:seq(1, 100)),
+ ?line MemMid = driver_alloc_size(),
+ ?line lists:foreach(fun ({Pid, Mon}) ->
+ receive
+ {'DOWN',Mon,process,Pid,_} -> ok
+ end
+ end, Ps),
+ ?line End = os:timestamp(),
+ ?line MemAfter = driver_alloc_size(),
+ ?line io:format("MemBefore=~p, MemMid=~p, MemAfter=~p~n",
+ [MemBefore, MemMid, MemAfter]),
+ ?line AsyncBlastTime = timer:now_diff(End,Start)/1000000,
+ ?line io:format("AsyncBlastTime=~p~n", [AsyncBlastTime]),
+ ?line MemBefore = MemAfter,
+ ?line erlang:display({async_blast_time, AsyncBlastTime}),
+ ?line ok.
+
+
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Utilities
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2077,3 +2175,41 @@ start_node(Config) when is_list(Config) ->
stop_node(Node) ->
?t:stop_node(Node).
+
+wait_deallocations() ->
+ try
+ erts_debug:set_internal_state(wait, deallocations)
+ catch error:undef ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ wait_deallocations()
+ end.
+
+driver_alloc_size() ->
+ case erlang:system_info(smp_support) of
+ true ->
+ ok;
+ false ->
+	    %% driver_alloc is also used by elements in lock-free queues,
+ %% give these some time to be deallocated...
+ receive after 100 -> ok end
+ end,
+ wait_deallocations(),
+ case erlang:system_info({allocator_sizes, driver_alloc}) of
+ false ->
+ undefined;
+ MemInfo ->
+ CS = lists:foldl(
+ fun ({instance, _, L}, Acc) ->
+ {value,{_,SBMBCS}} = lists:keysearch(sbmbcs, 1, L),
+ {value,{_,MBCS}} = lists:keysearch(mbcs, 1, L),
+ {value,{_,SBCS}} = lists:keysearch(sbcs, 1, L),
+ [SBMBCS,MBCS,SBCS | Acc]
+ end,
+ [],
+ MemInfo),
+ lists:foldl(
+ fun(L, Sz0) ->
+ {value,{_,Sz,_,_}} = lists:keysearch(blocks_size, 1, L),
+ Sz0+Sz
+ end, 0, CS)
+ end.
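driver_alloc_size/0 above sums the blocks_size figures over every carrier class (sbmbcs, mbcs, sbcs) of every driver_alloc instance. The same walk works for any allocator exposed through erlang:system_info({allocator_sizes, Alloc}); a generalized sketch (the allocator passed in is just an example, and system_info returns false when the allocator is disabled):

    %% Sketch: total block size currently allocated by a given allocator.
    alloc_blocks_size(Alloc) ->
        case erlang:system_info({allocator_sizes, Alloc}) of
            false ->
                undefined;
            Instances ->
                lists:sum([Sz || {instance,_,Info} <- Instances,
                                 {Kind,KindInfo} <- Info,
                                 lists:member(Kind, [sbmbcs,mbcs,sbcs]),
                                 {blocks_size,Sz,_,_} <- KindInfo])
        end.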
diff --git a/erts/emulator/test/driver_SUITE_data/Makefile.src b/erts/emulator/test/driver_SUITE_data/Makefile.src
index 5b3ba1557e..9cc107cc66 100644
--- a/erts/emulator/test/driver_SUITE_data/Makefile.src
+++ b/erts/emulator/test/driver_SUITE_data/Makefile.src
@@ -12,10 +12,12 @@ MISC_DRVS = outputv_drv@dll@ \
many_events_drv@dll@ \
missing_callback_drv@dll@ \
thr_alloc_drv@dll@ \
- otp_9302_drv@dll@
+ otp_9302_drv@dll@ \
+ thr_free_drv@dll@ \
+ async_blast_drv@dll@
-SYS_INFO_DRVS = sys_info_1_0_drv@dll@ \
- sys_info_1_1_drv@dll@ \
+SYS_INFO_DRVS = sys_info_base_drv@dll@ \
+ sys_info_prev_drv@dll@ \
sys_info_curr_drv@dll@
VSN_MISMATCH_DRVS = zero_extended_marker_garb_drv@dll@ \
diff --git a/erts/emulator/test/driver_SUITE_data/async_blast_drv.c b/erts/emulator/test/driver_SUITE_data/async_blast_drv.c
new file mode 100644
index 0000000000..c2086c5860
--- /dev/null
+++ b/erts/emulator/test/driver_SUITE_data/async_blast_drv.c
@@ -0,0 +1,124 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "erl_driver.h"
+
+#define NO_ASYNC_JOBS 10000
+
+static void stop(ErlDrvData drv_data);
+static ErlDrvData start(ErlDrvPort port,
+ char *command);
+static void output(ErlDrvData drv_data,
+ char *buf, ErlDrvSizeT len);
+static void ready_async(ErlDrvData drv_data,
+ ErlDrvThreadData thread_data);
+
+static ErlDrvEntry async_blast_drv_entry = {
+ NULL /* init */,
+ start,
+ stop,
+ output,
+ NULL /* ready_input */,
+ NULL /* ready_output */,
+ "async_blast_drv",
+ NULL /* finish */,
+ NULL /* handle */,
+ NULL /* control */,
+ NULL /* timeout */,
+ NULL /* outputv */,
+ ready_async,
+ NULL /* flush */,
+ NULL /* call */,
+ NULL /* event */,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL /* handle2 */,
+ NULL /* handle_monitor */
+};
+
+typedef struct {
+ ErlDrvPort port;
+ ErlDrvTermData caller;
+ int counter;
+} async_blast_data_t;
+
+
+DRIVER_INIT(async_blast_drv)
+{
+ return &async_blast_drv_entry;
+}
+
+static void stop(ErlDrvData drv_data)
+{
+ driver_free((void *) drv_data);
+}
+
+static ErlDrvData start(ErlDrvPort port,
+ char *command)
+{
+ async_blast_data_t *abd;
+
+ abd = driver_alloc(sizeof(async_blast_data_t));
+ if (!abd)
+ return ERL_DRV_ERROR_GENERAL;
+
+ abd->port = port;
+ abd->counter = 0;
+ return (ErlDrvData) abd;
+}
+
+static void async_invoke(void *data)
+{
+    /* Intentionally a no-op: each async job does no work; the test only
+     * exercises queueing and completion of driver_async() jobs. */
+}
+#include <stdio.h>
+
+static void ready_async(ErlDrvData drv_data,
+ ErlDrvThreadData thread_data)
+{
+ async_blast_data_t *abd = (async_blast_data_t *) drv_data;
+ if (--abd->counter == 0) {
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, driver_mk_port(abd->port),
+ ERL_DRV_ATOM, driver_mk_atom("done"),
+ ERL_DRV_TUPLE, 2
+ };
+ driver_send_term(abd->port, abd->caller,
+ spec, sizeof(spec)/sizeof(spec[0]));
+ }
+}
+
+static void output(ErlDrvData drv_data,
+ char *buf, ErlDrvSizeT len)
+{
+ async_blast_data_t *abd = (async_blast_data_t *) drv_data;
+ if (abd->counter == 0) {
+ int i;
+ abd->caller = driver_caller(abd->port);
+ abd->counter = NO_ASYNC_JOBS;
+ for (i = 0; i < NO_ASYNC_JOBS; i++) {
+ if (0 > driver_async(abd->port, NULL, async_invoke, NULL, NULL)) {
+ driver_failure_atom(abd->port, "driver_async_failed");
+ break;
+ }
+ }
+ }
+}
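From the Erlang side the driver above is exercised exactly as in async_blast/1: writing anything to the port queues NO_ASYNC_JOBS no-op async jobs, and a {Port,done} message is sent back to the caller when the last one completes. A minimal usage sketch, assuming the driver has already been loaded from the test data directory:

    Port = open_port({spawn, async_blast_drv}, []),
    port_command(Port, <<>>),              %% enqueue the 10000 async jobs
    receive {Port, done} -> ok end,
    port_close(Port).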
diff --git a/erts/emulator/test/driver_SUITE_data/caller_drv.c b/erts/emulator/test/driver_SUITE_data/caller_drv.c
index a78d51966f..1ed20b0638 100644
--- a/erts/emulator/test/driver_SUITE_data/caller_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/caller_drv.c
@@ -23,17 +23,17 @@
static ErlDrvData start(ErlDrvPort port,
char *command);
static void output(ErlDrvData drv_data,
- char *buf, int len);
+ char *buf, ErlDrvSizeT len);
static void outputv(ErlDrvData drv_data,
ErlIOVec *ev);
-static int control(ErlDrvData drv_data,
- unsigned int command, char *buf,
- int len, char **rbuf, int rlen);
-static int call(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen,
- unsigned int *flags);
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
+static ErlDrvSSizeT call(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen,
+ unsigned int *flags);
static ErlDrvEntry caller_drv_entry = {
NULL /* init */,
@@ -98,7 +98,7 @@ start(ErlDrvPort port, char *command)
}
static void
-output(ErlDrvData drv_data, char *buf, int len)
+output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
send_caller(drv_data, "output");
}
@@ -109,20 +109,20 @@ outputv(ErlDrvData drv_data, ErlIOVec *ev)
send_caller(drv_data, "outputv");
}
-static int
+static ErlDrvSSizeT
control(ErlDrvData drv_data,
unsigned int command, char *buf,
- int len, char **rbuf, int rlen)
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
{
send_caller(drv_data, "control");
return 0;
}
-static int
+static ErlDrvSSizeT
call(ErlDrvData drv_data,
unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen,
unsigned int *flags)
{
/* echo call */
diff --git a/erts/emulator/test/driver_SUITE_data/chkio_drv.c b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
index bbdb09cfcb..40f1ad4fea 100644
--- a/erts/emulator/test/driver_SUITE_data/chkio_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
@@ -141,8 +141,8 @@ static void chkio_drv_stop(ErlDrvData);
static void chkio_drv_ready_input(ErlDrvData, ErlDrvEvent);
static void chkio_drv_ready_output(ErlDrvData, ErlDrvEvent);
static void chkio_drv_ready_event(ErlDrvData, ErlDrvEvent, ErlDrvEventData);
-static int chkio_drv_control(ErlDrvData, unsigned int,
- char *, int, char **, int);
+static ErlDrvSSizeT chkio_drv_control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
static void chkio_drv_timeout(ErlDrvData);
static void chkio_drv_stop_select(ErlDrvEvent, void*);
@@ -188,7 +188,7 @@ stop_use_fallback_pollset(ChkioDrvData *cddp)
for (i = 0; i < CHKIO_FALLBACK_FDS; i++) {
if (cbdp->dev_null[i].fd >= 0) {
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->dev_null[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->dev_null[i].fd,
DO_WRITE,
0) != 0) {
fprintf(stderr,
@@ -200,7 +200,7 @@ stop_use_fallback_pollset(ChkioDrvData *cddp)
}
if (cbdp->dev_zero[i].fd >= 0) {
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->dev_zero[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->dev_zero[i].fd,
DO_READ,
0) != 0) {
fprintf(stderr,
@@ -212,7 +212,7 @@ stop_use_fallback_pollset(ChkioDrvData *cddp)
}
if (cbdp->pipe_in[i].fd >= 0) {
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->pipe_in[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->pipe_in[i].fd,
DO_READ,
0) != 0) {
fprintf(stderr,
@@ -224,7 +224,7 @@ stop_use_fallback_pollset(ChkioDrvData *cddp)
}
if (cbdp->pipe_out[i].fd >= 0) {
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->pipe_out[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->pipe_out[i].fd,
DO_WRITE,
0) != 0) {
fprintf(stderr,
@@ -249,11 +249,11 @@ stop_driver_event(ChkioDrvData *cddp)
cddp->test_data = NULL;
if (cdep->in_fd >= 0) {
- driver_event(cddp->port, (ErlDrvEvent) cdep->in_fd, NULL);
+ driver_event(cddp->port, (ErlDrvEvent) (ErlDrvSInt) cdep->in_fd, NULL);
close(cdep->in_fd);
}
if (cdep->out_fd >= 0) {
- driver_event(cddp->port, (ErlDrvEvent) cdep->out_fd, NULL);
+ driver_event(cddp->port, (ErlDrvEvent) (ErlDrvSInt) cdep->out_fd, NULL);
close(cdep->out_fd);
}
driver_free(cdep);
@@ -268,7 +268,7 @@ stop_fd_change(ChkioDrvData *cddp)
cddp->test_data = NULL;
driver_cancel_timer(cddp->port);
if (cfcp->fds[0] >= 0) {
- driver_select(cddp->port, (ErlDrvEvent) cfcp->fds[0], DO_READ, 0);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) cfcp->fds[0], DO_READ, 0);
close(cfcp->fds[0]);
close(cfcp->fds[1]);
}
@@ -282,8 +282,8 @@ stop_bad_fd_in_pollset(ChkioDrvData *cddp)
if (cddp->test_data) {
ChkioBadFdInPollset *bfipp = (ChkioBadFdInPollset *) cddp->test_data;
cddp->test_data = NULL;
- driver_select(cddp->port, (ErlDrvEvent) bfipp->fds[0], DO_WRITE, 0);
- driver_select(cddp->port, (ErlDrvEvent) bfipp->fds[1], DO_READ, 0);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) bfipp->fds[0], DO_WRITE, 0);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) bfipp->fds[1], DO_READ, 0);
driver_free((void *) bfipp);
}
}
@@ -296,21 +296,21 @@ stop_steal(ChkioDrvData *cddp)
cddp->test_data = NULL;
if (csp->driver_select_fds[0] >= 0)
driver_select(cddp->port,
- (ErlDrvEvent) csp->driver_select_fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_select_fds[0],
DO_READ,
0);
if (csp->driver_select_fds[1] >= 0)
driver_select(cddp->port,
- (ErlDrvEvent) csp->driver_select_fds[1],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_select_fds[1],
DO_WRITE,
0);
if (csp->driver_event_fds[0] >= 0)
driver_event(cddp->port,
- (ErlDrvEvent) csp->driver_event_fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_event_fds[0],
NULL);
if (csp->driver_event_fds[1] >= 0)
driver_event(cddp->port,
- (ErlDrvEvent) csp->driver_event_fds[1],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_event_fds[1],
NULL);
driver_free(csp);
}
@@ -353,7 +353,7 @@ static void free_smp_select(ChkioSmpSelect* pip, ErlDrvPort port)
abort();
}
case Selected:
- driver_select(port, (ErlDrvEvent)pip->read_fd, DO_READ, 0);
+ driver_select(port, (ErlDrvEvent)(ErlDrvSInt)pip->read_fd, DO_READ, 0);
/*fall through*/
case Opened:
close(pip->read_fd);
@@ -475,8 +475,8 @@ chkio_drv_stop(ErlDrvData drv_data) {
fprintf(stderr, "%s:%d: Failed to open /dev/null\n",
__FILE__, __LINE__);
}
- driver_select(cddp->port, (ErlDrvEvent) fd, DO_WRITE, 1);
- driver_select(cddp->port, (ErlDrvEvent) fd, DO_WRITE, 0);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) fd, DO_WRITE, 1);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) fd, DO_WRITE, 0);
close(fd);
@@ -491,7 +491,7 @@ chkio_drv_ready_output(ErlDrvData drv_data, ErlDrvEvent event)
{
#ifdef UNIX
ChkioDrvData *cddp = (ChkioDrvData *) drv_data;
- int fd = (int) event;
+ int fd = (int) (ErlDrvSInt) event;
switch (cddp->test) {
case CHKIO_USE_FALLBACK_POLLSET: {
@@ -533,7 +533,7 @@ chkio_drv_ready_input(ErlDrvData drv_data, ErlDrvEvent event)
{
#ifdef UNIX
ChkioDrvData *cddp = (ChkioDrvData *) drv_data;
- int fd = (int) event;
+ int fd = (int) (ErlDrvSInt) event;
switch (cddp->test) {
case CHKIO_USE_FALLBACK_POLLSET: {
@@ -630,7 +630,7 @@ chkio_drv_ready_event(ErlDrvData drv_data,
case CHKIO_DRIVER_EVENT: {
#ifdef HAVE_POLL_H
ChkioDriverEvent *cdep = cddp->test_data;
- int fd = (int) event;
+ int fd = (int) (ErlDrvSInt) event;
if (fd == cdep->in_fd) {
if (event_data->events == POLLIN
&& event_data->revents == POLLIN) {
@@ -679,7 +679,7 @@ chkio_drv_timeout(ErlDrvData drv_data)
int in_fd = cfcp->fds[0];
int out_fd = cfcp->fds[1];
if (in_fd >= 0) {
- if (driver_select(cddp->port, (ErlDrvEvent) in_fd, DO_READ, 0) < 0)
+ if (driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) in_fd, DO_READ, 0) < 0)
driver_failure_atom(cddp->port, "deselect_failed");
(void) write(out_fd, (void *) "!", 1);
close(out_fd);
@@ -689,7 +689,7 @@ chkio_drv_timeout(ErlDrvData drv_data)
driver_failure_posix(cddp->port, errno);
}
else {
- if (driver_select(cddp->port, (ErlDrvEvent) cfcp->fds[0],
+ if (driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) cfcp->fds[0],
DO_READ, 1) < 0)
driver_failure_atom(cddp->port, "select_failed");
if (cfcp->fds[0] == in_fd)
@@ -709,14 +709,14 @@ chkio_drv_timeout(ErlDrvData drv_data)
#endif /* UNIX */
}
-static int
+static ErlDrvSSizeT
chkio_drv_control(ErlDrvData drv_data,
unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
char *res_str;
- int res_len = -1;
+ ErlDrvSSizeT res_len = -1;
#ifndef UNIX
#ifdef __WIN32__
res_str = "skip: windows_different";
@@ -854,7 +854,7 @@ chkio_drv_control(ErlDrvData drv_data,
int fds[2];
cbdp->dev_null[i].fd = open("/dev/null", O_WRONLY);
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->dev_null[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->dev_null[i].fd,
DO_WRITE,
1) != 0) {
driver_failure_posix(cddp->port, errno);
@@ -862,7 +862,7 @@ chkio_drv_control(ErlDrvData drv_data,
}
cbdp->dev_zero[i].fd = open("/dev/zero", O_RDONLY);
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->dev_zero[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->dev_zero[i].fd,
DO_READ,
1) != 0) {
driver_failure_posix(cddp->port, errno);
@@ -873,7 +873,7 @@ chkio_drv_control(ErlDrvData drv_data,
cbdp->pipe_in[i].fd = fds[0];
cbdp->pipe_out[i].fd = fds[1];
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->pipe_in[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->pipe_in[i].fd,
DO_READ,
1) != 0) {
driver_failure_posix(cddp->port, EIO);
@@ -882,7 +882,7 @@ chkio_drv_control(ErlDrvData drv_data,
if (i % 2 == 0)
(void) write(cbdp->pipe_out[i].fd, "!", 1);
if (driver_select(cddp->port,
- (ErlDrvEvent) cbdp->pipe_out[i].fd,
+ (ErlDrvEvent) (ErlDrvSInt) cbdp->pipe_out[i].fd,
DO_WRITE,
1) != 0) {
driver_failure_posix(cddp->port, EIO);
@@ -928,8 +928,8 @@ chkio_drv_control(ErlDrvData drv_data,
bfipp->fds[0] = fds[9];
bfipp->fds[1] = fds[10];
cddp->test_data = (void *) bfipp;
- driver_select(cddp->port, (ErlDrvEvent) fds[9], DO_WRITE, 1);
- driver_select(cddp->port, (ErlDrvEvent) fds[10], DO_READ, 1);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) fds[9], DO_WRITE, 1);
+ driver_select(cddp->port, (ErlDrvEvent) (ErlDrvSInt) fds[10], DO_READ, 1);
}
}
res_str = "ok";
@@ -965,7 +965,7 @@ chkio_drv_control(ErlDrvData drv_data,
cdep->in_ok = 0;
res = driver_event(cddp->port,
- (ErlDrvEvent) in_fd,
+ (ErlDrvEvent) (ErlDrvSInt) in_fd,
&cdep->in_data);
if (res < 0) {
res_str = "skip: driver_event() not supported";
@@ -985,7 +985,7 @@ chkio_drv_control(ErlDrvData drv_data,
cdep->out_ok = 0;
res = driver_event(cddp->port,
- (ErlDrvEvent) out_fd,
+ (ErlDrvEvent) (ErlDrvSInt) out_fd,
&cdep->out_data);
if (res < 0) {
close(out_fd);
@@ -1062,7 +1062,7 @@ chkio_drv_control(ErlDrvData drv_data,
csp->event_data[0].events = POLLIN;
csp->event_data[0].revents = 0;
res = driver_event(cddp->port,
- (ErlDrvEvent) csp->driver_event_fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_event_fds[0],
&csp->event_data[0]);
if (res < 0)
driver_failure_atom(cddp->port,
@@ -1071,7 +1071,7 @@ chkio_drv_control(ErlDrvData drv_data,
csp->event_data[1].events = POLLOUT;
csp->event_data[1].revents = 0;
res = driver_event(cddp->port,
- (ErlDrvEvent) csp->driver_event_fds[1],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_event_fds[1],
&csp->event_data[1]);
if (res < 0)
driver_failure_atom(cddp->port,
@@ -1083,7 +1083,7 @@ chkio_drv_control(ErlDrvData drv_data,
/* Steal with driver_select() */
if (res >= 0) {
res = driver_select(cddp->port,
- (ErlDrvEvent) csp->driver_select_fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_select_fds[0],
DO_READ,
1);
if (res < 0)
@@ -1092,7 +1092,7 @@ chkio_drv_control(ErlDrvData drv_data,
}
if (res >= 0) {
res = driver_select(cddp->port,
- (ErlDrvEvent) csp->driver_select_fds[1],
+ (ErlDrvEvent) (ErlDrvSInt) csp->driver_select_fds[1],
DO_WRITE,
1);
if (res < 0)
@@ -1159,14 +1159,14 @@ chkio_drv_control(ErlDrvData drv_data,
csap->driver_event_fds[1] = write_fds[1];
res = driver_select(cddp->port,
- (ErlDrvEvent) csap->driver_select_fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) csap->driver_select_fds[0],
DO_READ,
1);
if (res < 0)
driver_failure_atom(cddp->port, "driver_select_failed");
if (res >= 0) {
res = driver_select(cddp->port,
- (ErlDrvEvent) csap->driver_select_fds[1],
+ (ErlDrvEvent) (ErlDrvSInt) csap->driver_select_fds[1],
DO_WRITE,
1);
if (res < 0)
@@ -1177,7 +1177,7 @@ chkio_drv_control(ErlDrvData drv_data,
csap->event_data[0].events = POLLIN;
csap->event_data[0].revents = 0;
res = driver_event(cddp->port,
- (ErlDrvEvent) csap->driver_event_fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) csap->driver_event_fds[0],
&csap->event_data[0]);
if (res < 0) {
close(csap->driver_event_fds[0]);
@@ -1190,7 +1190,7 @@ chkio_drv_control(ErlDrvData drv_data,
csap->event_data[1].events = POLLOUT;
csap->event_data[1].revents = 0;
res = driver_event(cddp->port,
- (ErlDrvEvent) csap->driver_event_fds[1],
+ (ErlDrvEvent) (ErlDrvSInt) csap->driver_event_fds[1],
&csap->event_data[1]);
if (res < 0)
driver_failure_atom(cddp->port,
@@ -1285,7 +1285,7 @@ chkio_drv_control(ErlDrvData drv_data,
}
else {
TRACEF(("%T: Select on pipe [%d->%d]\n", cddp->id, pip->write_fd, pip->read_fd));
- if (driver_select(cddp->port, (ErlDrvEvent)pip->read_fd, DO_READ, 1)) {
+ if (driver_select(cddp->port, (ErlDrvEvent)(ErlDrvSInt)pip->read_fd, DO_READ, 1)) {
fprintf(stderr, "driver_select failed for fd=%d\n", pip->read_fd);
abort();
}
@@ -1314,7 +1314,7 @@ chkio_drv_control(ErlDrvData drv_data,
op >>= 1;
if (op & 1) {
TRACEF(("%T: Deselect on pipe [%d->%d]\n", cddp->id, pip->write_fd, pip->read_fd));
- if (driver_select(cddp->port, (ErlDrvEvent)pip->read_fd, DO_READ, 0)) {
+ if (driver_select(cddp->port, (ErlDrvEvent)(ErlDrvSInt)pip->read_fd, DO_READ, 0)) {
fprintf(stderr, "driver_(de)select failed for fd=%d\n", pip->read_fd);
abort();
}
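
Note on the recurring change above: driver_select() and driver_event() take an ErlDrvEvent, which is a pointer-sized type, so the integer file descriptor is now cast through ErlDrvSInt first to avoid int-to-pointer size warnings on 64-bit builds. A minimal sketch of the pattern follows; the helper name is illustrative only and not part of the patch.

    #include "erl_driver.h"

    /* Illustrative helper (not part of the patch): register an int file
     * descriptor for read events.  Casting the fd to ErlDrvSInt (an integer
     * type wide enough to hold a pointer) before the ErlDrvEvent cast avoids
     * the int-to-pointer size-mismatch warning on 64-bit platforms. */
    static int select_read_fd(ErlDrvPort port, int fd)
    {
        return driver_select(port, (ErlDrvEvent) (ErlDrvSInt) fd, DO_READ, 1);
    }
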
diff --git a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
index 6afa46b3a2..e6a3edcd74 100644
--- a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
@@ -40,8 +40,9 @@ static void io_ready_exit_ready_input(ErlDrvData, ErlDrvEvent);
static void io_ready_exit_ready_output(ErlDrvData, ErlDrvEvent);
static void io_ready_exit_drv_output(ErlDrvData, char *, int);
static void io_ready_exit_drv_finish(void);
-static int io_ready_exit_drv_control(ErlDrvData, unsigned int,
- char *, int, char **, int);
+static ErlDrvSSizeT io_ready_exit_drv_control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT,
+ char **, ErlDrvSizeT);
static ErlDrvEntry io_ready_exit_drv_entry = {
NULL, /* init */
@@ -56,7 +57,17 @@ static ErlDrvEntry io_ready_exit_drv_entry = {
io_ready_exit_drv_control,
NULL, /* timeout */
NULL, /* outputv */
- NULL /* ready_async */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ NULL /* stop_select */
};
/* -------------------------------------------------------------------------
@@ -83,7 +94,7 @@ io_ready_exit_drv_stop(ErlDrvData drv_data) {
#ifdef UNIX
if (oeddp->fds[0] >= 0) {
driver_select(oeddp->port,
- (ErlDrvEvent) oeddp->fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) oeddp->fds[0],
DO_READ|DO_WRITE,
0);
close(oeddp->fds[0]);
@@ -109,15 +120,15 @@ io_ready_exit_ready_input(ErlDrvData drv_data, ErlDrvEvent event)
driver_failure_atom(oeddp->port, "ready_input_driver_failure");
}
-static int
+static ErlDrvSSizeT
io_ready_exit_drv_control(ErlDrvData drv_data,
unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
char *abuf;
char *res_str;
- int res_len;
+ ErlDrvSSizeT res_len;
IOReadyExitDrvData *oeddp = (IOReadyExitDrvData *) drv_data;
#ifndef UNIX
res_str = "nyiftos";
@@ -127,9 +138,9 @@ io_ready_exit_drv_control(ErlDrvData drv_data,
}
else {
res_str = "ok";
- write(oeddp->fds[1], "!", 1);
+ (void) write(oeddp->fds[1], "!", 1);
driver_select(oeddp->port,
- (ErlDrvEvent) oeddp->fds[0],
+ (ErlDrvEvent) (ErlDrvSInt) oeddp->fds[0],
DO_READ|DO_WRITE,
1);
}
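
The same signature migration recurs in most of the drivers below: with the extended driver interface, control callbacks take ErlDrvSizeT lengths and return ErlDrvSSizeT. A minimal sketch of the new callback shape, assuming a placeholder "ok" reply and omitting allocation error handling:

    #include <string.h>
    #include "erl_driver.h"

    /* Sketch of a control callback using the extended driver types: lengths
     * are ErlDrvSizeT and the return type is ErlDrvSSizeT.  The "ok" result
     * is a placeholder; error handling for driver_alloc() is omitted. */
    static ErlDrvSSizeT example_control(ErlDrvData drv_data, unsigned int command,
                                        char *buf, ErlDrvSizeT len,
                                        char **rbuf, ErlDrvSizeT rlen)
    {
        const char *res = "ok";
        ErlDrvSSizeT res_len = strlen(res);
        if ((ErlDrvSizeT) res_len > rlen)      /* reply does not fit in *rbuf */
            *rbuf = driver_alloc(res_len);     /* emulator frees the new buffer */
        memcpy(*rbuf, res, res_len);
        return res_len;
    }
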
diff --git a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
index e49de388b4..b2cc1e785a 100644
--- a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -91,7 +91,8 @@ static ErlDrvData start(ErlDrvPort port, char *command);
static void stop(ErlDrvData drv_data);
static void ready_input(ErlDrvData drv_data, ErlDrvEvent event);
static void ready_output(ErlDrvData drv_data, ErlDrvEvent event);
-static int control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
static void timeout(ErlDrvData drv_data);
static void ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data);
static void flush(ErlDrvData drv_data);
@@ -155,10 +156,10 @@ start(ErlDrvPort port, char *command)
return (ErlDrvData) ddp;
}
-static int control(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
IOQExitDrvData *ddp = (IOQExitDrvData *) drv_data;
char *res_str = "nyiftos";
@@ -227,7 +228,7 @@ static int control(ErlDrvData drv_data,
res_str = "ok";
done: {
- int res_len = strlen(res_str);
+ ErlDrvSSizeT res_len = strlen(res_str);
if (res_len > rlen) {
char *abuf = driver_alloc(sizeof(char)*res_len);
if (!abuf)
diff --git a/erts/emulator/test/driver_SUITE_data/many_events_drv.c b/erts/emulator/test/driver_SUITE_data/many_events_drv.c
index 7417dbf7f8..a34432a8fe 100644
--- a/erts/emulator/test/driver_SUITE_data/many_events_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/many_events_drv.c
@@ -3,14 +3,17 @@
#endif
#include <stdio.h>
+#include <string.h>
#include "erl_driver.h"
static ErlDrvPort erlang_port;
static ErlDrvData many_events_start(ErlDrvPort, char *);
-static void from_erlang(ErlDrvData, char*, int);
+static void from_erlang(ErlDrvData, char*, ErlDrvSizeT);
static void from_port(ErlDrvData drv_data, ErlDrvEvent event);
-static int many_events_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags);
+static ErlDrvSSizeT many_events_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen,
+ unsigned *ret_flags);
static ErlDrvEntry many_events_driver_entry = {
NULL, /* Init */
many_events_start,
@@ -26,7 +29,15 @@ static ErlDrvEntry many_events_driver_entry = {
NULL,
NULL,
NULL,
- many_events_call
+ many_events_call,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
};
DRIVER_INIT(many_events_drv)
@@ -41,7 +52,7 @@ many_events_start(ErlDrvPort port, char *buf)
}
static void
-from_erlang(ErlDrvData data, char *buf, int count)
+from_erlang(ErlDrvData data, char *buf, ErlDrvSizeT count)
{
int i;
int num;
@@ -87,9 +98,10 @@ static void from_port(ErlDrvData data, ErlDrvEvent ev)
return;
}
-static int
-many_events_call(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen, unsigned *ret_flags)
+static ErlDrvSSizeT
+many_events_call(ErlDrvData drv_data, unsigned int command,
+ char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen,
+ unsigned *ret_flags)
{
*rbuf = buf;
*ret_flags |= DRIVER_CALL_KEEP_BUFFER;
diff --git a/erts/emulator/test/driver_SUITE_data/monitor_drv.c b/erts/emulator/test/driver_SUITE_data/monitor_drv.c
index 1da6a56a72..3da067fd09 100644
--- a/erts/emulator/test/driver_SUITE_data/monitor_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/monitor_drv.c
@@ -21,8 +21,9 @@
#include "erl_driver.h"
static ErlDrvData monitor_drv_start(ErlDrvPort, char *);
-static int monitor_drv_control(ErlDrvData, unsigned int,
- char *, int, char **, int);
+static void monitor_drv_stop(ErlDrvData data);
+static ErlDrvSSizeT monitor_drv_control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
static void handle_monitor(ErlDrvData drv_data, ErlDrvMonitor *monitor);
#define OP_I_AM_IPID 1
@@ -50,7 +51,7 @@ typedef struct {
static ErlDrvEntry monitor_drv_entry = {
NULL /* init */,
monitor_drv_start,
- NULL /* stop */,
+ monitor_drv_stop,
NULL /* output */,
NULL /* ready_input */,
NULL /* ready_output */,
@@ -122,16 +123,16 @@ static void handle_monitor(ErlDrvData drv_data, ErlDrvMonitor *monitor)
return;
}
-static int
+static ErlDrvSSizeT
monitor_drv_control(ErlDrvData drv_data,
unsigned int command,
- char *ibuf, int ilen,
- char **rbuf, int rlen)
+ char *ibuf, ErlDrvSizeT ilen,
+ char **rbuf, ErlDrvSizeT rlen)
{
MyDrvData *data = (MyDrvData *) drv_data;
char *answer = NULL;
char buff[64];
- int alen;
+ ErlDrvSSizeT alen;
switch (command) {
case OP_I_AM_IPID:
diff --git a/erts/emulator/test/driver_SUITE_data/otp_6879_drv.c b/erts/emulator/test/driver_SUITE_data/otp_6879_drv.c
index 8c0a9aadfd..ff44145ca7 100644
--- a/erts/emulator/test/driver_SUITE_data/otp_6879_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/otp_6879_drv.c
@@ -20,11 +20,11 @@
#include <string.h>
#include "erl_driver.h"
-static int call(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen,
- unsigned int *flags);
+static ErlDrvSSizeT call(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen,
+ unsigned int *flags);
static ErlDrvEntry otp_6879_drv_entry = {
NULL /* init */,
@@ -57,11 +57,11 @@ DRIVER_INIT(otp_6879_drv)
}
-static int call(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen,
- unsigned int *flags)
+static ErlDrvSSizeT call(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen,
+ unsigned int *flags)
{
/* echo call */
if (len > rlen)
diff --git a/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c b/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c
index beee1b735f..221fd0ce51 100644
--- a/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c
@@ -28,7 +28,7 @@ static void stop(ErlDrvData drv_data);
static ErlDrvData start(ErlDrvPort port,
char *command);
static void output(ErlDrvData drv_data,
- char *buf, int len);
+ char *buf, ErlDrvSizeT len);
static void ready_async(ErlDrvData drv_data,
ErlDrvThreadData thread_data);
@@ -196,13 +196,13 @@ static void ready_async(ErlDrvData drv_data,
}
static void output(ErlDrvData drv_data,
- char *buf, int len)
+ char *buf, ErlDrvSizeT len)
{
Otp9302Data *data = (Otp9302Data *) drv_data;
ErlDrvTermData td_port = driver_mk_port(data->port);
ErlDrvTermData td_receiver = driver_caller(data->port);
ErlDrvTermData td_job = driver_mk_atom("job");
- unsigned int key = (unsigned int) data->port;
+ unsigned int key = (unsigned int) (ErlDrvSInt) data->port;
long id[5];
Otp9302AsyncData *ad[5];
int i;
diff --git a/erts/emulator/test/driver_SUITE_data/outputv_drv.c b/erts/emulator/test/driver_SUITE_data/outputv_drv.c
index 87f66ae413..3e3d4a3a03 100644
--- a/erts/emulator/test/driver_SUITE_data/outputv_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/outputv_drv.c
@@ -3,7 +3,9 @@
static ErlDrvPort erlang_port;
static ErlDrvData outputv_start(ErlDrvPort, char*);
-static void outputv_stop(ErlDrvData), outputv_read(ErlDrvData, char*, int), outputv(ErlDrvData, ErlIOVec*);
+static void outputv_stop(ErlDrvData),
+ outputv_read(ErlDrvData, char*, ErlDrvSizeT),
+ outputv(ErlDrvData, ErlIOVec*);
static ErlDrvEntry outputv_driver_entry =
{
@@ -19,6 +21,16 @@ static ErlDrvEntry outputv_driver_entry =
NULL,
NULL,
outputv,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -38,7 +50,7 @@ static ErlDrvData outputv_start(ErlDrvPort port, char *buf)
return (ErlDrvData)port;
}
-static void outputv_read(ErlDrvData port, char *buf, int count)
+static void outputv_read(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
erlang_port = (ErlDrvPort)-1;
}
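
Several of the hunks above and below append the same tail of fields so that the test drivers declare the extended driver interface; where they list bare NULLs, the sketch below labels which ErlDrvEntry field each position corresponds to. The callbacks and driver name here are illustrative only, not taken from the patch.

    #include "erl_driver.h"

    /* Sketch of a fully spelled-out extended ErlDrvEntry; callback slots are
     * NULL purely for illustration. */
    static ErlDrvEntry example_entry = {
        NULL, NULL, NULL, NULL, NULL, NULL,   /* init .. ready_output */
        "example_drv",                        /* driver_name */
        NULL,                                 /* finish */
        NULL,                                 /* handle */
        NULL,                                 /* control */
        NULL,                                 /* timeout */
        NULL,                                 /* outputv */
        NULL,                                 /* ready_async */
        NULL,                                 /* flush */
        NULL,                                 /* call */
        NULL,                                 /* event */
        ERL_DRV_EXTENDED_MARKER,              /* extended_marker */
        ERL_DRV_EXTENDED_MAJOR_VERSION,       /* major_version */
        ERL_DRV_EXTENDED_MINOR_VERSION,       /* minor_version */
        0,                                    /* driver_flags */
        NULL,                                 /* handle2 */
        NULL,                                 /* process_exit */
        NULL                                  /* stop_select */
    };
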
diff --git a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
index 3a5b5af13a..8e203f74ec 100644
--- a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
@@ -66,7 +66,8 @@ typedef struct {
static ErlDrvData start(ErlDrvPort, char *);
static void stop(ErlDrvData);
-static int control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
static void ready_async(ErlDrvData, ErlDrvThreadData);
static void async_test(void *);
static void async_wait(void *);
@@ -121,10 +122,10 @@ static void stop(ErlDrvData drv_data)
driver_free(drv_data);
}
-static int control(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
PeekNonXQDrvData *dp = (PeekNonXQDrvData *) drv_data;
unsigned int key = 0;
@@ -158,7 +159,7 @@ static int control(ErlDrvData drv_data,
}
done: {
- int res_len = strlen(res_str);
+ ErlDrvSSizeT res_len = strlen(res_str);
if (res_len > rlen) {
char *abuf = driver_alloc(sizeof(char)*res_len);
if (!abuf)
diff --git a/erts/emulator/test/driver_SUITE_data/queue_drv.c b/erts/emulator/test/driver_SUITE_data/queue_drv.c
index ded69f89f9..a02b57dc9a 100644
--- a/erts/emulator/test/driver_SUITE_data/queue_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/queue_drv.c
@@ -1,4 +1,5 @@
#include <stdio.h>
+#include <string.h>
#include "erl_driver.h"
#define put_int32(i, s) {((char*)(s))[0] = (char)((i) >> 24) & 0xff; \
@@ -33,9 +34,10 @@
static ErlDrvPort erlang_port;
static unsigned opcode; /* Opcode for next operation. */
static ErlDrvData queue_start(ErlDrvPort, char*);
-static void queue_stop(ErlDrvData), queue_read(ErlDrvData, char*, int);
+static void queue_stop(ErlDrvData), queue_read(ErlDrvData, char*, ErlDrvSizeT);
static void queue_outputv(ErlDrvData, ErlIOVec*);
-static int control(ErlDrvData, unsigned int, char*, int, char**, int);
+static ErlDrvSSizeT control(ErlDrvData, unsigned int,
+ char*, ErlDrvSizeT, char**, ErlDrvSizeT);
static ErlDrvBinary* read_head(ErlDrvPort, int bytes);
static ErlDrvEntry queue_driver_entry =
@@ -52,6 +54,16 @@ static ErlDrvEntry queue_driver_entry =
control,
NULL,
queue_outputv,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -73,7 +85,7 @@ static ErlDrvData queue_start(ErlDrvPort port, char *buf)
}
/* messages from Erlang */
-static void queue_read(ErlDrvData port, char *buf, int len)
+static void queue_read(ErlDrvData port, char *buf, ErlDrvSizeT len)
{
}
@@ -82,8 +94,9 @@ static void queue_stop(ErlDrvData port)
erlang_port = (ErlDrvPort) -1;
}
-static int
-control(ErlDrvData drv_data, unsigned command, char* buf, int len, char** rbuf, int rlen)
+static ErlDrvSSizeT
+control(ErlDrvData drv_data, unsigned command,
+ char* buf, ErlDrvSizeT len, char** rbuf, ErlDrvSizeT rlen)
{
ErlDrvBinary* b;
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_1_0_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
index 0504778086..c22a415c59 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_1_0_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
@@ -3,34 +3,35 @@
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved via the world wide web at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* The Initial Developer of the Original Code is Ericsson Utvecklings AB.
* Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
* AB. All Rights Reserved.''
- *
+ *
* $Id$
*/
/*
* Author: Rickard Green
*
- * Description: Driver that fakes driver version 1.0 and tests
+ * Description: Driver that fakes driver version 2.0 and tests
* driver_system_info().
*
*/
#include "sys_info_drv_impl.h"
-#define SYS_INFO_DRV_MAJOR_VSN 1
+#define SYS_INFO_DRV_MAJOR_VSN 2
#define SYS_INFO_DRV_MINOR_VSN 0
-#define SYS_INFO_DRV_NAME_STR "sys_info_1_0_drv"
-#define SYS_INFO_DRV_NAME sys_info_1_0_drv
+#define SYS_INFO_DRV_NAME_STR "sys_info_base_drv"
+#define SYS_INFO_DRV_NAME sys_info_base_drv
#define SYS_INFO_DRV_LAST_FIELD smp_support
+#define ERL_DRV_SYS_INFO_SIZE sizeof(ErlDrvSysInfo)
#define SYS_INFO_DRV_RES_FORMAT "ok: " \
"drv_drv_vsn=%d.%d " \
@@ -38,8 +39,9 @@
"erts_vsn=%s " \
"otp_vsn=%s " \
"thread=%s " \
- "smp=%s"
-
+ "smp=%s " \
+ "async_thrs=%d " \
+ "sched_thrs=%d"
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -51,6 +53,8 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += strlen(sip->otp_release) + 1;
slen += 5; /* threads */
slen += 5; /* smp */
+ slen += 20; /* async_thrs */
+ slen += 20; /* sched_thrs */
return slen;
}
@@ -66,7 +70,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->erts_version,
sip->otp_release,
sip->thread_support ? "true" : "false",
- sip->smp_support ? "true" : "false");
+ sip->smp_support ? "true" : "false",
+ sip->async_threads,
+ sip->scheduler_threads);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_drv_impl.c b/erts/emulator/test/driver_SUITE_data/sys_info_drv_impl.c
index 2d3203ae5d..c6c70a2075 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_drv_impl.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_drv_impl.c
@@ -46,7 +46,8 @@
#endif
static ErlDrvData start(ErlDrvPort, char *);
-static int control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT control(ErlDrvData, unsigned int,
+ char *, ErlDrvSizeT, char **, ErlDrvSizeT);
static ErlDrvEntry drv_entry = {
NULL /* init */,
@@ -84,13 +85,13 @@ start(ErlDrvPort port, char *command)
return (ErlDrvData) port;
}
-static int
+static ErlDrvSSizeT
control(ErlDrvData drv_data,
unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
- int res;
+ ErlDrvSSizeT res;
char *str;
size_t slen, slen2;
ErlDrvPort port = (ErlDrvPort) drv_data;
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_1_1_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
index fa21828284..815d96cc97 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_1_1_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
@@ -3,34 +3,35 @@
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved via the world wide web at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* The Initial Developer of the Original Code is Ericsson Utvecklings AB.
* Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
* AB. All Rights Reserved.''
- *
+ *
* $Id$
*/
/*
* Author: Rickard Green
*
- * Description: Driver that fakes driver version 1.1 and tests
+ * Description: Driver that fakes driver version 2.0 and tests
* driver_system_info().
*
*/
#include "sys_info_drv_impl.h"
-#define SYS_INFO_DRV_MAJOR_VSN 1
-#define SYS_INFO_DRV_MINOR_VSN 1
-#define SYS_INFO_DRV_NAME_STR "sys_info_1_1_drv"
-#define SYS_INFO_DRV_NAME sys_info_1_1_drv
+#define SYS_INFO_DRV_MAJOR_VSN 2
+#define SYS_INFO_DRV_MINOR_VSN 0
+#define SYS_INFO_DRV_NAME_STR "sys_info_prev_drv"
+#define SYS_INFO_DRV_NAME sys_info_prev_drv
#define SYS_INFO_DRV_LAST_FIELD scheduler_threads
+#define ERL_DRV_SYS_INFO_SIZE sizeof(ErlDrvSysInfo)
#define SYS_INFO_DRV_RES_FORMAT "ok: " \
"drv_drv_vsn=%d.%d " \
@@ -42,7 +43,6 @@
"async_thrs=%d " \
"sched_thrs=%d"
-
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
{
@@ -76,5 +76,3 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
}
#include "sys_info_drv_impl.c"
-
-
diff --git a/erts/emulator/test/driver_SUITE_data/thr_alloc_drv.c b/erts/emulator/test/driver_SUITE_data/thr_alloc_drv.c
index c7edbba7f6..95a6ae9bdf 100644
--- a/erts/emulator/test/driver_SUITE_data/thr_alloc_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/thr_alloc_drv.c
@@ -21,14 +21,8 @@
#include "erl_driver.h"
ErlDrvData start(ErlDrvPort port, char *command);
-int control(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen);
-
-static int call(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen,
- unsigned int *flags);
+ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
static ErlDrvEntry thr_alloc_drv_entry = {
NULL /* init */,
@@ -76,12 +70,12 @@ ErlDrvData start(ErlDrvPort port, char *command)
return (ErlDrvData) port;
}
-int control(ErlDrvData drv_data, unsigned int command, char *buf,
- int len, char **rbuf, int rlen)
+ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
{
ErlDrvPort port = (ErlDrvPort) drv_data;
char *result = "failure";
- int result_len;
+ ErlDrvSSizeT result_len;
if (len <= 20) {
int res;
ErlDrvTid tid;
diff --git a/erts/emulator/test/driver_SUITE_data/thr_free_drv.c b/erts/emulator/test/driver_SUITE_data/thr_free_drv.c
new file mode 100644
index 0000000000..439fe6a184
--- /dev/null
+++ b/erts/emulator/test/driver_SUITE_data/thr_free_drv.c
@@ -0,0 +1,241 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include "erl_driver.h"
+
+#define BLOCKS_PER_THREAD 100000
+#define NO_THREADS 10
+#define BLOCKS_PER_CTRL 1000
+
+typedef struct {
+ ErlDrvMutex *mtx;
+ ErlDrvCond *cnd;
+ int b;
+ int *go;
+ int *skip;
+ void *blocks[BLOCKS_PER_THREAD];
+} test_thread_data;
+
+typedef struct {
+ ErlDrvPort port;
+ int b;
+ int go;
+ int skip;
+ test_thread_data ttd[NO_THREADS+1];
+ ErlDrvTid tids[NO_THREADS+1];
+} test_data;
+
+static ErlDrvData start(ErlDrvPort port, char *command);
+static void stop(ErlDrvData data);
+static ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen);
+
+static ErlDrvEntry thr_free_drv_entry = {
+ NULL /* init */,
+ start,
+ stop,
+ NULL /* output */,
+ NULL /* ready_input */,
+ NULL /* ready_output */,
+ "thr_free_drv",
+ NULL /* finish */,
+ NULL /* handle */,
+ control,
+ NULL /* timeout */,
+ NULL /* outputv */,
+ NULL /* ready_async */,
+ NULL /* flush */,
+ NULL /* call */,
+ NULL /* event */,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL /* handle2 */,
+ NULL /* handle_monitor */
+};
+
+DRIVER_INIT(thr_free_drv)
+{
+ return &thr_free_drv_entry;
+}
+
+void *
+test_thread(void *vttd)
+{
+ test_thread_data *ttd = (test_thread_data *) vttd;
+ int i, skip;
+
+ erl_drv_mutex_lock(ttd->mtx);
+
+ while (!*ttd->go)
+ erl_drv_cond_wait(ttd->cnd, ttd->mtx);
+ skip = *ttd->skip;
+ erl_drv_mutex_unlock(ttd->mtx);
+
+ if (!skip) {
+ for (i = 0; i < BLOCKS_PER_THREAD; i++)
+ driver_free(ttd->blocks[i]);
+ }
+ return NULL;
+}
+
+ErlDrvData start(ErlDrvPort port, char *command)
+{
+ int join = 0, t, b, res;
+ test_thread_data *ttd;
+ test_data *td = driver_alloc(sizeof(test_data));
+ if (!td)
+ return ERL_DRV_ERROR_GENERAL;
+ ttd = td->ttd;
+ for (b = 0; b < BLOCKS_PER_THREAD; b++)
+ for (t = 0; t <= NO_THREADS; t++)
+ ttd[t].blocks[b] = NULL;
+ ttd[0].mtx = NULL;
+ ttd[0].cnd = NULL;
+
+ for (b = 0; b < BLOCKS_PER_THREAD; b++) {
+ for (t = 0; t <= NO_THREADS; t++) {
+ ttd[t].blocks[b] = driver_alloc(1);
+ if (ttd[t].blocks[b] == NULL)
+ goto fail;
+ }
+ }
+
+ td->b = -1;
+ td->go = 0;
+ td->skip = 0;
+
+ ttd[0].mtx = erl_drv_mutex_create("test_mutex");
+ if (!ttd[0].mtx)
+ goto fail;
+ ttd[0].cnd = erl_drv_cond_create("test_cnd");
+ if (!ttd[0].cnd)
+ goto fail;
+ ttd[0].go = &td->go;
+ ttd[0].skip = &td->skip;
+
+ for (t = 1; t <= NO_THREADS; t++) {
+ ttd[t].mtx = ttd[0].mtx;
+ ttd[t].cnd = ttd[0].cnd;
+ ttd[t].go = ttd[0].go;
+ ttd[t].skip = ttd[0].skip;
+ res = erl_drv_thread_create("test_thread",
+ &td->tids[t],
+ test_thread,
+ &ttd[t],
+ NULL);
+ if (res != 0)
+ goto fail;
+ join = t;
+ }
+
+ td->port = port;
+
+ return (ErlDrvData) td;
+
+fail:
+
+ if (join) {
+ erl_drv_mutex_lock(ttd[0].mtx);
+ td->go = 1;
+ td->skip = 1;
+ erl_drv_cond_broadcast(ttd[0].cnd);
+ erl_drv_mutex_unlock(ttd[0].mtx);
+ for (t = 1; t <= join; t++)
+ erl_drv_thread_join(td->tids[t], NULL);
+ }
+
+ if (ttd[0].mtx)
+ erl_drv_mutex_destroy(ttd[0].mtx);
+ if (ttd[0].cnd)
+ erl_drv_cond_destroy(ttd[0].cnd);
+
+ for (b = 0; b < BLOCKS_PER_THREAD; b++) {
+ for (t = 0; t <= NO_THREADS; t++) {
+ if (ttd[t].blocks[b] != NULL)
+ driver_free(ttd[t].blocks[b]);
+ }
+ }
+ driver_free(td);
+ return ERL_DRV_ERROR_GENERAL;
+}
+
+static void stop(ErlDrvData drv_data)
+{
+ test_data *td = (test_data *) drv_data;
+ int t, b;
+ for (t = 1; t <= NO_THREADS; t++)
+ erl_drv_thread_join(td->tids[t], NULL);
+ for (b = 0; b < BLOCKS_PER_THREAD; b++) {
+ if (td->ttd[0].blocks[b])
+ driver_free(td->ttd[0].blocks[b]);
+ }
+ erl_drv_mutex_destroy(td->ttd[0].mtx);
+ erl_drv_cond_destroy(td->ttd[0].cnd);
+ driver_free(td);
+}
+
+static ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf,
+ ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen)
+{
+ test_data *td = (test_data *) drv_data;
+ char *result = "failure";
+ int i, b;
+ int res;
+ ErlDrvSSizeT result_len;
+
+ if (td->b == -1) {
+ erl_drv_mutex_lock(td->ttd[0].mtx);
+ td->go = 1;
+ erl_drv_cond_broadcast(td->ttd[0].cnd);
+ erl_drv_mutex_unlock(td->ttd[0].mtx);
+ td->b = 0;
+ }
+
+ for (i = 0, b = td->b; i < BLOCKS_PER_CTRL && b < BLOCKS_PER_THREAD; i++, b++) {
+ driver_free(td->ttd[0].blocks[b]);
+ td->ttd[0].blocks[b] = NULL;
+ }
+
+ td->b = b;
+ if (b >= BLOCKS_PER_THREAD)
+ result = "done";
+ else
+ result = "more";
+
+ result_len = strlen(result);
+ if (result_len <= rlen) {
+ memcpy(*rbuf, result, result_len);
+ return result_len;
+ }
+ else {
+ *rbuf = driver_alloc(result_len);
+ if (!*rbuf) {
+ driver_failure_posix(td->port, ENOMEM);
+ return 0;
+ }
+ else {
+ memcpy(*rbuf, result, result_len);
+ return result_len;
+ }
+ }
+}
diff --git a/erts/emulator/test/driver_SUITE_data/timer_drv.c b/erts/emulator/test/driver_SUITE_data/timer_drv.c
index b96a95dd4c..8c3f203a64 100644
--- a/erts/emulator/test/driver_SUITE_data/timer_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/timer_drv.c
@@ -22,7 +22,9 @@
static ErlDrvPort erlang_port;
static ErlDrvData timer_start(ErlDrvPort, char*);
-static void timer_stop(ErlDrvData), timer_read(ErlDrvData, char*, int), timer(ErlDrvData);
+static void timer_stop(ErlDrvData);
+static void timer_read(ErlDrvData, char*, ErlDrvSizeT);
+static void timer(ErlDrvData);
static ErlDrvEntry timer_driver_entry =
{
@@ -38,6 +40,16 @@ static ErlDrvEntry timer_driver_entry =
NULL,
timer,
NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
NULL
};
@@ -57,8 +69,9 @@ static ErlDrvData timer_start(ErlDrvPort port, char *buf)
}
/* set the timer, this is monitored from erlang measuring the time */
-static void timer_read(ErlDrvData port, char *buf, int len)
+static void timer_read(ErlDrvData p, char *buf, ErlDrvSizeT len)
{
+ ErlDrvPort port = (ErlDrvPort) p;
char reply[1];
if (buf[0] == START_TIMER) {
diff --git a/erts/emulator/test/emulator.spec b/erts/emulator/test/emulator.spec
index 1ea751cc3b..7a6dd83020 100644
--- a/erts/emulator/test/emulator.spec
+++ b/erts/emulator/test/emulator.spec
@@ -1 +1,2 @@
+{enable_builtin_hooks, false}.
{suites,"../emulator_test",all}.
diff --git a/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c b/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c
index 1e98844838..b4542f3e36 100644
--- a/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c
+++ b/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c
@@ -50,13 +50,33 @@ typedef struct {
ErlDrvData testcase_drv_start(ErlDrvPort port, char *command);
void testcase_drv_stop(ErlDrvData drv_data);
-void testcase_drv_run(ErlDrvData drv_data, char *buf, int len);
+void testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
static ErlDrvEntry testcase_drv_entry = {
NULL,
testcase_drv_start,
testcase_drv_stop,
- testcase_drv_run
+ testcase_drv_run,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+
};
@@ -92,7 +112,7 @@ testcase_drv_stop(ErlDrvData drv_data)
}
void
-testcase_drv_run(ErlDrvData drv_data, char *buf, int len)
+testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
{
InternalTestCaseState_t *itcs = (InternalTestCaseState_t *) drv_data;
ErlDrvTermData result_atom;
diff --git a/erts/emulator/test/erts_debug_SUITE.erl b/erts/emulator/test/erts_debug_SUITE.erl
index 4dc2fbaae2..87778dd0c2 100644
--- a/erts/emulator/test/erts_debug_SUITE.erl
+++ b/erts/emulator/test/erts_debug_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -23,13 +23,13 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
- flat_size/1,flat_size_big/1,df/1,
+ test_size/1,flat_size_big/1,df/1,
instructions/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [flat_size, flat_size_big, df, instructions].
+ [test_size, flat_size_big, df, instructions].
groups() ->
[].
@@ -55,16 +55,58 @@ end_per_testcase(_Func, Config) ->
Dog=?config(watchdog, Config),
?t:timetrap_cancel(Dog).
-flat_size(Config) when is_list(Config) ->
- 0 = erts_debug:flat_size([]),
- 0 = erts_debug:flat_size(42),
- 2 = erts_debug:flat_size([a|b]),
- 1 = erts_debug:flat_size({}),
- 2 = erts_debug:flat_size({[]}),
- 3 = erts_debug:flat_size({a,b}),
- 7 = erts_debug:flat_size({a,[b,c]}),
+test_size(Config) when is_list(Config) ->
+ ConsCell1 = id([a|b]),
+ ConsCell2 = id(ConsCell1),
+ ConsCellSz = 2,
+
+ 0 = do_test_size([]),
+ 0 = do_test_size(42),
+ ConsCellSz = do_test_size(ConsCell1),
+ 1 = do_test_size({}),
+ 2 = do_test_size({[]}),
+ 3 = do_test_size({a,b}),
+ 7 = do_test_size({a,[b,c]}),
+
+ %% Test internal consistency of sizes, but without testing
+ %% exact sizes.
+ Const = id(42),
+ AnotherConst = id(7),
+
+ %% Fun environment size = 0 (the smallest fun possible)
+ SimplestFun = fun() -> ok end,
+ FunSz0 = do_test_size(SimplestFun),
+
+ %% Fun environment size = 1
+ FunSz1 = do_test_size(fun() -> Const end),
+ FunSz1 = FunSz0 + 1,
+
+ %% Fun environment size = 2
+ FunSz2 = do_test_size(fun() -> Const+AnotherConst end),
+ FunSz2 = FunSz1 + 1,
+
+ FunSz1 = do_test_size(fun() -> ConsCell1 end) - do_test_size(ConsCell1),
+
+ %% Test shared data structures.
+ do_test_size([ConsCell1|ConsCell1],
+ 3*ConsCellSz,
+ 2*ConsCellSz),
+ do_test_size(fun() -> {ConsCell1,ConsCell2} end,
+ FunSz2 + 2*ConsCellSz,
+ FunSz2 + ConsCellSz),
+ do_test_size({SimplestFun,SimplestFun},
+ 2*FunSz0+do_test_size({a,b}),
+ FunSz0+do_test_size({a,b})),
ok.
+do_test_size(Term) ->
+ Sz = erts_debug:flat_size(Term),
+ Sz = erts_debug:size(Term).
+
+do_test_size(Term, FlatSz, Sz) ->
+ FlatSz = erts_debug:flat_size(Term),
+ Sz = erts_debug:size(Term).
+
flat_size_big(Config) when is_list(Config) ->
%% Build a term whose external size only fits in a big num (on 32-bit CPU).
flat_size_big_1(16#11111111111111117777777777777777888889999, 0, 16#FFFFFFF).
@@ -96,3 +138,6 @@ instructions(Config) when is_list(Config) ->
?line Is = erts_debug:instructions(),
?line _ = [list_to_atom(I) || I <- Is],
ok.
+
+id(I) ->
+ I.
diff --git a/erts/emulator/test/exception_SUITE.erl b/erts/emulator/test/exception_SUITE.erl
index 9d6fc9521d..109cec25cb 100644
--- a/erts/emulator/test/exception_SUITE.erl
+++ b/erts/emulator/test/exception_SUITE.erl
@@ -23,9 +23,10 @@
init_per_group/2,end_per_group/2,
badmatch/1, pending_errors/1, nil_arith/1,
stacktrace/1, nested_stacktrace/1, raise/1, gunilla/1, per/1,
- exception_with_heap_frag/1]).
+ exception_with_heap_frag/1, line_numbers/1]).
-export([bad_guy/2]).
+-export([crash/1]).
-include_lib("test_server/include/test_server.hrl").
-import(lists, [foreach/2]).
@@ -35,7 +36,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[badmatch, pending_errors, nil_arith, stacktrace,
nested_stacktrace, raise, gunilla, per,
- exception_with_heap_frag].
+ exception_with_heap_frag, line_numbers].
groups() ->
[].
@@ -141,14 +142,20 @@ pending_exit_message(Args, Expected) ->
end,
process_flag(trap_exit, false).
-pending({badarg, [{erlang,Bif,BifArgs},{?MODULE,Func,Arity}|_]}, Func, Args, _Code)
- when is_atom(Bif), is_list(BifArgs), length(Args) == Arity ->
+pending({badarg,[{erlang,Bif,BifArgs,Loc1},
+ {?MODULE,Func,Arity,Loc2}|_]},
+ Func, Args, _Code)
+ when is_atom(Bif), is_list(BifArgs), length(Args) =:= Arity,
+ is_list(Loc1), is_list(Loc2) ->
ok;
-pending({undef,[{non_existing_module,foo,[]}|_]}, _, _, _) ->
+pending({undef,[{non_existing_module,foo,[],Loc}|_]}, _, _, _)
+ when is_list(Loc) ->
ok;
-pending({function_clause,[{?MODULE,Func,Args}|_]}, Func, Args, _Code) ->
+pending({function_clause,[{?MODULE,Func,Args,Loc}|_]}, Func, Args, _Code)
+ when is_list(Loc) ->
ok;
-pending({Code,[{?MODULE,Func,Arity}|_]}, Func, Args, Code) when length(Args) == Arity ->
+pending({Code,[{?MODULE,Func,Arity,Loc}|_]}, Func, Args, Code)
+ when length(Args) =:= Arity, is_list(Loc) ->
ok;
pending(Reason, _Function, _Args, _Code) ->
test_server:fail({bad_exit_reason,Reason}).
@@ -255,24 +262,24 @@ stacktrace(Conf) when is_list(Conf) ->
?line {_,Mref} = spawn_monitor(fun() -> exit({Tag,erlang:get_stacktrace()}) end),
?line {Tag,[]} = receive {'DOWN',Mref,_,_,Info} -> Info end,
V = [make_ref()|self()],
- ?line {value2,{caught1,badarg,[{erlang,abs,[V]}|_]=St1}} =
+ ?line {value2,{caught1,badarg,[{erlang,abs,[V],_}|_]=St1}} =
stacktrace_1({'abs',V}, error, {value,V}),
?line St1 = erase(stacktrace1),
?line St1 = erase(stacktrace2),
?line St1 = erlang:get_stacktrace(),
- ?line {caught2,{error,badarith},[{?MODULE,my_add,2}|_]=St2} =
+ ?line {caught2,{error,badarith},[{?MODULE,my_add,2,_}|_]=St2} =
stacktrace_1({'div',{1,0}}, error, {'add',{0,a}}),
- ?line [{?MODULE,my_div,2}|_] = erase(stacktrace1),
+ ?line [{?MODULE,my_div,2,_}|_] = erase(stacktrace1),
?line St2 = erase(stacktrace2),
?line St2 = erlang:get_stacktrace(),
- ?line {caught2,{error,{try_clause,V}},[{?MODULE,stacktrace_1,3}|_]=St3} =
+ ?line {caught2,{error,{try_clause,V}},[{?MODULE,stacktrace_1,3,_}|_]=St3} =
stacktrace_1({value,V}, error, {value,V}),
?line St3 = erase(stacktrace1),
?line St3 = erase(stacktrace2),
?line St3 = erlang:get_stacktrace(),
- ?line {caught2,{throw,V},[{?MODULE,foo,1}|_]=St4} =
+ ?line {caught2,{throw,V},[{?MODULE,foo,1,_}|_]=St4} =
stacktrace_1({value,V}, error, {throw,V}),
- ?line [{?MODULE,stacktrace_1,3}|_] = erase(stacktrace1),
+ ?line [{?MODULE,stacktrace_1,3,_}|_] = erase(stacktrace1),
?line St4 = erase(stacktrace2),
?line St4 = erlang:get_stacktrace(),
@@ -280,8 +287,8 @@ stacktrace(Conf) when is_list(Conf) ->
?line stacktrace_2()
catch
error:{badmatch,_} ->
- [{?MODULE,stacktrace_2,0},
- {?MODULE,stacktrace,1}|_] =
+ [{?MODULE,stacktrace_2,0,_},
+ {?MODULE,stacktrace,1,_}|_] =
erlang:get_stacktrace(),
ok
end.
@@ -315,15 +322,15 @@ nested_stacktrace(Conf) when is_list(Conf) ->
nested_stacktrace_1({{value,{V,x1}},void,{V,x1}},
{void,void,void}),
?line {caught1,
- [{?MODULE,my_add,2}|_],
+ [{?MODULE,my_add,2,_}|_],
value2,
- [{?MODULE,my_add,2}|_]} =
+ [{?MODULE,my_add,2,_}|_]} =
nested_stacktrace_1({{'add',{V,x1}},error,badarith},
{{value,{V,x2}},void,{V,x2}}),
?line {caught1,
- [{?MODULE,my_add,2}|_],
- {caught2,[{erlang,abs,[V]}|_]},
- [{erlang,abs,[V]}|_]} =
+ [{?MODULE,my_add,2,_}|_],
+ {caught2,[{erlang,abs,[V],_}|_]},
+ [{erlang,abs,[V],_}|_]} =
nested_stacktrace_1({{'add',{V,x1}},error,badarith},
{{'abs',V},error,badarg}),
ok.
@@ -362,14 +369,14 @@ raise(Conf) when is_list(Conf) ->
end,
?line A = erlang:get_stacktrace(),
?line A = get(raise),
- ?line [{?MODULE,my_div,2}|_] = A,
+ ?line [{?MODULE,my_div,2,_}|_] = A,
%%
N = 8, % Must be even
?line N = erlang:system_flag(backtrace_depth, N),
+ ?line B = odd_even(N, []),
?line try even(N)
catch error:function_clause -> ok
end,
- ?line B = odd_even(N, []),
?line B = erlang:get_stacktrace(),
%%
?line C0 = odd_even(N+1, []),
@@ -387,19 +394,12 @@ raise(Conf) when is_list(Conf) ->
odd_even(N, R) when is_integer(N), N > 1 ->
odd_even(N-1,
[if (N rem 2) == 0 ->
- {?MODULE,even,1};
+ {?MODULE,even,1,[{file,"odd_even.erl"},{line,3}]};
true ->
- {?MODULE,odd,1}
+ {?MODULE,odd,1,[{file,"odd_even.erl"},{line,6}]}
end|R]);
odd_even(1, R) ->
- [{?MODULE,odd,[1]}|R].
-
-even(N) when is_integer(N), N > 1, (N rem 2) == 0 ->
- odd(N-1)++[N].
-
-odd(N) when is_integer(N), N > 1, (N rem 2) == 1 ->
- even(N-1)++[N].
-
+ [{?MODULE,odd,[1],[{file,"odd_even.erl"},{line,5}]}|R].
foo({value,Value}) -> Value;
foo({'div',{A,B}}) ->
@@ -526,4 +526,186 @@ do_exception_with_heap_frag(Bin, [Sz|Sizes]) ->
do_exception_with_heap_frag(Bin, Sizes);
do_exception_with_heap_frag(_, []) -> ok.
+line_numbers(Config) when is_list(Config) ->
+ {'EXIT',{{case_clause,bad_tag},
+ [{?MODULE,line1,2,
+ [{file,"fake_file.erl"},{line,3}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(bad_tag, 0)),
+ {'EXIT',{badarith,
+ [{?MODULE,line1,2,
+ [{file,"fake_file.erl"},{line,5}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(a, not_an_integer)),
+ {'EXIT',{{badmatch,{ok,1}},
+ [{?MODULE,line1,2,
+ [{file,"fake_file.erl"},{line,7}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(a, 0)),
+ {'EXIT',{crash,
+ [{?MODULE,crash,1,
+ [{file,"fake_file.erl"},{line,14}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch line1(a, 41)),
+
+ ModFile = ?MODULE_STRING++".erl",
+ [{?MODULE,maybe_crash,1,[{file,"call.erl"},{line,28}]},
+ {?MODULE,call1,0,[{file,"call.erl"},{line,14}]},
+ {?MODULE,close_calls,1,[{file,"call.erl"},{line,5}]},
+ {?MODULE,line_numbers,1,[{file,ModFile},{line,_}]}|_] =
+ close_calls(call1),
+ [{?MODULE,maybe_crash,1,[{file,"call.erl"},{line,28}]},
+ {?MODULE,call2,0,[{file,"call.erl"},{line,18}]},
+ {?MODULE,close_calls,1,[{file,"call.erl"},{line,6}]},
+ {?MODULE,line_numbers,1,[{file,ModFile},{line,_}]}|_] =
+ close_calls(call2),
+ [{?MODULE,maybe_crash,1,[{file,"call.erl"},{line,28}]},
+ {?MODULE,call3,0,[{file,"call.erl"},{line,22}]},
+ {?MODULE,close_calls,1,[{file,"call.erl"},{line,7}]},
+ {?MODULE,line_numbers,1,[{file,ModFile},{line,_}]}|_] =
+ close_calls(call3),
+ no_crash = close_calls(other),
+
+ <<0,0>> = build_binary1(16),
+ {'EXIT',{badarg,
+ [{?MODULE,build_binary1,1,
+ [{file,"bit_syntax.erl"},{line,72503}]},
+ {?MODULE,line_numbers,1,
+ [{file,ModFile},{line,_}]}|_]}} =
+ (catch build_binary1(bad_size)),
+
+ <<7,1,2,3>> = build_binary2(8, <<1,2,3>>),
+ {'EXIT',{badarg,
+ [{?MODULE,build_binary2,2,
+ [{file,"bit_syntax.erl"},{line,72507}]},
+ {?MODULE,line_numbers,1,
+ [{file,ModFile},{line,_}]}|_]}} =
+ (catch build_binary2(bad_size, <<>>)),
+ {'EXIT',{badarg,
+ [{erlang,bit_size,[bad_binary],[]},
+ {?MODULE,build_binary2,2,
+ [{file,"bit_syntax.erl"},{line,72507}]},
+ {?MODULE,line_numbers,1,
+ [{file,ModFile},{line,_}]}|_]}} =
+ (catch build_binary2(8, bad_binary)),
+
+ {'EXIT',{function_clause,
+ [{?MODULE,do_call_abs,[y,y],
+ [{file,"gc_bif.erl"},{line,18}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch do_call_abs(y, y)),
+ {'EXIT',{badarg,
+ [{erlang,abs,[[]],[]},
+ {?MODULE,do_call_abs,2,
+ [{file,"gc_bif.erl"},{line,19}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch do_call_abs(x, [])),
+
+ {'EXIT',{{badmatch,"42"},
+ [{?MODULE,applied_bif_1,1,[{file,"applied_bif.erl"},{line,5}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch applied_bif_1(42)),
+
+ {'EXIT',{{badmatch,{current_location,
+ {?MODULE,applied_bif_2,0,
+ [{file,"applied_bif.erl"},{line,9}]}}},
+ [{?MODULE,applied_bif_2,0,[{file,"applied_bif.erl"},{line,10}]},
+ {?MODULE,line_numbers,1,_}|_]}} =
+ (catch applied_bif_2()),
+
+ ok.
+
id(I) -> I.
+
+-file("odd_even.erl", 1). %Line 1
+even(N) when is_integer(N), N > 1, (N rem 2) == 0 ->
+ odd(N-1)++[N]. %Line 3
+
+odd(N) when is_integer(N), N > 1, (N rem 2) == 1 ->
+ even(N-1)++[N]. %Line 6
+
+%%
+%% If the compiler removes redundant line instructions (any
+%% line instruction with the same location as the previous),
+%% and the loader also removes line instructions before
+%% tail-recursive calls to external functions, then the
+%% badmatch exception in line 7 below will be reported as
+%% occurring in line 6.
+%%
+%% That means that any removal of redundant line instructions
+%% must all be done in the compiler OR in the loader.
+%%
+-file("fake_file.erl", 1). %Line 1
+line1(Tag, X) -> %Line 2
+ case Tag of %Line 3
+ a ->
+ Y = X + 1, %Line 5
+ Res = id({ok,Y}), %Line 6
+ ?MODULE:crash({ok,42} = Res); %Line 7
+ b ->
+ x = id(x), %Line 9
+ ok %Line 10
+ end. %Line 11
+
+crash(_) -> %Line 13
+ erlang:error(crash). %Line 14
+
+-file("call.erl", 1). %Line 1
+close_calls(Where) -> %Line 2
+ put(where_to_crash, Where), %Line 3
+ try
+ call1(), %Line 5
+ call2(), %Line 6
+ call3(), %Line 7
+ no_crash %Line 8
+ catch error:crash ->
+ erlang:get_stacktrace() %Line 10
+ end. %Line 11
+
+call1() -> %Line 13
+ maybe_crash(call1), %Line 14
+ ok. %Line 15
+
+call2() -> %Line 17
+ maybe_crash(call2), %Line 18
+ ok. %Line 19
+
+call3() -> %Line 21
+ maybe_crash(call3), %Line 22
+ ok. %Line 23
+
+maybe_crash(Name) -> %Line 25
+ case get(where_to_crash) of %Line 26
+ Name ->
+ erlang:error(crash); %Line 28
+ _ ->
+ ok %Line 30
+ end.
+
+-file("bit_syntax.erl", 72500). %Line 72500
+build_binary1(Size) -> %Line 72501
+ id(42), %Line 72502
+ <<0:Size>>. %Line 72503
+
+build_binary2(Size, Bin) -> %Line 72505
+ id(0), %Line 72506
+ <<7:Size,Bin/binary>>. %Line 72507
+
+-file("gc_bif.erl", 17).
+do_call_abs(x, Arg) -> %Line 18
+ abs(Arg). %Line 19
+
+%% Make sure a BIF that is applied does not leave the p->cp
+%% set (and thus generate an extra entry on the stack).
+
+-file("applied_bif.erl", 1).
+%% Explicit apply.
+applied_bif_1(I) -> %Line 3
+ L = apply(erlang, integer_to_list, [I]), %Line 4
+ fail = L, %Line 5
+ ok. %Line 6
+%% Implicit apply.
+applied_bif_2() -> %Line 8
+ R = process_info(self(), current_location), %Line 9
+ fail = R, %Line 10
+ ok. %Line 11
diff --git a/erts/emulator/test/float_SUITE.erl b/erts/emulator/test/float_SUITE.erl
index 736510339f..8e6923ce9f 100644
--- a/erts/emulator/test/float_SUITE.erl
+++ b/erts/emulator/test/float_SUITE.erl
@@ -25,8 +25,9 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
fpe/1,fp_drv/1,fp_drv_thread/1,denormalized/1,match/1,
- bad_float_unpack/1]).
+ bad_float_unpack/1,cmp_zero/1, cmp_integer/1, cmp_bignum/1]).
-export([otp_7178/1]).
+-export([hidden_inf/1]).
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
@@ -41,10 +42,12 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[fpe, fp_drv, fp_drv_thread, otp_7178, denormalized,
- match, bad_float_unpack].
+ match, bad_float_unpack, {group, comparison}
+ ,hidden_inf
+ ].
groups() ->
- [].
+ [{comparison, [parallel], [cmp_zero, cmp_integer, cmp_bignum]}].
init_per_suite(Config) ->
Config.
@@ -187,6 +190,101 @@ bad_float_unpack(Config) when is_list(Config) ->
bad_float_unpack_match(<<F:64/float>>) -> F;
bad_float_unpack_match(<<I:64/integer-signed>>) -> I.
+cmp_zero(_Config) ->
+ cmp(0.5e-323,0).
+
+cmp_integer(_Config) ->
+ Axis = (1 bsl 53)-2.0, %% The point where floating-point numbers become imprecise
+ span_cmp(Axis,2,200),
+ cmp(Axis*Axis,round(Axis)).
+
+cmp_bignum(_Config) ->
+ span_cmp((1 bsl 58) - 1.0),%% Smallest bignum float
+
+ %% Test when the big num goes from I to I+1 in size
+ [span_cmp((1 bsl (32*I)) - 1.0) || I <- lists:seq(2,30)],
+
+ %% Test bignum greater than the largest float
+ cmp((1 bsl (64*16)) - 1, (1 bsl (64*15)) * 1.0),
+ %% Test when num is much larger than the float
+ [cmp((1 bsl (32*I)) - 1, (1 bsl (32*(I-2))) * 1.0) || I <- lists:seq(3,30)],
+ %% Test when float is much larger than num
+ [cmp((1 bsl (64*15)) * 1.0, (1 bsl (32*(I)))) || I <- lists:seq(1,29)],
+
+ %% Test that all int == float works as they should
+ [true = 1 bsl N == (1 bsl N)*1.0 || N <- lists:seq(0, 1023)],
+ [true = (1 bsl N)*-1 == (1 bsl N)*-1.0 || N <- lists:seq(0, 1023)].
+
+span_cmp(Axis) ->
+ span_cmp(Axis, 25).
+span_cmp(Axis, Length) ->
+ span_cmp(Axis, round(Axis) bsr 52, Length).
+span_cmp(Axis, Incr, Length) ->
+ [span_cmp(Axis, Incr, Length, 1 bsl (1 bsl I)) || I <- lists:seq(0,6)].
+%% This function creates tests around a number axis. <, > and == are all tested
+%% for both negative and positive numbers.
+%%
+%% Axis: The number around which to do the tests, e.g. (1 bsl 58) - 1.0
+%% Incr: How much to increment the test numbers between each test.
+%% Length: Length/2 is the number of Incr away from Axis to test on the
+%% negative and positive plane.
+%% Diff: How much the float and int should differ when comparing
+span_cmp(Axis, Incr, Length, Diff) ->
+ [begin
+ cmp(round(Axis*-1.0)+Diff+I*Incr,Axis*-1.0+I*Incr),
+ cmp(Axis*-1.0+I*Incr,round(Axis*-1.0)-Diff+I*Incr)
+ end || I <- lists:seq((Length div 2)*-1,(Length div 2))],
+ [begin
+ cmp(round(Axis)+Diff+I*Incr,Axis+I*Incr),
+ cmp(Axis+I*Incr,round(Axis)-Diff+I*Incr)
+ end || I <- lists:seq((Length div 2)*-1,(Length div 2))].
+
+cmp(Big,Small) when is_float(Big) ->
+ BigGtSmall = lists:flatten(
+ io_lib:format("~f > ~p",[Big,Small])),
+ BigLtSmall = lists:flatten(
+ io_lib:format("~f < ~p",[Big,Small])),
+ BigEqSmall = lists:flatten(
+ io_lib:format("~f == ~p",[Big,Small])),
+ SmallGtBig = lists:flatten(
+ io_lib:format("~p > ~f",[Small,Big])),
+ SmallLtBig = lists:flatten(
+ io_lib:format("~p < ~f",[Small,Big])),
+ SmallEqBig = lists:flatten(
+ io_lib:format("~p == ~f",[Small,Big])),
+ cmp(Big,Small,BigGtSmall,BigLtSmall,SmallGtBig,SmallLtBig,
+ SmallEqBig,BigEqSmall);
+cmp(Big,Small) when is_float(Small) ->
+ BigGtSmall = lists:flatten(
+ io_lib:format("~p > ~f",[Big,Small])),
+ BigLtSmall = lists:flatten(
+ io_lib:format("~p < ~f",[Big,Small])),
+ BigEqSmall = lists:flatten(
+ io_lib:format("~p == ~f",[Big,Small])),
+ SmallGtBig = lists:flatten(
+ io_lib:format("~f > ~p",[Small,Big])),
+ SmallLtBig = lists:flatten(
+ io_lib:format("~f < ~p",[Small,Big])),
+ SmallEqBig = lists:flatten(
+ io_lib:format("~f == ~p",[Small,Big])),
+ cmp(Big,Small,BigGtSmall,BigLtSmall,SmallGtBig,SmallLtBig,
+ SmallEqBig,BigEqSmall).
+
+cmp(Big,Small,BigGtSmall,BigLtSmall,SmallGtBig,SmallLtBig,
+ SmallEqBig,BigEqSmall) ->
+ {_,_,_,true} = {Big,Small,BigGtSmall,
+ Big > Small},
+ {_,_,_,false} = {Big,Small,BigLtSmall,
+ Big < Small},
+ {_,_,_,false} = {Big,Small,SmallGtBig,
+ Small > Big},
+ {_,_,_,true} = {Big,Small,SmallLtBig,
+ Small < Big},
+ {_,_,_,false} = {Big,Small,SmallEqBig,
+ Small == Big},
+ {_,_,_,false} = {Big,Small,BigEqSmall,
+ Big == Small}.
+
id(I) -> I.
start_node(Config) when is_list(Config) ->
@@ -205,3 +303,25 @@ start_node(Config) when is_list(Config) ->
stop_node(Node) ->
?t:stop_node(Node).
+
+
+%% Test that operations that might hide infinite intermediate results
+%% do not suppress the badarith.
+hidden_inf(Config) when is_list(Config) ->
+ ZeroP = 0.0,
+ ZeroN = id(ZeroP) * (-1),
+ [hidden_inf_1(A, B, Z, 9.23e307)
+ || A <- [1.0, -1.0, 3.1415, -0.00001000131, 3.57e257, ZeroP, ZeroN],
+ B <- [1.0, -1.0, 3.1415, -0.00001000131, 3.57e257, ZeroP, ZeroN],
+ Z <- [ZeroP, ZeroN]],
+ ok.
+
+hidden_inf_1(A, B, Zero, Huge) ->
+ {'EXIT',{badarith,_}} = (catch (B / (A / Zero))),
+ {'EXIT',{badarith,_}} = (catch (B * (A / Zero))),
+ {'EXIT',{badarith,_}} = (catch (B / (Huge * Huge))),
+ {'EXIT',{badarith,_}} = (catch (B * (Huge * Huge))),
+ {'EXIT',{badarith,_}} = (catch (B / (Huge + Huge))),
+ {'EXIT',{badarith,_}} = (catch (B * (Huge + Huge))),
+ {'EXIT',{badarith,_}} = (catch (B / (-Huge - Huge))),
+ {'EXIT',{badarith,_}} = (catch (B * (-Huge - Huge))).
diff --git a/erts/emulator/test/float_SUITE_data/fp_drv.c b/erts/emulator/test/float_SUITE_data/fp_drv.c
index eb453f6cd6..b80385c3f9 100644
--- a/erts/emulator/test/float_SUITE_data/fp_drv.c
+++ b/erts/emulator/test/float_SUITE_data/fp_drv.c
@@ -22,6 +22,7 @@
# define PRINTF(X)
#endif
+#include <string.h>
#include <math.h>
#ifdef __WIN32__
#include <float.h>
@@ -37,7 +38,8 @@ int _finite(double x);
#define ERTS_FP_CONTROL_TEST 0
#define ERTS_FP_THREAD_TEST 1
-static int control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvSSizeT control(ErlDrvData, unsigned int, char *,
+ ErlDrvSizeT, char **, ErlDrvSizeT);
static ErlDrvEntry fp_drv_entry = {
NULL /* init */,
@@ -97,10 +99,10 @@ do_test(void *unused)
return "ok";
}
-static int control(ErlDrvData drv_data,
- unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen)
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
{
char *res_str;
PRINTF(("control(%p, %d, ...) called\r\n", drv_data, command));
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index 7795efe57e..559e540016 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -647,17 +647,11 @@ refc_dist_1() ->
%% Fun is passed in an exit signal. Wait until it is gone.
?line wait_until(fun () -> 4 =/= fun_refc(F2) end),
?line 3 = fun_refc(F2),
- erts_debug:set_internal_state(available_internal_state, true),
- ?line F_refc = case erts_debug:get_internal_state(force_heap_frags) of
- false -> 3;
- true -> 2 % GC after bif already decreased it
- end,
- ?line F_refc = fun_refc(F),
- erts_debug:set_internal_state(available_internal_state, false),
+ ?line true = erlang:garbage_collect(),
+ ?line 2 = fun_refc(F),
refc_dist_send(Node, F).
refc_dist_send(Node, F) ->
- ?line true = erlang:garbage_collect(),
?line Pid = spawn_link(Node,
fun() -> receive
{To,Fun} when is_function(Fun) ->
diff --git a/erts/emulator/test/fun_r12_SUITE.erl b/erts/emulator/test/fun_r13_SUITE.erl
index 3b1dfc9825..76ddf9fec9 100644
--- a/erts/emulator/test/fun_r12_SUITE.erl
+++ b/erts/emulator/test/fun_r13_SUITE.erl
@@ -17,10 +17,10 @@
%% %CopyrightEnd%
%%
--module(fun_r12_SUITE).
--compile(r12).
+-module(fun_r13_SUITE).
+-compile(r13).
--export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
+-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,dist_old_release/1]).
@@ -29,10 +29,10 @@
suite() -> [{ct_hooks,[ts_install_cth]}].
-all() ->
+all() ->
[dist_old_release].
-groups() ->
+groups() ->
[].
init_per_suite(Config) ->
diff --git a/erts/emulator/test/guard_SUITE.erl b/erts/emulator/test/guard_SUITE.erl
index f41324c2cc..a5df9b59a0 100644
--- a/erts/emulator/test/guard_SUITE.erl
+++ b/erts/emulator/test/guard_SUITE.erl
@@ -421,7 +421,7 @@ try_gbif(Id, X, Y) ->
try_fail_gbif(Id, X, Y) ->
case catch guard_bif(Id, X, Y) of
- {'EXIT', {function_clause,[{?MODULE,guard_bif,[Id,X,Y]}|_]}} ->
+ {'EXIT',{function_clause,[{?MODULE,guard_bif,[Id,X,Y],_}|_]}} ->
io:format("guard_bif(~p, ~p, ~p) -- ok", [Id,X,Y]);
Other ->
?line ok = io:format("guard_bif(~p, ~p, ~p) -- bad result: ~p\n",
@@ -493,9 +493,9 @@ type_tests(Test, [Type|T], Allowed) ->
end;
false ->
case catch type_test(Test, Value) of
- {'EXIT', {function_clause, {?MODULE,type_test,[Test,Value]}}} ->
- ok;
- {'EXIT', {function_clause,[{?MODULE,type_test,[Test,Value]}|_]}} ->
+ {'EXIT',{function_clause,
+ [{?MODULE,type_test,[Test,Value],Loc}|_]}}
+ when is_list(Loc) ->
ok;
{'EXIT',Other} ->
?line test_server:fail({unexpected_error_reason,Other});
diff --git a/erts/emulator/test/hibernate_SUITE.erl b/erts/emulator/test/hibernate_SUITE.erl
index 203fa6b48e..82a0aad189 100644
--- a/erts/emulator/test/hibernate_SUITE.erl
+++ b/erts/emulator/test/hibernate_SUITE.erl
@@ -25,16 +25,16 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
basic/1,dynamic_call/1,min_heap_size/1,bad_args/1,
- messages_in_queue/1,undefined_mfa/1, no_heap/1]).
+ messages_in_queue/1,undefined_mfa/1,no_heap/1,wake_up_and_bif_trap/1]).
%% Used by test cases.
--export([basic_hibernator/1,dynamic_call_hibernator/2,messages_in_queue_restart/2, no_heap_loop/0]).
+-export([basic_hibernator/1,dynamic_call_hibernator/2,messages_in_queue_restart/2, no_heap_loop/0,characters_to_list_trap/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[basic, dynamic_call, min_heap_size, bad_args, messages_in_queue,
- undefined_mfa, no_heap].
+ undefined_mfa, no_heap, wake_up_and_bif_trap].
groups() ->
[].
@@ -384,6 +384,31 @@ clean_dict() ->
lists:foreach(fun ({Key, _}) -> erase(Key) end, Dict).
%%
+%% Wake up and then immediately bif trap with a lengthy computation.
+%%
+
+wake_up_and_bif_trap(doc) -> [];
+wake_up_and_bif_trap(suite) -> [];
+wake_up_and_bif_trap(Config) when is_list(Config) ->
+ ?line Self = self(),
+ ?line Pid = spawn_link(fun() -> erlang:hibernate(?MODULE, characters_to_list_trap, [Self]) end),
+ ?line Pid ! wakeup,
+ ?line receive
+ {ok, Pid0} when Pid0 =:= Pid -> ok
+ after 5000 ->
+ ?line ?t:fail(process_blocked)
+ end,
+ ?line unlink(Pid),
+ ?line exit(Pid, bye).
+
+%% Lengthy computation that traps (in characters_to_list_trap_3).
+characters_to_list_trap(Parent) ->
+ Bin0 = <<"abcdefghijklmnopqrstuvwxz0123456789">>,
+ Bin = binary:copy(Bin0, 1500),
+ unicode:characters_to_list(Bin),
+ Parent ! {ok, self()}.
+
+%%
%% Misc
%%
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 2b21fa58f4..461773114e 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -27,8 +27,9 @@
destructive_in_test_bif/1, guard_exceptions/1,
unary_plus/1, unary_minus/1, moving_labels/1]).
-export([fpe/1]).
+-export([otp_9422/1]).
--export([runner/2]).
+-export([runner/2, loop_runner/3]).
-export([f1/1, f2/2, f3/2, fn/1, fn/2, fn/3]).
-export([do_boxed_and_small/0]).
@@ -57,7 +58,8 @@ all() ->
trace_control_word, silent, silent_no_ms, ms_trace2,
ms_trace3, boxed_and_small, destructive_in_test_bif,
guard_exceptions, unary_plus, unary_minus, fpe,
- moving_labels];
+ moving_labels,
+ otp_9422];
true -> [not_run]
end.
@@ -208,6 +210,43 @@ test_3(Config) when is_list(Config) ->
?line collect(P1, [{trace, P1, call, {?MODULE, f2, [a, b]}, [true]}]),
?line ok.
+otp_9422(doc) -> [];
+otp_9422(Config) when is_list(Config) ->
+ Laps = 1000,
+ ?line Fun1 = fun() -> otp_9422_tracee() end,
+ ?line P1 = spawn_link(?MODULE, loop_runner, [self(), Fun1, Laps]),
+ io:format("spawned ~p as tracee\n", [P1]),
+
+ ?line erlang:trace(P1, true, [call, silent]),
+
+ ?line Fun2 = fun() -> otp_9422_trace_changer() end,
+ ?line P2 = spawn_link(?MODULE, loop_runner, [self(), Fun2, Laps]),
+ io:format("spawned ~p as trace_changer\n", [P2]),
+
+ start_collect(P1),
+ start_collect(P2),
+
+ %%receive after 10*1000 -> ok end,
+
+ stop_collect(P1),
+ stop_collect(P2),
+ ok.
+
+otp_9422_tracee() ->
+ ?MODULE:f1(a),
+ ?MODULE:f1(b),
+ ?MODULE:f1(c).
+
+otp_9422_trace_changer() ->
+ Pat1 = [{[a], [], [{enable_trace, arity}]}],
+ ?line erlang:trace_pattern({?MODULE, f1, 1}, Pat1),
+ Pat2 = [{[b], [], [{disable_trace, arity}]}],
+ ?line erlang:trace_pattern({?MODULE, f1, 1}, Pat2).
+
+
+
+
+
bad_match_spec_bin(Config) when is_list(Config) ->
{'EXIT',{badarg,_}} = (catch ets:match_spec_run([1], <<>>)),
B0 = <<1,2>>,
@@ -932,6 +971,24 @@ runner(Collector, Fun) ->
Collector ! {gone, self()}
end.
+loop_runner(Collector, Fun, Laps) ->
+ receive
+ {go, Collector} ->
+ go
+ end,
+ loop_runner_cont(Collector, Fun, 0, Laps).
+
+loop_runner_cont(_Collector, _Fun, Laps, Laps) ->
+ receive
+ {done, Collector} ->
+ io:format("loop_runner ~p exit after ~p laps\n", [self(), Laps]),
+ Collector ! {gone, self()}
+ end;
+loop_runner_cont(Collector, Fun, N, Laps) ->
+ Fun(),
+ loop_runner_cont(Collector, Fun, N+1, Laps).
+
+
f1(X) ->
{X}.
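otp_9422 above races trace_pattern updates against a process traced with the silent flag. A short standalone sketch of the mechanism involved, with illustrative module and function names that are not part of the patch; the action part of a match spec may flip trace flags on the process executing the traced call:

    -module(ms_demo).
    -export([demo/0, f/1, loop/0]).

    demo() ->
        Tracee = spawn(?MODULE, loop, []),
        erlang:trace(Tracee, true, [call, silent]),
        OnMS  = [{[a], [], [{enable_trace, arity}]}],   %% f(a) turns the arity flag on
        OffMS = [{[b], [], [{disable_trace, arity}]}],  %% f(b) turns it off again
        %% otp_9422 installs these two patterns alternately while the tracee keeps
        %% calling f/1; each trace_pattern/2 call replaces the previous pattern.
        erlang:trace_pattern({?MODULE, f, 1}, OnMS),
        erlang:trace_pattern({?MODULE, f, 1}, OffMS),
        Tracee.

    f(X) -> {X}.

    loop() ->
        ?MODULE:f(a),
        ?MODULE:f(b),
        loop().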
diff --git a/erts/emulator/test/mtx_SUITE.erl b/erts/emulator/test/mtx_SUITE.erl
index e0a7878bd8..024c3456a8 100644
--- a/erts/emulator/test/mtx_SUITE.erl
+++ b/erts/emulator/test/mtx_SUITE.erl
@@ -62,16 +62,29 @@ init_per_suite(Config) when is_list(Config) ->
Config.
end_per_suite(Config) when is_list(Config) ->
+ catch erts_debug:set_internal_state(available_internal_state, false),
Config.
init_per_testcase(_Case, Config) ->
Dog = ?t:timetrap(?t:minutes(15)),
+ %% Wait for deallocations to complete since we measure
+ %% runtime in test cases.
+ wait_deallocations(),
[{watchdog, Dog}|Config].
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
?t:timetrap_cancel(Dog).
+wait_deallocations() ->
+ try
+ erts_debug:set_internal_state(wait, deallocations)
+ catch
+ error:undef ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ wait_deallocations()
+ end.
+
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
@@ -109,7 +122,7 @@ long_rwlock(Config) when is_list(Config) ->
%% A very short run time is expected, since
%% threads in the test mostly wait
?t:format("RunTime=~p~n", [RunTime]),
- ?line true = RunTime < 100,
+ ?line true = RunTime < 400,
?line RunTimeStr = "Run-time during test was "++integer_to_list(RunTime)++" ms.",
case LLRes of
ok ->
@@ -268,7 +281,7 @@ hammer_sched_rwlock_test(FreqRead, LockCheck, Blocking, WaitLocked, WaitUnlocked
_ ->
{_, RunTime} = statistics(runtime),
?t:format("RunTime=~p~n", [RunTime]),
- ?line true = RunTime < 500,
+ ?line true = RunTime < 700,
{comment,
"Run-time during test was "
++ integer_to_list(RunTime)
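The relaxed run-time limits above apply to measurements taken with statistics(runtime) after outstanding deallocations have settled. A rough sketch of that kind of measurement, assuming an illustrative helper name and that the wait/deallocations debug state is available, as the patch uses:

    measure(Fun) ->
        catch erts_debug:set_internal_state(available_internal_state, true),
        erts_debug:set_internal_state(wait, deallocations), %% let pending frees settle
        statistics(runtime),                                %% reset the "since last" reading
        Fun(),
        {_Total, SinceLast} = statistics(runtime),
        SinceLast.                                          %% CPU time in ms spent in Fun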
diff --git a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
index 818023211c..7c8137dc83 100644
--- a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
+++ b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -552,13 +552,19 @@ create_rwlock(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
- rwlock_resource_t *rwlr;
+ /*
+ * Use a union for pointer type conversion to avoid compiler warnings
+ * about strict-aliasing violations with gcc-4.1. gcc >= 4.2 does not
+ * emit the warning.
+ * TODO: Reconsider use of union once gcc-4.1 is obsolete?
+ */
+ union { void* vp; rwlock_resource_t *p; } rwlr;
int blocking, write, wait_locked, wait_unlocked;
if (argc != 5)
goto badarg;
- if (!enif_get_resource(env, argv[0], enif_priv_data(env), (void **) &rwlr))
+ if (!enif_get_resource(env, argv[0], enif_priv_data(env), &rwlr.vp))
goto badarg;
blocking = get_bool(env, argv[1]);
@@ -581,22 +587,22 @@ rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
if (write) {
if (blocking)
- RWMUTEX_WLOCK(rwlr->rwlock);
+ RWMUTEX_WLOCK(rwlr.p->rwlock);
else
- while (EBUSY == RWMUTEX_TRYWLOCK(rwlr->rwlock));
- if (rwlr->lock_check) {
- ASSERT(!ATOMIC_READ(&rwlr->is_locked));
- ATOMIC_SET(&rwlr->is_locked, -1);
+ while (EBUSY == RWMUTEX_TRYWLOCK(rwlr.p->rwlock));
+ if (rwlr.p->lock_check) {
+ ASSERT(!ATOMIC_READ(&rwlr.p->is_locked));
+ ATOMIC_SET(&rwlr.p->is_locked, -1);
}
}
else {
if (blocking)
- RWMUTEX_RLOCK(rwlr->rwlock);
+ RWMUTEX_RLOCK(rwlr.p->rwlock);
else
- while (EBUSY == RWMUTEX_TRYRLOCK(rwlr->rwlock));
- if (rwlr->lock_check) {
- ASSERT(ATOMIC_READ(&rwlr->is_locked) >= 0);
- ATOMIC_INC(&rwlr->is_locked);
+ while (EBUSY == RWMUTEX_TRYRLOCK(rwlr.p->rwlock));
+ if (rwlr.p->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr.p->is_locked) >= 0);
+ ATOMIC_INC(&rwlr.p->is_locked);
}
}
@@ -604,18 +610,18 @@ rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
milli_sleep(wait_locked);
if (write) {
- if (rwlr->lock_check) {
- ASSERT(ATOMIC_READ(&rwlr->is_locked) == -1);
- ATOMIC_SET(&rwlr->is_locked, 0);
+ if (rwlr.p->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr.p->is_locked) == -1);
+ ATOMIC_SET(&rwlr.p->is_locked, 0);
}
- RWMUTEX_WUNLOCK(rwlr->rwlock);
+ RWMUTEX_WUNLOCK(rwlr.p->rwlock);
}
else {
- if (rwlr->lock_check) {
- ASSERT(ATOMIC_READ(&rwlr->is_locked) > 0);
- ATOMIC_DEC(&rwlr->is_locked);
+ if (rwlr.p->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr.p->is_locked) > 0);
+ ATOMIC_DEC(&rwlr.p->is_locked);
}
- RWMUTEX_RUNLOCK(rwlr->rwlock);
+ RWMUTEX_RUNLOCK(rwlr.p->rwlock);
}
if (wait_unlocked)
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 91d695d979..6bd7361612 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -35,7 +35,9 @@
resource_takeover/1,
threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1,
is_checks/1,
- get_length/1, make_atom/1, make_string/1]).
+ get_length/1, make_atom/1, make_string/1, reverse_list_test/1,
+ otp_9668/1
+ ]).
-export([many_args_100/100]).
@@ -60,7 +62,9 @@ all() ->
iolist_as_binary, resource, resource_binary,
resource_takeover, threading, send, send2, send3,
send_threaded, neg, is_checks, get_length, make_atom,
- make_string].
+ make_string,reverse_list_test,
+ otp_9668
+ ].
groups() ->
[].
@@ -257,10 +261,54 @@ types(Config) when is_list(Config) ->
end,
[{},{ok},{{}},{[],{}},{1,2,3,4,5}]),
Stuff = [[],{},0,0.0,(1 bsl 100),(fun()-> ok end),make_ref(),self()],
- [eq_cmp(A,clone(B)) || A<-Stuff, B<-Stuff],
+ [eq_cmp(A,clone(B)) || A<-Stuff, B<-Stuff],
+
+ {IntSz, LongSz} = type_sizes(),
+ UintMax = (1 bsl (IntSz*8)) - 1,
+ IntMax = UintMax bsr 1,
+ IntMin = -(IntMax+1),
+ UlongMax = (1 bsl (LongSz*8)) - 1,
+ LongMax = UlongMax bsr 1,
+ LongMin = -(LongMax+1),
+ Uint64Max = (1 bsl 64) - 1,
+ Int64Max = Uint64Max bsr 1,
+ Int64Min = -(Int64Max+1),
+ Limits = [{IntMin,IntMax},{0,UintMax},{LongMin,LongMax},{0,UlongMax},{Int64Min,Int64Max},{0,Uint64Max}],
+ io:format("Limits = ~p\n", [Limits]),
+ lists:foreach(fun(I) ->
+ R1 = echo_int(I),
+ %%io:format("echo_int(~p) -> ~p\n", [I, R1]),
+ R2 = my_echo_int(I, Limits),
+ ?line R1 = R2,
+ ?line true = (R1 =:= R2),
+ ?line true = (R1 == R2)
+ end, int_list()),
+
?line verify_tmpmem(TmpMem),
+ ?line true = (compare(-1294536544000, -1178704800000) < 0),
+ ?line true = (compare(-1178704800000, -1294536544000) > 0),
+ ?line true = (compare(-295147905179352825856, -36893488147419103232) < 0),
+ ?line true = (compare(-36893488147419103232, -295147905179352825856) > 0),
+ ?line true = (compare(-29514790517935282585612345678, -36893488147419103232) < 0),
+ ?line true = (compare(-36893488147419103232, -29514790517935282585612345678) > 0),
ok.
+int_list() ->
+ Start = 1 bsl 200,
+ int_list([Start], -Start).
+int_list([N | _]=List, End) when N<End ->
+ List;
+int_list([N | _]=List, End) ->
+ int_list([N - (1 + (abs(N) div 3)) | List], End).
+
+my_echo_int(I, Limits) ->
+ lists:map(fun({Min,Max}) ->
+ if I < Min -> false;
+ I > Max -> false;
+ true -> I
+ end
+ end, Limits).
+
clone(X) ->
binary_to_term(term_to_binary(X)).
@@ -811,7 +859,13 @@ resource_holder(Pid,Reply,List) ->
threading(doc) -> ["Test the threading API functions (reuse tests from driver API)"];
-threading(Config) when is_list(Config) ->
+threading(Config) when is_list(Config) ->
+ case erlang:system_info(threads) of
+ true -> threading_do(Config);
+ false -> {skipped,"No thread support"}
+ end.
+
+threading_do(Config) ->
?line Data = ?config(data_dir, Config),
?line File = filename:join(Data, "tester"),
?line {ok,tester,ModBin} = compile:file(File, [binary,return_errors]),
@@ -1121,7 +1175,28 @@ is_checks(Config) when is_list(Config) ->
?line ensure_lib_loaded(Config, 1),
?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
self(), hd(erlang:ports()), [], [1,9,9,8],
- {hejsan, "hejsan", [$h,"ejs",<<"an">>]}),
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 12),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -12),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 18446744073709551617),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -18446744073709551617),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 99.146),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -99.146),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, 18446744073709551616.2e2),
+ ?line ok = check_is(hejsan, <<19,98>>, make_ref(), ok, fun() -> ok end,
+ self(), hd(erlang:ports()), [], [1,9,9,8],
+ {hejsan, "hejsan", [$h,"ejs",<<"an">>]}, -18446744073709551616.2e2),
try
?line error = check_is_exception(),
?line throw(expected_badarg)
@@ -1164,15 +1239,37 @@ make_string(Config) when is_list(Config) ->
AStringWithAccents = [$E,$r,$l,$a,$n,$g,$ ,16#e4,$r,$ ,$e,$t,$t,$ ,$g,$e,$n,$e,$r,$e,$l,$l,$t,$ ,$p,$r,$o,$g,$r,$a,$m,$s,$p,$r,16#e5,$k],
?line Strings = {A0String,A0String,A0String,A0String0, AStringWithAccents}.
+reverse_list_test(Config) ->
+ ?line ensure_lib_loaded(Config, 1),
+ List = lists:seq(1,100),
+ RevList = lists:reverse(List),
+ ?line RevList = reverse_list(List),
+ ?line badarg = reverse_list(foo).
+
+otp_9668(doc) -> ["Memory leak of tmp-buffer when inspecting iolist or unaligned binary in a process independent environment"];
+otp_9668(Config) ->
+ ensure_lib_loaded(Config, 1),
+ TmpMem = tmpmem(),
+ IOList = ["This",' ',<<"is">>,' ',[<<"an iolist">>,'.']],
+ otp_9668_nif(IOList),
+
+ <<_:5/bitstring,UnalignedBin:10/binary,_/bitstring>> = <<"Abuse me as unaligned">>,
+ otp_9668_nif(UnalignedBin),
+
+ ?line verify_tmpmem(TmpMem),
+ ok.
+
+
tmpmem() ->
case erlang:system_info({allocator,temp_alloc}) of
false -> undefined;
MemInfo ->
MSBCS = lists:foldl(
fun ({instance, _, L}, Acc) ->
+ {value,{_,SBMBCS}} = lists:keysearch(sbmbcs, 1, L),
{value,{_,MBCS}} = lists:keysearch(mbcs, 1, L),
{value,{_,SBCS}} = lists:keysearch(sbcs, 1, L),
- [MBCS,SBCS | Acc]
+ [SBMBCS,MBCS,SBCS | Acc]
end,
[],
MemInfo),
@@ -1251,7 +1348,7 @@ get_resource(_,_) -> ?nif_stub.
release_resource(_) -> ?nif_stub.
last_resource_dtor_call() -> ?nif_stub.
make_new_resource(_,_) -> ?nif_stub.
-check_is(_,_,_,_,_,_,_,_,_,_) -> ?nif_stub.
+check_is(_,_,_,_,_,_,_,_,_,_,_) -> ?nif_stub.
check_is_exception() -> ?nif_stub.
length_test(_,_,_,_,_) -> ?nif_stub.
make_atoms() -> ?nif_stub.
@@ -1269,6 +1366,10 @@ send_blob_thread(_,_,_) -> ?nif_stub.
join_send_thread(_) -> ?nif_stub.
copy_blob(_) -> ?nif_stub.
send_term(_,_) -> ?nif_stub.
+reverse_list(_) -> ?nif_stub.
+echo_int(_) -> ?nif_stub.
+type_sizes() -> ?nif_stub.
+otp_9668_nif(_) -> ?nif_stub.
nif_stub_error(Line) ->
exit({nif_not_loaded,module,?MODULE,line,Line}).
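The types/1 additions above derive the valid range of each C integer type from its byte size, as reported by the type_sizes NIF. The same arithmetic as a standalone sketch (function name illustrative, not part of the patch):

    limits(Bytes) ->
        UMax = (1 bsl (Bytes * 8)) - 1,   %% unsigned maximum
        SMax = UMax bsr 1,                %% signed maximum
        SMin = -(SMax + 1),               %% signed minimum
        {SMin, SMax, UMax}.

    %% limits(4) -> {-2147483648, 2147483647, 4294967295}
    %% limits(8) -> {-9223372036854775808, 9223372036854775807, 18446744073709551615}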
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 0bb93daa33..03092fef5e 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -28,6 +28,7 @@
static int static_cntA; /* zero by default */
static int static_cntB = NIF_SUITE_LIB_VER * 100;
+static ERL_NIF_TERM atom_false;
static ERL_NIF_TERM atom_self;
static ERL_NIF_TERM atom_ok;
static ERL_NIF_TERM atom_join;
@@ -40,7 +41,18 @@ typedef struct
CallInfo* call_history;
NifModPrivData* nif_mod;
union { ErlNifResourceType* t; long l; } rt_arr[2];
-}PrivData;
+} PrivData;
+
+/*
+ * Use a union for pointer type conversion to avoid compiler warnings
+ * about strict-aliasing violations with gcc-4.1. gcc >= 4.2 does not
+ * emit the warning.
+ * TODO: Reconsider use of union once gcc-4.1 is obsolete?
+ */
+typedef union {
+ void* vp;
+ struct make_term_info* p;
+} mti_t;
void add_call(ErlNifEnv* env, PrivData* data, const char* func_name)
{
@@ -103,7 +115,7 @@ static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
msgenv_resource_type = enif_open_resource_type(env,NULL,"nif_SUITE.msgenv",
msgenv_dtor,
ERL_NIF_RT_CREATE, NULL);
-
+ atom_false = enif_make_atom(env,"false");
atom_self = enif_make_atom(env,"self");
atom_ok = enif_make_atom(env,"ok");
atom_join = enif_make_atom(env,"join");
@@ -333,8 +345,13 @@ static int test_double(ErlNifEnv* env, double d1)
#define TAG_BITS 4
#define SMALL_BITS (sizeof(void*)*8 - TAG_BITS)
+#ifdef _WIN64
+#define MAX_SMALL ((1LL << (SMALL_BITS-1))-1)
+#define MIN_SMALL (-(1LL << (SMALL_BITS-1)))
+#else
#define MAX_SMALL ((1L << (SMALL_BITS-1))-1)
#define MIN_SMALL (-(1L << (SMALL_BITS-1)))
+#endif
static ERL_NIF_TERM type_test(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
@@ -481,6 +498,45 @@ error:
return enif_make_atom(env,"error");
}
+static ERL_NIF_TERM echo_int(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int sint;
+ unsigned uint;
+ long slong;
+ unsigned long ulong;
+ ErlNifSInt64 sint64;
+ ErlNifUInt64 uint64;
+ ERL_NIF_TERM sint_term = atom_false, uint_term = atom_false;
+ ERL_NIF_TERM slong_term = atom_false, ulong_term = atom_false;
+ ERL_NIF_TERM sint64_term = atom_false, uint64_term = atom_false;
+
+ if (enif_get_int(env, argv[0], &sint)) {
+ sint_term = enif_make_int(env, sint);
+ }
+ if (enif_get_uint(env, argv[0], &uint)) {
+ uint_term = enif_make_uint(env, uint);
+ }
+ if (enif_get_long(env, argv[0], &slong)) {
+ slong_term = enif_make_long(env, slong);
+ }
+ if (enif_get_ulong(env, argv[0], &ulong)) {
+ ulong_term = enif_make_ulong(env, ulong);
+ }
+ if (enif_get_int64(env, argv[0], &sint64)) {
+ sint64_term = enif_make_int64(env, sint64);
+ }
+ if (enif_get_uint64(env, argv[0], &uint64)) {
+ uint64_term = enif_make_uint64(env, uint64);
+ }
+ return enif_make_list6(env, sint_term, uint_term, slong_term, ulong_term, sint64_term, uint64_term);
+}
+
+static ERL_NIF_TERM type_sizes(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return enif_make_tuple2(env, enif_make_int(env, sizeof(int)),
+ enif_make_int(env, sizeof(long)));
+}
+
static ERL_NIF_TERM tuple_2_list(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int arity = -1;
@@ -667,7 +723,7 @@ static ERL_NIF_TERM get_resource_type(ErlNifEnv* env, int argc, const ERL_NIF_TE
static ERL_NIF_TERM alloc_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
- union { ErlNifResourceType* t; long l;} type;
+ union { ErlNifResourceType* t; long l; } type;
union { void* p; long l;} data;
if (!enif_get_long(env, argv[0], &type.l)
|| !enif_inspect_binary(env, argv[1], &data_bin)
@@ -691,7 +747,7 @@ static ERL_NIF_TERM make_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM a
static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
- union { ErlNifResourceType* t; long l;} type;
+ union { ErlNifResourceType* t; long l; } type;
void* data;
ERL_NIF_TERM ret;
if (!enif_get_long(env, argv[0], &type.l)
@@ -709,7 +765,7 @@ static ERL_NIF_TERM make_new_resource(ErlNifEnv* env, int argc, const ERL_NIF_TE
static ERL_NIF_TERM make_new_resource_binary(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary data_bin;
- union { struct binary_resource* p; void* vp; long l;} br;
+ union { struct binary_resource* p; void* vp; long l; } br;
void* buf;
ERL_NIF_TERM ret;
if (!enif_inspect_binary(env, argv[0], &data_bin)
@@ -781,6 +837,7 @@ static ERL_NIF_TERM release_resource(ErlNifEnv* env, int argc, const ERL_NIF_TER
* argv[7] an empty list
* argv[8] a non-empty list
* argv[9] a tuple
+ * argv[10] a number (small, big integer or float)
*/
static ERL_NIF_TERM check_is(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
@@ -797,6 +854,7 @@ static ERL_NIF_TERM check_is(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]
if (!enif_is_list(env, argv[7])) return enif_make_badarg(env);
if (!enif_is_list(env, argv[8])) return enif_make_badarg(env);
if (!enif_is_tuple(env, argv[9])) return enif_make_badarg(env);
+ if (!enif_is_number(env, argv[10])) return enif_make_badarg(env);
return ok_atom;
}
@@ -1229,10 +1287,7 @@ static void msgenv_dtor(ErlNifEnv* env, void* obj)
static ERL_NIF_TERM clear_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union {
- void* vp;
- struct make_term_info* p;
- }mti;
+ mti_t mti;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
return enif_make_badarg(env);
}
@@ -1245,7 +1300,7 @@ static ERL_NIF_TERM clear_msgenv(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ERL_NIF_TERM term;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
|| (argc>2 && !enif_get_uint(env,argv[2], &mti.p->n))) {
@@ -1261,7 +1316,7 @@ static ERL_NIF_TERM grow_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ErlNifPid to;
ERL_NIF_TERM copy;
int res;
@@ -1276,7 +1331,7 @@ static ERL_NIF_TERM send_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ErlNifPid to;
ERL_NIF_TERM copy;
int res;
@@ -1294,7 +1349,7 @@ static ERL_NIF_TERM send3_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv
void* threaded_sender(void *arg)
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
mti.vp = arg;
enif_mutex_lock(mti.p->mtx);
@@ -1309,7 +1364,7 @@ void* threaded_sender(void *arg)
static ERL_NIF_TERM send_blob_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
ERL_NIF_TERM copy;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)
|| !enif_get_local_pid(env,argv[1], &mti.p->to_pid)) {
@@ -1335,7 +1390,7 @@ static ERL_NIF_TERM send_blob_thread(ErlNifEnv* env, int argc, const ERL_NIF_TER
static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
int err;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
return enif_make_badarg(env);
@@ -1352,7 +1407,7 @@ static ERL_NIF_TERM join_send_thread(ErlNifEnv* env, int argc, const ERL_NIF_TER
static ERL_NIF_TERM copy_blob(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- union { void* vp; struct make_term_info* p; }mti;
+ mti_t mti;
if (!enif_get_resource(env, argv[0], msgenv_resource_type, &mti.vp)) {
return enif_make_badarg(env);
}
@@ -1373,6 +1428,34 @@ static ERL_NIF_TERM send_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
return enif_make_int(env, ret);
}
+static ERL_NIF_TERM reverse_list(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
+ ERL_NIF_TERM rev_list;
+
+ if(!enif_make_reverse_list(env, argv[0], &rev_list))
+ return enif_make_atom(env, "badarg");
+ return rev_list;
+}
+
+static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ /* Inspect in process independent env */
+ ErlNifEnv* myenv = enif_alloc_env();
+ ERL_NIF_TERM mycopy = enif_make_copy(myenv, argv[0]);
+ ErlNifBinary obin, cbin;
+
+ if ((enif_inspect_binary(env, argv[0], &obin)
+ && enif_inspect_binary(myenv, mycopy, &cbin))
+ ||
+ (enif_inspect_iolist_as_binary(env, argv[0], &obin)
+ && enif_inspect_iolist_as_binary(myenv, mycopy, &cbin)))
+ {
+ assert(obin.size == cbin.size);
+ assert(memcmp(obin.data, cbin.data, obin.size) == 0);
+ }
+ enif_free_env(myenv);
+ return atom_ok;
+}
+
static ErlNifFunc nif_funcs[] =
{
{"lib_version", 0, lib_version},
@@ -1399,7 +1482,7 @@ static ErlNifFunc nif_funcs[] =
{"release_resource", 1, release_resource},
{"last_resource_dtor_call", 0, last_resource_dtor_call},
{"make_new_resource", 2, make_new_resource},
- {"check_is", 10, check_is},
+ {"check_is", 11, check_is},
{"check_is_exception", 0, check_is_exception},
{"length_test", 5, length_test},
{"make_atoms", 0, make_atoms},
@@ -1417,7 +1500,11 @@ static ErlNifFunc nif_funcs[] =
{"send_blob_thread", 3, send_blob_thread},
{"join_send_thread", 1, join_send_thread},
{"copy_blob", 1, copy_blob},
- {"send_term", 2, send_term}
+ {"send_term", 2, send_term},
+ {"reverse_list",1, reverse_list},
+ {"echo_int", 1, echo_int},
+ {"type_sizes", 0, type_sizes},
+ {"otp_9668_nif", 1, otp_9668_nif}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
diff --git a/erts/emulator/test/nif_SUITE_data/tester.c b/erts/emulator/test/nif_SUITE_data/tester.c
index 08466d0f18..257b116322 100644
--- a/erts/emulator/test/nif_SUITE_data/tester.c
+++ b/erts/emulator/test/nif_SUITE_data/tester.c
@@ -61,6 +61,7 @@ static int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
static ERL_NIF_TERM run(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
testcase_run(NULL);
+ testcase_cleanup(NULL);
return enif_make_atom(env, "ok");
}
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index eac56a867d..0a1ef5a78f 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -724,6 +724,8 @@ open_ports(Name, Settings) ->
[];
system_limit ->
[];
+ enomem ->
+ [];
Other ->
?line test_server:fail({open_ports, Other})
end;
diff --git a/erts/emulator/test/port_SUITE_data/echo_drv.c b/erts/emulator/test/port_SUITE_data/echo_drv.c
index 25eda116fe..1d39c6a00c 100644
--- a/erts/emulator/test/port_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/port_SUITE_data/echo_drv.c
@@ -1,4 +1,5 @@
#include <stdio.h>
+#include <string.h>
#include "erl_driver.h"
@@ -17,11 +18,9 @@ typedef struct _erl_drv_data EchoDrvData;
static EchoDrvData *echo_drv_start(ErlDrvPort port, char *command);
static void echo_drv_stop(EchoDrvData *data_p);
-static void echo_drv_output(EchoDrvData *data_p, char *buf, int len);
+static void echo_drv_output(ErlDrvData drv_data, char *buf,
+ ErlDrvSizeT len);
static void echo_drv_finish(void);
-static int echo_drv_control(EchoDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen);
static ErlDrvEntry echo_drv_entry = {
NULL, /* init */
@@ -33,10 +32,20 @@ static ErlDrvEntry echo_drv_entry = {
"echo_drv",
echo_drv_finish,
NULL, /* handle */
- echo_drv_control,
+ NULL, /* control */
NULL, /* timeout */
NULL, /* outputv */
- NULL /* ready_async */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
};
@@ -68,7 +77,8 @@ static EchoDrvData *echo_drv_start(ErlDrvPort port, char *command) {
static void echo_drv_stop(EchoDrvData *data_p) {
}
-static void echo_drv_output(EchoDrvData *data_p, char *buf, int len) {
+static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
+ EchoDrvData *data_p = (EchoDrvData *) drv_data;
void *void_ptr;
ErlDrvPort port = void_ptr = data_p;
@@ -77,9 +87,3 @@ static void echo_drv_output(EchoDrvData *data_p, char *buf, int len) {
static void echo_drv_finish() {
}
-
-static int echo_drv_control(EchoDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen) {
- return 0;
-}
diff --git a/erts/emulator/test/port_SUITE_data/exit_drv.c b/erts/emulator/test/port_SUITE_data/exit_drv.c
index 60f1b321bd..5f366b3545 100644
--- a/erts/emulator/test/port_SUITE_data/exit_drv.c
+++ b/erts/emulator/test/port_SUITE_data/exit_drv.c
@@ -5,11 +5,9 @@ typedef struct _erl_drv_data ExitDrvData;
static ExitDrvData *exit_drv_start(ErlDrvPort port, char *command);
static void exit_drv_stop(ExitDrvData *data_p);
-static void exit_drv_output(ExitDrvData *data_p, char *buf, int len);
+static void exit_drv_output(ExitDrvData *data_p, char *buf,
+ ErlDrvSizeT len);
static void exit_drv_finish(void);
-static int exit_drv_control(ExitDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen);
static ErlDrvEntry exit_drv_entry = {
NULL, /* init */
@@ -21,10 +19,20 @@ static ErlDrvEntry exit_drv_entry = {
"exit_drv",
exit_drv_finish,
NULL, /* handle */
- exit_drv_control,
+ NULL, /* control */
NULL, /* timeout */
NULL, /* outputv */
- NULL /* ready_async */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
};
DRIVER_INIT(exit_drv)
@@ -45,7 +53,7 @@ exit_drv_stop(ExitDrvData *datap)
}
static void
-exit_drv_output(ExitDrvData *datap, char *buf, int len)
+exit_drv_output(ExitDrvData *datap, char *buf, ErlDrvSizeT len)
{
driver_exit((ErlDrvPort) datap, 0);
}
@@ -55,14 +63,3 @@ exit_drv_finish(void)
{
}
-
-static int
-exit_drv_control(ExitDrvData *datap,
- unsigned int command,
- char *buf,
- int len,
- char **rbuf,
- int rlen)
-{
- return 0;
-}
diff --git a/erts/emulator/test/port_SUITE_data/failure_drv.c b/erts/emulator/test/port_SUITE_data/failure_drv.c
index 34d48e00f8..5826e6d5a9 100644
--- a/erts/emulator/test/port_SUITE_data/failure_drv.c
+++ b/erts/emulator/test/port_SUITE_data/failure_drv.c
@@ -5,10 +5,8 @@ typedef struct _erl_drv_data FailureDrvData;
static FailureDrvData *failure_drv_start(ErlDrvPort, char *);
static void failure_drv_stop(FailureDrvData *);
-static void failure_drv_output(FailureDrvData *, char *, int);
+static void failure_drv_output(ErlDrvData, char *, ErlDrvSizeT);
static void failure_drv_finish(void);
-static int failure_drv_control(FailureDrvData *, unsigned int,
- char *, int, char **, int);
static ErlDrvEntry failure_drv_entry = {
NULL, /* init */
@@ -18,12 +16,22 @@ static ErlDrvEntry failure_drv_entry = {
NULL, /* ready_input */
NULL, /* ready_output */
"failure_drv",
- failure_drv_finish,
+ NULL, /* finish */
NULL, /* handle */
- failure_drv_control,
+ NULL, /* control */
NULL, /* timeout */
NULL, /* outputv */
- NULL /* ready_async */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
};
@@ -46,7 +54,8 @@ static FailureDrvData *failure_drv_start(ErlDrvPort port, char *command) {
static void failure_drv_stop(FailureDrvData *data_p) {
}
-static void failure_drv_output(FailureDrvData *data_p, char *buf, int len) {
+static void failure_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
+ FailureDrvData *data_p = (FailureDrvData *) drv_data;
void *void_ptr;
ErlDrvPort port = void_ptr = data_p;
@@ -55,9 +64,3 @@ static void failure_drv_output(FailureDrvData *data_p, char *buf, int len) {
static void failure_drv_finish() {
}
-
-static int failure_drv_control(FailureDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen) {
- return 0;
-}
diff --git a/erts/emulator/test/port_bif_SUITE_data/control_drv.c b/erts/emulator/test/port_bif_SUITE_data/control_drv.c
index e9f57a887a..b937a8bb15 100644
--- a/erts/emulator/test/port_bif_SUITE_data/control_drv.c
+++ b/erts/emulator/test/port_bif_SUITE_data/control_drv.c
@@ -1,13 +1,15 @@
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include "erl_driver.h"
static ErlDrvPort erlang_port;
static ErlDrvData control_start(ErlDrvPort, char*);
static void control_stop(ErlDrvData);
-static void control_read(ErlDrvData, char*, int);
-static int control_control(ErlDrvData, unsigned int, char*, int, char**, int);
+static void control_read(ErlDrvData, char*, ErlDrvSizeT);
+static ErlDrvSSizeT control_control(ErlDrvData, unsigned int, char*,
+ ErlDrvSizeT, char**, ErlDrvSizeT);
static ErlDrvEntry control_driver_entry =
{
@@ -21,9 +23,19 @@ static ErlDrvEntry control_driver_entry =
NULL,
NULL,
control_control,
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
NULL,
NULL,
- NULL
};
DRIVER_INIT(control_drv)
@@ -41,7 +53,7 @@ static ErlDrvData control_start(ErlDrvPort port,char *buf)
return (ErlDrvData)port;
}
-static void control_read(ErlDrvData port, char *buf, int count)
+static void control_read(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
driver_output(erlang_port, buf, count);
}
@@ -51,8 +63,9 @@ static void control_stop(ErlDrvData port)
erlang_port = (ErlDrvPort)-1;
}
-static int control_control(ErlDrvData port, unsigned command, char* buf, int count,
- char** res, int res_size)
+static ErlDrvSSizeT control_control(ErlDrvData port, unsigned command,
+ char* buf, ErlDrvSizeT count,
+ char** res, ErlDrvSizeT res_size)
{
switch (command) {
case 'e':
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index f68e712268..fdc55a4cc5 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -35,7 +35,7 @@
self_exit/1, normal_suicide_exit/1, abnormal_suicide_exit/1,
t_exit_2_catch/1, trap_exit_badarg/1, trap_exit_badarg_in_bif/1,
exit_and_timeout/1, exit_twice/1,
- t_process_info/1, process_info_other_msg/1,
+ t_process_info/1, process_info_other/1, process_info_other_msg/1,
process_info_other_dist_msg/1,
process_info_2_list/1, process_info_lock_reschedule/1,
process_info_lock_reschedule2/1,
@@ -64,7 +64,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[spawn_with_binaries, t_exit_1, {group, t_exit_2},
trap_exit_badarg, trap_exit_badarg_in_bif,
- t_process_info, process_info_other_msg,
+ t_process_info, process_info_other, process_info_other_msg,
process_info_other_dist_msg, process_info_2_list,
process_info_lock_reschedule,
process_info_lock_reschedule2,
@@ -258,7 +258,9 @@ trap_exit_badarg() ->
?line Pid = fun_spawn(fun() -> bad_guy(kb_128()) end),
?line Garbage = kb_128(),
?line receive
- {'EXIT', Pid, {badarg,[{erlang,abs,[Garbage]},{?MODULE,bad_guy,1}|_]}} ->
+ {'EXIT',Pid,{badarg,[{erlang,abs,[Garbage],Loc1},
+ {?MODULE,bad_guy,1,Loc2}|_]}}
+ when is_list(Loc1), is_list(Loc2) ->
ok;
Other ->
?line ok = io:format("Bad EXIT message: ~P", [Other, 30]),
@@ -410,7 +412,7 @@ etwice_high(Low) ->
exit(Low, first),
exit(Low, second).
-%% Tests the process_info/1 BIF.
+%% Tests the process_info/2 BIF.
t_process_info(Config) when is_list(Config) ->
?line [] = process_info(self(), registered_name),
?line register(my_name, self()),
@@ -418,13 +420,100 @@ t_process_info(Config) when is_list(Config) ->
?line {status, running} = process_info(self(), status),
?line {min_heap_size, 233} = process_info(self(), min_heap_size),
?line {min_bin_vheap_size, 46368} = process_info(self(), min_bin_vheap_size),
- ?line {current_function, {?MODULE, t_process_info, 1}} =
+ ?line {current_function,{?MODULE,t_process_info,1}} =
process_info(self(), current_function),
+ ?line {current_function,{?MODULE,t_process_info,1}} =
+ apply(erlang, process_info, [self(),current_function]),
+
+ %% current_location and current_stacktrace
+ {Line1,Res1} = {?LINE,process_info(self(), current_location)},
+ verify_loc(Line1, Res1),
+ {Line2,Res2} = {?LINE,apply(erlang, process_info,
+ [self(),current_location])},
+ verify_loc(Line2, Res2),
+ pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]),
+
?line Gleader = group_leader(),
?line {group_leader, Gleader} = process_info(self(), group_leader),
?line {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
ok.
+pi_stacktrace(Expected0) ->
+ {Line,Res} = {?LINE,erlang:process_info(self(), current_stacktrace)},
+ {current_stacktrace,Stack} = Res,
+ Expected = [{?MODULE,pi_stacktrace,1,Line}|Expected0],
+ pi_stacktrace_1(Stack, Expected).
+
+pi_stacktrace_1([{M,F,A,Loc}|Stk], [{M,F,A,Line}|Exp]) ->
+ case Loc of
+ [] ->
+ %% No location info for some reason (+L, native code).
+ io:format("Missing location information for ~w:~w/~w",
+ [M,F,A]),
+ ok;
+ [_|_] ->
+ Line = proplists:get_value(line, Loc),
+ File = proplists:get_value(file, Loc),
+ File = ?MODULE_STRING ++ ".erl"
+ end,
+ pi_stacktrace_1(Stk, Exp);
+pi_stacktrace_1([_|_], []) -> ok.
+
+verify_loc(Line, {current_location,{?MODULE,t_process_info=F,1=A,Loc}}) ->
+ case Loc of
+ [] ->
+ %% No location info for some reason (+L, native code).
+ io:format("Missing location information for ~w:~w/~w",
+ [?MODULE,F,A]),
+ ok;
+ [_|_] ->
+ Line = proplists:get_value(line, Loc),
+ File = proplists:get_value(file, Loc),
+ File = ?MODULE_STRING ++ ".erl"
+ end.
+
+process_info_other(Config) when is_list(Config) ->
+ Self = self(),
+ Pid = spawn_link(fun() -> process_info_looper(Self) end),
+ receive after 1 -> ok end,
+ pio_current_location(10000, Pid, 0, 0),
+ pio_current_stacktrace().
+
+pio_current_location(0, _, Pi, Looper) ->
+ io:format("~w call(s) to erlang:process_info/2", [Pi]),
+ io:format("~w call(s) to ~w:process_info_looper/1", [Looper,?MODULE]);
+pio_current_location(N, Pid, Pi, Looper) ->
+ erlang:yield(),
+ {current_location,Where} = process_info(Pid, current_location),
+ case Where of
+ {erlang,process_info,2,[]} ->
+ pio_current_location(N-1, Pid, Pi+1, Looper);
+ {?MODULE,process_info_looper,1,Loc} when is_list(Loc) ->
+ pio_current_location(N-1, Pid, Pi, Looper+1)
+ end.
+
+pio_current_stacktrace() ->
+ L = [begin
+ {current_stacktrace,Stk} = process_info(P, current_stacktrace),
+ {P,Stk}
+ end || P <- processes()],
+ [erlang:garbage_collect(P) || {P,_} <- L],
+ erlang:garbage_collect(),
+ [verify_stacktrace(Stk) || {_,Stk} <- L],
+ ok.
+
+verify_stacktrace([{M,F,A,Loc}|T])
+ when is_atom(M),
+ is_atom(F),
+ is_integer(A),
+ is_list(Loc) ->
+ verify_stacktrace(T);
+verify_stacktrace([]) -> ok.
+
+process_info_looper(Parent) ->
+ process_info(Parent, current_location),
+ process_info_looper(Parent).
+
%% Tests the process_info/1 BIF on another process with messages.
process_info_other_msg(Config) when is_list(Config) ->
Self = self(),
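The new process_info tests above exercise the location-aware stacktrace format, where each frame is {Module,Function,Arity,Location} and Location is a proplist with file and line entries (or [] when no location information is available). A small sketch of consuming that format (function name illustrative, not part of the patch):

    where_am_i() ->
        {current_stacktrace, [{M, F, A, Loc} | _]} =
            process_info(self(), current_stacktrace),
        %% Loc is [] when the code carries no location info (e.g. +L, native code).
        {M, F, A, proplists:get_value(file, Loc), proplists:get_value(line, Loc)}.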
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index f16d0ea429..8931562828 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -55,7 +55,7 @@
scheduler_suspend/1,
reader_groups/1]).
--define(DEFAULT_TIMEOUT, ?t:minutes(10)).
+-define(DEFAULT_TIMEOUT, ?t:minutes(15)).
-define(MIN_SCHEDULER_TEST_TIMEOUT, ?t:minutes(1)).
@@ -87,8 +87,17 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-
+init_per_testcase(update_cpu_info, Config) ->
+ case os:find_executable("taskset") of
+ false ->
+ {skip,"Could not find 'taskset' in path"};
+ _ ->
+ init_per_tc(update_cpu_info, Config)
+ end;
init_per_testcase(Case, Config) when is_list(Config) ->
+ init_per_tc(Case, Config).
+
+init_per_tc(Case, Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
process_flag(priority, max),
erlang:display({'------------', ?MODULE, Case, '------------'}),
@@ -510,16 +519,18 @@ bound_loop(NS, N, M, Sched) ->
bindings(Node, BindType) ->
Parent = self(),
Ref = make_ref(),
- spawn_link(Node,
- fun () ->
- enable_internal_state(),
- Res = (catch erts_debug:get_internal_state(
- {fake_scheduler_bindings, BindType})),
- Parent ! {Ref, Res}
- end),
+ Pid = spawn_link(Node,
+ fun () ->
+ enable_internal_state(),
+ Res = (catch erts_debug:get_internal_state(
+ {fake_scheduler_bindings,
+ BindType})),
+ Parent ! {Ref, Res}
+ end),
receive
{Ref, Res} ->
?t:format("~p: ~p~n", [BindType, Res]),
+ unlink(Pid),
Res
end.
@@ -1030,7 +1041,7 @@ sbt_test(Config, CpuTCmd, ClBt, Bt, LP) ->
?line ok.
scheduler_suspend(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:minutes(2)),
+ ?line Dog = ?t:timetrap(?t:minutes(5)),
?line lists:foreach(fun (S) -> scheduler_suspend_test(Config, S) end,
[64, 32, 16, default]),
?line ?t:timetrap_cancel(Dog),
@@ -1675,7 +1686,7 @@ do_it(Tracer, Low, Normal, High, Max, RedsPerSchedLimit) ->
EndWait = now(),
BalanceWait = timer:now_diff(EndWait,StartWait) div 1000,
erlang:display({balance_wait, BalanceWait}),
- Timeout = ?DEFAULT_TIMEOUT - ?t:seconds(10) - BalanceWait,
+ Timeout = ?DEFAULT_TIMEOUT - ?t:minutes(4) - BalanceWait,
Res = case Timeout < ?MIN_SCHEDULER_TEST_TIMEOUT of
true ->
stop_work(Low, Normal, High, Max),
diff --git a/erts/emulator/test/send_term_SUITE.erl b/erts/emulator/test/send_term_SUITE.erl
index 6615873392..ba0ba804ca 100644
--- a/erts/emulator/test/send_term_SUITE.erl
+++ b/erts/emulator/test/send_term_SUITE.erl
@@ -175,6 +175,10 @@ chk_temp_alloc() ->
%% Verify that we haven't got anything allocated by temp_alloc
lists:foreach(
fun ({instance, _, TI}) ->
+ ?line {value, {sbmbcs, SBMBCInfo}}
+ = lists:keysearch(sbmbcs, 1, TI),
+ ?line {value, {blocks, 0, _, _}}
+ = lists:keysearch(blocks, 1, SBMBCInfo),
?line {value, {mbcs, MBCInfo}}
= lists:keysearch(mbcs, 1, TI),
?line {value, {blocks, 0, _, _}}
diff --git a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
index 165cce2e9d..b3feca79f0 100644
--- a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
+++ b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c
@@ -24,7 +24,7 @@
static ErlDrvPort erlang_port;
static ErlDrvData send_term_drv_start(ErlDrvPort port, char *command);
static void send_term_drv_stop(ErlDrvData drv_data);
-static void send_term_drv_run(ErlDrvData drv_data, char *buf, int len);
+static void send_term_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len);
static int make_ext_term_list(ErlDrvTermData *td, int bad);
@@ -39,6 +39,22 @@ static ErlDrvEntry send_term_drv_entry = {
NULL,
NULL,
"send_term_drv",
+ NULL,
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL,
};
DRIVER_INIT(send_term_drv)
@@ -64,7 +80,7 @@ static void send_term_drv_stop(ErlDrvData drv_data)
static void output_term(ErlDrvTermData* msg, int len);
static void fail_term(ErlDrvTermData* msg, int len, int line);
-static void send_term_drv_run(ErlDrvData port, char *buf, int count)
+static void send_term_drv_run(ErlDrvData port, char *buf, ErlDrvSizeT count)
{
char buf7[1024];
ErlDrvTermData spec[1024];
diff --git a/erts/emulator/test/sensitive_SUITE.erl b/erts/emulator/test/sensitive_SUITE.erl
index 634df367ca..e073eab596 100644
--- a/erts/emulator/test/sensitive_SUITE.erl
+++ b/erts/emulator/test/sensitive_SUITE.erl
@@ -160,8 +160,7 @@ recv_trace(Config) when is_list(Config) ->
?line {messages,Messages} = process_info(Tracer, messages),
[{trace,Parent,'receive',a},
{trace,Parent,'receive',{trace_delivered,_,_}},
- {trace,Parent,'receive',c},
- {trace,Parent,'receive',{trace_delivered,_,_}}] = Messages,
+ {trace,Parent,'receive',c}|_] = Messages,
?line unlink(Tracer), exit(Tracer, kill),
?line unlink(Sender), exit(Sender, kill),
diff --git a/erts/emulator/test/smoke_test_SUITE.erl b/erts/emulator/test/smoke_test_SUITE.erl
new file mode 100644
index 0000000000..98f1cf1ad5
--- /dev/null
+++ b/erts/emulator/test/smoke_test_SUITE.erl
@@ -0,0 +1,139 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(smoke_test_SUITE).
+
+-include_lib("test_server/include/test_server.hrl").
+
+%-compile(export_all).
+-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
+ init_per_group/2,end_per_group/2,
+ init_per_testcase/2, end_per_testcase/2]).
+
+-export([boot_combo/1]).
+
+-define(DEFAULT_TIMEOUT, ?t:minutes(2)).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [boot_combo].
+
+groups() ->
+ [].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_GroupName, Config) ->
+ Config.
+
+end_per_group(_GroupName, Config) ->
+ Config.
+
+
+init_per_testcase(Case, Config) when is_list(Config) ->
+ Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
+ [{testcase, Case},{watchdog, Dog}|Config].
+
+end_per_testcase(_Case, Config) when is_list(Config) ->
+ Dog = ?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+%%%
+%%% The test cases -------------------------------------------------------------
+%%%
+
+boot_combo(Config) when is_list(Config) ->
+ ZFlags = os:getenv("ERL_ZFLAGS"),
+ NOOP = fun () -> ok end,
+ A42 = fun () ->
+ case erlang:system_info(threads) of
+ true ->
+ 42 = erlang:system_info(thread_pool_size);
+ false ->
+ ok
+ end
+ end,
+ SMPDisable = fun () -> false = erlang:system_info(smp_support) end,
+ try
+ chk_boot(Config, "+Ktrue", NOOP),
+ chk_boot(Config, "+A42", A42),
+ chk_boot(Config, "-smp disable", SMPDisable),
+ chk_boot(Config, "+Ktrue +A42", A42),
+ chk_boot(Config, "-smp disable +A42",
+ fun () -> SMPDisable(), A42() end),
+ chk_boot(Config, "-smp disable +Ktrue", SMPDisable),
+ chk_boot(Config, "-smp disable +Ktrue +A42",
+ fun () -> SMPDisable(), A42() end),
+ %% A lot more combos could be implemented...
+ ok
+ after
+ os:putenv("ERL_ZFLAGS", case ZFlags of
+ false -> "";
+ _ -> ZFlags
+ end)
+ end.
+
+%%%
+%%% Aux functions --------------------------------------------------------------
+%%%
+
+chk_boot(Config, Args, Fun) ->
+ true = os:putenv("ERL_ZFLAGS", Args),
+ Success = make_ref(),
+ Parent = self(),
+ ?t:format("--- Testing ~s~n", [Args]),
+ {ok, Node} = start_node(Config),
+ Pid = spawn_link(Node, fun () ->
+ Fun(),
+ Parent ! {self(), Success}
+ end),
+ receive
+ {Pid, Success} ->
+ Node = node(Pid),
+ stop_node(Node),
+ ?t:format("--- Success!~n", []),
+ ok
+ end.
+
+start_node(Config) ->
+ start_node(Config, "").
+
+start_node(Config, Args) when is_list(Config) ->
+ ?line Pa = filename:dirname(code:which(?MODULE)),
+ ?line {A, B, C} = now(),
+ ?line Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(?config(testcase, Config))
+ ++ "-"
+ ++ integer_to_list(A)
+ ++ "-"
+ ++ integer_to_list(B)
+ ++ "-"
+ ++ integer_to_list(C)),
+ ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
+
+stop_node(Node) ->
+ ?t:stop_node(Node).
+
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index 0392312a6f..a93dd309c1 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -29,6 +29,7 @@
runtime_zero_diff/1,
runtime_update/1, runtime_diff/1,
run_queue_one/1,
+ scheduler_wall_time/1,
reductions/1, reductions_big/1, garbage_collection/1, io/1,
badarg/1]).
@@ -51,8 +52,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[{group, wall_clock}, {group, runtime}, reductions,
- reductions_big, {group, run_queue}, garbage_collection,
- io, badarg].
+ reductions_big, {group, run_queue}, scheduler_wall_time,
+ garbage_collection, io, badarg].
groups() ->
[{wall_clock, [],
@@ -266,11 +267,10 @@ run_queue_one(Config) when is_list(Config) ->
run_queue_one_test(Config) when is_list(Config) ->
- ?line Hog = spawn_link(?MODULE, hog, [self()]),
+ ?line _Hog = spawn_link(?MODULE, hog, [self()]),
?line receive
- hog_started ->
- Hog ! go
- end,
+ hog_started -> ok
+ end,
?line receive after 100 -> ok end, % Give hog a head start.
?line case statistics(run_queue) of
N when N >= 1 -> ok;
@@ -280,18 +280,88 @@ run_queue_one_test(Config) when is_list(Config) ->
%% CPU-bound process, going at low priority. It will always be ready
%% to run.
-
+
hog(Pid) ->
?line process_flag(priority, low),
?line Pid ! hog_started,
- ?line receive
- go -> hog_iter(0)
+ ?line Mon = erlang:monitor(process, Pid),
+ ?line hog_iter(0, Mon).
+
+hog_iter(N, Mon) when N > 0 ->
+ receive
+ {'DOWN', Mon, _, _, _} -> ok
+ after 0 ->
+ ?line hog_iter(N-1, Mon)
+ end;
+hog_iter(0, Mon) ->
+ ?line hog_iter(10000, Mon).
+
+%%% Tests of statistics(scheduler_wall_time).
+
+scheduler_wall_time(doc) ->
+ "Tests that statistics(scheduler_wall_time) works as intended";
+scheduler_wall_time(Config) when is_list(Config) ->
+ %% Should return undefined if system_flag is not turned on yet
+ undefined = statistics(scheduler_wall_time),
+ %% Turn on statistics
+ false = erlang:system_flag(scheduler_wall_time, true),
+ try
+ Schedulers = erlang:system_info(schedulers_online),
+ %% Let testserver and everyone else finish their work
+ timer:sleep(500),
+ %% Empty load
+ EmptyLoad = get_load(),
+ {false, _} = {lists:any(fun(Load) -> Load > 50 end, EmptyLoad),EmptyLoad},
+ MeMySelfAndI = self(),
+ StartHog = fun() ->
+ Pid = spawn(?MODULE, hog, [self()]),
+ receive hog_started -> MeMySelfAndI ! go end,
+ Pid
+ end,
+ P1 = StartHog(),
+ %% Max on one, the other schedulers empty (hopefully)
+ %% Be generous the process can jump between schedulers
+ %% which is ok and we don't want the test to fail for wrong reasons
+ _L1 = [S1Load|EmptyScheds1] = get_load(),
+ {true,_} = {S1Load > 50,S1Load},
+ {false,_} = {lists:any(fun(Load) -> Load > 50 end, EmptyScheds1),EmptyScheds1},
+ {true,_} = {lists:sum(EmptyScheds1) < 60,EmptyScheds1},
+
+ %% 50% load
+ HalfHogs = [StartHog() || _ <- lists:seq(1, (Schedulers-1) div 2)],
+ HalfLoad = lists:sum(get_load()) div Schedulers,
+ if Schedulers < 2, HalfLoad > 80 -> ok; %% Ok only one scheduler online and one hog
+ %% We want roughly 50% load
+ HalfLoad > 40, HalfLoad < 60 -> ok;
+ true -> exit({halfload, HalfLoad})
+ end,
+
+ %% 100% load
+ LastHogs = [StartHog() || _ <- lists:seq(1, Schedulers div 2)],
+ FullScheds = get_load(),
+ {false,_} = {lists:any(fun(Load) -> Load < 80 end, FullScheds),FullScheds},
+ FullLoad = lists:sum(FullScheds) div Schedulers,
+ if FullLoad > 90 -> ok;
+ true -> exit({fullload, FullLoad})
+ end,
+
+ [exit(Pid, kill) || Pid <- [P1|HalfHogs++LastHogs]],
+ AfterLoad = get_load(),
+ {false,_} = {lists:any(fun(Load) -> Load > 5 end, AfterLoad),AfterLoad},
+ true = erlang:system_flag(scheduler_wall_time, false)
+ after
+ erlang:system_flag(scheduler_wall_time, false)
end.
-hog_iter(N) when N > 0 ->
- ?line hog_iter(N-1);
-hog_iter(0) ->
- ?line hog_iter(10000).
+get_load() ->
+ Start = erlang:statistics(scheduler_wall_time),
+ timer:sleep(500),
+ End = erlang:statistics(scheduler_wall_time),
+ lists:reverse(lists:sort(load_percentage(lists:sort(Start),lists:sort(End)))).
+
+load_percentage([{Id, WN, TN}|Ss], [{Id, WP, TP}|Ps]) ->
+ [100*(WN-WP) div (TN-TP)|load_percentage(Ss, Ps)];
+load_percentage([], []) -> [].
garbage_collection(doc) ->
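get_load/0 above turns two scheduler_wall_time readings into a per-scheduler utilisation figure: the active-time delta divided by the total-time delta. The same calculation as a standalone sketch (function name illustrative, not part of the patch):

    utilisation(Millis) ->
        erlang:system_flag(scheduler_wall_time, true),
        T0 = lists:sort(erlang:statistics(scheduler_wall_time)),
        timer:sleep(Millis),
        T1 = lists:sort(erlang:statistics(scheduler_wall_time)),
        erlang:system_flag(scheduler_wall_time, false),
        %% Each entry is {SchedulerId, ActiveTime, TotalTime}.
        [{Id, 100 * (A1 - A0) div (T1T - T0T)}
         || {{Id, A0, T0T}, {Id, A1, T1T}} <- lists:zip(T0, T1)].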
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index 9b782b35a2..0350eb671d 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -37,7 +37,7 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2, end_per_testcase/2]).
--export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1]).
+-export([process_count/1, system_version/1, misc_smoke_tests/1, heap_size/1, wordsize/1, memory/1]).
-define(DEFAULT_TIMEOUT, ?t:minutes(2)).
@@ -45,7 +45,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[process_count, system_version, misc_smoke_tests,
- heap_size, wordsize].
+ heap_size, wordsize, memory].
groups() ->
[].
@@ -187,3 +187,312 @@ wordsize(Config) when is_list(Config) ->
Other ->
exit({unexpected_wordsizes,Other})
end.
+
+memory(doc) -> ["Verify that erlang:memory/0 and the memory results produced in crash dumps are similar"];
+memory(Config) when is_list(Config) ->
+ %%
+    %% Verify that erlang:memory/0 and the memory results
+    %% produced in crash dumps are similar.
+ %%
+ %% erlang:memory/0 requests information from each scheduler
+ %% thread and puts the information together in erlang code
+ %% (erlang.erl).
+ %%
+ %% When a crash dump is written we cannot use the
+ %% erlang:memory/0 implementation. The crashdump implementation
+ %% is a pure C implementation inspecting all allocator instances
+ %% after the system has been blocked (erts_memory() in erl_alloc.c).
+ %%
+ %% Since we got two implementations, modifications can easily
+ %% cause them to produce different results.
+ %%
+ %% erts_debug:get_internal_state(memory) blocks the system and
+    %% executes the same code as the crash dump writing uses.
+ %%
+
+ erts_debug:set_internal_state(available_internal_state, true),
+    %% Use a large heap size on the controlling process in
+ %% order to avoid changes in its heap size during
+ %% comparisons.
+ MinHeapSize = process_flag(min_heap_size, 1024*1024),
+ Prio = process_flag(priority, max),
+ try
+ erlang:memory(), %% first call will init stat atoms
+ garbage_collect(), %% blow up heap
+ memory_test(Config)
+ catch
+ error:notsup -> {skipped, "erlang:memory() not supported"}
+ after
+ process_flag(min_heap_size, MinHeapSize),
+ process_flag(priority, Prio),
+ catch erts_debug:set_internal_state(available_internal_state, false)
+ end.
+
+memory_test(_Config) ->
+
+ MWs = spawn_mem_workers(),
+
+ DPs = mem_workers_call(MWs,
+ fun () ->
+ mapn(fun (_) ->
+ spawn(fun () ->
+ receive
+ after infinity ->
+ ok
+ end
+ end)
+ end,
+ 1000 div erlang:system_info(schedulers_online))
+ end,
+ []),
+ cmp_memory(MWs, "spawn procs"),
+
+ Ps = lists:flatten(DPs),
+
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (P) -> link(P) end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "link procs"),
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (P) -> unlink(P) end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "unlink procs"),
+
+ DMs = mem_workers_call(MWs,
+ fun () ->
+ lists:map(fun (P) ->
+ monitor(process, P)
+ end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "monitor procs"),
+ Ms = lists:flatten(DMs),
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (M) ->
+ demonitor(M)
+ end, Ms)
+ end,
+ []),
+ cmp_memory(MWs, "demonitor procs"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ lists:foreach(fun (P) ->
+ P ! {a, "message", make_ref()}
+ end, Ps)
+ end,
+ []),
+ cmp_memory(MWs, "message procs"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ Mons = lists:map(fun (P) ->
+ exit(P, kill),
+ monitor(process, P)
+ end,
+ Ps),
+ lists:foreach(fun (Mon) ->
+ receive
+ {'DOWN', Mon, _, _, _} -> ok
+ end
+ end,
+ Mons)
+ end, []),
+ cmp_memory(MWs, "kill procs"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ put(binary_data,
+ mapn(fun (_) -> list_to_binary(lists:duplicate(256,$?)) end, 100))
+ end,
+ []),
+
+ cmp_memory(MWs, "store binary data"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ put(binary_data, false),
+ garbage_collect()
+ end,
+ []),
+ cmp_memory(MWs, "release binary data"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ list_to_atom("an ugly atom "++integer_to_list(erlang:system_info(scheduler_id))),
+ list_to_atom("another ugly atom "++integer_to_list(erlang:system_info(scheduler_id))),
+ list_to_atom("yet another ugly atom "++integer_to_list(erlang:system_info(scheduler_id)))
+ end,
+ []),
+ cmp_memory(MWs, "new atoms"),
+
+
+ mem_workers_call(MWs,
+ fun () ->
+ T = ets:new(?MODULE, []),
+ ets:insert(T, {gurka, lists:seq(1,10000)}),
+ ets:insert(T, {banan, lists:seq(1,1024)}),
+ ets:insert(T, {appelsin, make_ref()}),
+ put(ets_id, T)
+ end,
+ []),
+ cmp_memory(MWs, "store ets data"),
+
+ mem_workers_call(MWs,
+ fun () ->
+ ets:delete(get(ets_id)),
+ put(ets_id, false)
+ end,
+ []),
+ cmp_memory(MWs, "remove ets data"),
+
+ lists:foreach(fun (MW) ->
+ unlink(MW),
+ Mon = monitor(process, MW),
+ exit(MW, kill),
+ receive
+ {'DOWN', Mon, _, _, _} -> ok
+ end
+ end,
+ MWs),
+ ok.
+
+mem_worker() ->
+ receive
+ {call, From, Fun, Args} ->
+ From ! {reply, self(), apply(Fun, Args)},
+ mem_worker();
+ {cast, _From, Fun, Args} ->
+ apply(Fun, Args),
+ mem_worker()
+ end.
+
+mem_workers_call(MWs, Fun, Args) ->
+ lists:foreach(fun (MW) ->
+ MW ! {call, self(), Fun, Args}
+ end,
+ MWs),
+ lists:map(fun (MW) ->
+ receive
+ {reply, MW, Res} ->
+ Res
+ end
+ end,
+ MWs).
+
+mem_workers_cast(MWs, Fun, Args) ->
+ lists:foreach(fun (MW) ->
+ MW ! {cast, self(), Fun, Args}
+ end,
+ MWs).
+
+spawn_mem_workers() ->
+ spawn_mem_workers(erlang:system_info(schedulers_online)).
+
+spawn_mem_workers(0) ->
+ [];
+spawn_mem_workers(N) ->
+ [spawn_opt(fun () -> mem_worker() end,
+ [{scheduler, N rem erlang:system_info(schedulers_online) + 1},
+ link]) | spawn_mem_workers(N-1)].
+
+
+
+mem_get(X, Mem) ->
+ case lists:keyfind(X, 1, Mem) of
+ {X, Val} -> Val;
+ false -> false
+ end.
+
+cmp_memory(What, Mem1, Mem2, 1) ->
+ R1 = mem_get(What, Mem1),
+ R2 = mem_get(What, Mem2),
+ true = R1 == R2;
+cmp_memory(What, Mem1, Mem2, RelDiff) ->
+    %% We allow a relative difference of RelDiff
+ R1 = mem_get(What, Mem1),
+ R2 = mem_get(What, Mem2),
+ case R1 == R2 of
+ true ->
+ ok;
+ false ->
+ case R1 > R2 of
+ true ->
+ true = R2*RelDiff > R1;
+ false ->
+ true = R1*RelDiff > R2
+ end
+ end.
+
+pos_int(Val) when Val >= 0 ->
+ Val;
+pos_int(Val) ->
+ exit({not_pos_int, Val}).
+
+check_sane_memory(Mem) ->
+ Tot = pos_int(mem_get(total, Mem)),
+ Proc = pos_int(mem_get(processes, Mem)),
+ ProcUsed = pos_int(mem_get(processes_used, Mem)),
+ Sys = pos_int(mem_get(system, Mem)),
+ Atom = pos_int(mem_get(atom, Mem)),
+ AtomUsed = pos_int(mem_get(atom_used, Mem)),
+ Bin = pos_int(mem_get(binary, Mem)),
+ Code = pos_int(mem_get(code, Mem)),
+ Ets = pos_int(mem_get(ets, Mem)),
+
+ Tot = Proc + Sys,
+ true = Sys > Atom + Bin + Code + Ets,
+ true = Proc >= ProcUsed,
+ true = Atom >= AtomUsed,
+
+ case mem_get(maximum, Mem) of
+ false -> ok;
+ Max -> true = pos_int(Max) >= Tot
+ end,
+ ok.
+
+cmp_memory(MWs, Str) ->
+ erlang:display(Str),
+ lists:foreach(fun (MW) -> garbage_collect(MW) end, MWs),
+ garbage_collect(),
+ erts_debug:set_internal_state(wait, deallocations),
+
+ EDM = erts_debug:get_internal_state(memory),
+ EM = erlang:memory(),
+
+ io:format("~s:~n"
+ "erlang:memory() = ~p~n"
+ "crash dump memory = ~p~n",
+ [Str, EM, EDM]),
+
+ ?line check_sane_memory(EM),
+ ?line check_sane_memory(EDM),
+
+ %% We expect these to always give us exactly the same result
+
+ ?line cmp_memory(atom, EM, EDM, 1),
+ ?line cmp_memory(atom_used, EM, EDM, 1),
+ ?line cmp_memory(binary, EM, EDM, 1),
+ ?line cmp_memory(code, EM, EDM, 1),
+ ?line cmp_memory(ets, EM, EDM, 1),
+
+ %% Total, processes, processes_used, and system will seldom
+ %% give us exactly the same result since the two readings
+ %% aren't taken atomically.
+
+ ?line cmp_memory(total, EM, EDM, 1.05),
+ ?line cmp_memory(processes, EM, EDM, 1.05),
+ ?line cmp_memory(processes_used, EM, EDM, 1.05),
+ ?line cmp_memory(system, EM, EDM, 1.05),
+
+ ok.
+
+mapn(_Fun, 0) ->
+ [];
+mapn(Fun, N) ->
+ [Fun(N) | mapn(Fun, N-1)].
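cmp_memory/4 above compares one key of erlang:memory/0 with the crash-dump-style reading either exactly or within a relative factor (1.05, i.e. 5%, for the totals that cannot be read atomically). The comparison isolated as a sketch (function name illustrative, not part of the patch):

    within(Key, Mem1, Mem2, RelDiff) ->
        {Key, V1} = lists:keyfind(Key, 1, Mem1),
        {Key, V2} = lists:keyfind(Key, 1, Mem2),
        if V1 == V2 -> true;
           V1 > V2  -> V2 * RelDiff > V1;   %% V1 must be within RelDiff of V2
           true     -> V1 * RelDiff > V2    %% and vice versa
        end.

    %% within(total, erlang:memory(), erts_debug:get_internal_state(memory), 1.05)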
diff --git a/erts/emulator/test/system_profile_SUITE.erl b/erts/emulator/test/system_profile_SUITE.erl
index 32089e8872..ba94a371be 100644
--- a/erts/emulator/test/system_profile_SUITE.erl
+++ b/erts/emulator/test/system_profile_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -27,6 +27,7 @@
system_profile_on_and_off/1,
runnable_procs/1,
runnable_ports/1,
+ dont_profile_profiler/1,
scheduler/1
]).
@@ -40,7 +41,7 @@
-define(default_timeout, ?t:minutes(1)).
init_per_testcase(_Case, Config) ->
- ?line Dog=?t:timetrap(?default_timeout),
+ Dog=?t:timetrap(?default_timeout),
[{watchdog, Dog}|Config].
end_per_testcase(_Case, Config) ->
Dog=?config(watchdog, Config),
@@ -51,7 +52,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[system_profile_on_and_off, runnable_procs,
- runnable_ports, scheduler].
+ runnable_ports, scheduler, dont_profile_profiler].
groups() ->
[].
@@ -77,31 +78,31 @@ system_profile_on_and_off(suite) ->
system_profile_on_and_off(doc) ->
["Tests switching system_profiling on and off."];
system_profile_on_and_off(Config) when is_list(Config) ->
- ?line Pid = start_profiler_process(),
+ Pid = start_profiler_process(),
% Test runnable_ports on and off
- ?line undefined = erlang:system_profile(Pid, [runnable_ports]),
- ?line {Pid, [runnable_ports]} = erlang:system_profile(),
- ?line {Pid, [runnable_ports]} = erlang:system_profile(undefined, []),
+ undefined = erlang:system_profile(Pid, [runnable_ports]),
+ {Pid, [runnable_ports]} = erlang:system_profile(),
+ {Pid, [runnable_ports]} = erlang:system_profile(undefined, []),
% Test runnable_procs on and off
- ?line undefined = erlang:system_profile(Pid, [runnable_procs]),
- ?line {Pid, [runnable_procs]} = erlang:system_profile(),
- ?line {Pid, [runnable_procs]} = erlang:system_profile(undefined, []),
+ undefined = erlang:system_profile(Pid, [runnable_procs]),
+ {Pid, [runnable_procs]} = erlang:system_profile(),
+ {Pid, [runnable_procs]} = erlang:system_profile(undefined, []),
% Test scheduler on and off
- ?line undefined = erlang:system_profile(Pid, [scheduler]),
- ?line {Pid, [scheduler]} = erlang:system_profile(),
- ?line {Pid, [scheduler]} = erlang:system_profile(undefined, []),
+ undefined = erlang:system_profile(Pid, [scheduler]),
+ {Pid, [scheduler]} = erlang:system_profile(),
+ {Pid, [scheduler]} = erlang:system_profile(undefined, []),
% Test combined runnable_ports, runnable_procs, scheduler; on and off
- ?line undefined = erlang:system_profile(Pid, [scheduler, runnable_procs, runnable_ports]),
- ?line {Pid, [scheduler,runnable_procs,runnable_ports]} = erlang:system_profile(),
- ?line {Pid, [scheduler,runnable_procs,runnable_ports]} = erlang:system_profile(undefined, []),
+ undefined = erlang:system_profile(Pid, [scheduler, runnable_procs, runnable_ports]),
+ {Pid, [scheduler,runnable_procs,runnable_ports]} = erlang:system_profile(),
+ {Pid, [scheduler,runnable_procs,runnable_ports]} = erlang:system_profile(undefined, []),
% Test turned off and kill process
- ?line undefined = erlang:system_profile(),
- ?line exit(Pid,kill),
+ undefined = erlang:system_profile(),
+ exit(Pid,kill),
ok.
%% Test runnable_procs
@@ -111,25 +112,25 @@ runnable_procs(suite) ->
runnable_procs(doc) ->
["Tests system_profiling with runnable_procs."];
runnable_procs(Config) when is_list(Config) ->
- ?line Pid = start_profiler_process(),
+ Pid = start_profiler_process(),
% start a ring of processes
% FIXME: Set #laps and #nodes in config file
Nodes = 10,
Laps = 10,
- ?line Master = ring(Nodes),
- ?line undefined = erlang:system_profile(Pid, [runnable_procs]),
+ Master = ring(Nodes),
+ undefined = erlang:system_profile(Pid, [runnable_procs]),
% loop a message
- ?line ok = ring_message(Master, message, Laps),
- ?line Events = get_profiler_events(),
- ?line kill_em_all = kill_ring(Master),
- ?line erlang:system_profile(undefined, []),
+ ok = ring_message(Master, message, Laps),
+ Events = get_profiler_events(),
+ kill_em_all = kill_ring(Master),
+ erlang:system_profile(undefined, []),
put(master, Master),
put(laps, Laps),
- ?line true = has_runnable_event(Events),
+ true = has_runnable_event(Events),
Pids = sort_events_by_pid(Events),
- ?line ok = check_events(Pids),
+ ok = check_events(Pids),
erase(),
- ?line exit(Pid,kill),
+ exit(Pid,kill),
ok.
runnable_ports(suite) ->
@@ -137,21 +138,21 @@ runnable_ports(suite) ->
runnable_ports(doc) ->
["Tests system_profiling with runnable_port."];
runnable_ports(Config) when is_list(Config) ->
- ?line Pid = start_profiler_process(),
- ?line undefined = erlang:system_profile(Pid, [runnable_ports]),
- ?line EchoPid = echo(Config),
+ Pid = start_profiler_process(),
+ undefined = erlang:system_profile(Pid, [runnable_ports]),
+ EchoPid = echo(Config),
% FIXME: Set config to number_of_echos
Laps = 10,
put(laps, Laps),
- ?line ok = echo_message(EchoPid, Laps, message),
- ?line Events = get_profiler_events(),
- ?line kill_em_all = kill_echo(EchoPid),
- ?line erlang:system_profile(undefined, []),
- ?line true = has_runnable_event(Events),
+ ok = echo_message(EchoPid, Laps, message),
+ Events = get_profiler_events(),
+ kill_em_all = kill_echo(EchoPid),
+ erlang:system_profile(undefined, []),
+ true = has_runnable_event(Events),
Pids = sort_events_by_pid(Events),
- ?line ok = check_events(Pids),
+ ok = check_events(Pids),
erase(),
- ?line exit(Pid,kill),
+ exit(Pid,kill),
ok.
scheduler(suite) ->
@@ -160,46 +161,68 @@ scheduler(doc) ->
["Tests system_profiling with scheduler."];
scheduler(Config) when is_list(Config) ->
case {erlang:system_info(smp_support), erlang:system_info(schedulers_online)} of
- {false,_} -> ?line {skipped, "No need for scheduler test when smp support is disabled."};
- {_, 1} -> ?line {skipped, "No need for scheduler test when only one scheduler online."};
+ {false,_} -> {skipped, "No need for scheduler test when smp support is disabled."};
+ {_, 1} -> {skipped, "No need for scheduler test when only one scheduler online."};
_ ->
Nodes = 10,
- ?line ok = check_block_system(Nodes),
- ?line ok = check_multi_scheduling_block(Nodes),
- ok
+ ok = check_block_system(Nodes),
+ ok = check_multi_scheduling_block(Nodes)
end.
+% the profiler pid should not be profiled
+dont_profile_profiler(suite) ->
+ [];
+dont_profile_profiler(doc) ->
+ ["Ensure system profiler process is not profiled."];
+dont_profile_profiler(Config) when is_list(Config) ->
+ Pid = start_profiler_process(),
+
+ Nodes = 10,
+ Laps = 10,
+ Master = ring(Nodes),
+ undefined = erlang:system_profile(Pid, [runnable_procs]),
+ % loop a message
+ ok = ring_message(Master, message, Laps),
+ erlang:system_profile(undefined, []),
+ kill_em_all = kill_ring(Master),
+ Events = get_profiler_events(),
+ false = has_profiler_pid_event(Events, Pid),
+
+ exit(Pid,kill),
+ ok.
+
+
%%% Check scheduler profiling
check_multi_scheduling_block(Nodes) ->
- ?line Pid = start_profiler_process(),
- ?line undefined = erlang:system_profile(Pid, [scheduler]),
- ?line {ok, Supervisor} = start_load(Nodes),
- ?line erlang:system_flag(multi_scheduling, block),
- ?line erlang:system_flag(multi_scheduling, unblock),
- ?line {Pid, [scheduler]} = erlang:system_profile(undefined, []),
- ?line Events = get_profiler_events(),
- ?line true = has_scheduler_event(Events),
+ Pid = start_profiler_process(),
+ undefined = erlang:system_profile(Pid, [scheduler]),
+ {ok, Supervisor} = start_load(Nodes),
+ erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, unblock),
+ {Pid, [scheduler]} = erlang:system_profile(undefined, []),
+ Events = get_profiler_events(),
+ true = has_scheduler_event(Events),
stop_load(Supervisor),
- ?line exit(Pid,kill),
+ exit(Pid,kill),
erase(),
ok.
check_block_system(Nodes) ->
- ?line Dummy = spawn(?MODULE, profiler_process, [[]]),
- ?line Pid = start_profiler_process(),
- ?line undefined = erlang:system_profile(Pid, [scheduler]),
- ?line {ok, Supervisor} = start_load(Nodes),
+ Dummy = spawn(?MODULE, profiler_process, [[]]),
+ Pid = start_profiler_process(),
+ undefined = erlang:system_profile(Pid, [scheduler]),
+ {ok, Supervisor} = start_load(Nodes),
% FIXME: remove wait !!
wait(300),
- ?line undefined = erlang:system_monitor(Dummy, [busy_port]),
- ?line {Dummy, [busy_port]} = erlang:system_monitor(undefined, []),
- ?line {Pid, [scheduler]} = erlang:system_profile(undefined, []),
- ?line Events = get_profiler_events(),
- ?line true = has_scheduler_event(Events),
+ undefined = erlang:system_monitor(Dummy, [busy_port]),
+ {Dummy, [busy_port]} = erlang:system_monitor(undefined, []),
+ {Pid, [scheduler]} = erlang:system_profile(undefined, []),
+ Events = get_profiler_events(),
+ true = has_scheduler_event(Events),
stop_load(Supervisor),
- ?line exit(Pid,kill),
- ?line exit(Dummy,kill),
+ exit(Pid,kill),
+ exit(Dummy,kill),
erase(),
ok.
@@ -211,17 +234,17 @@ check_events([Pid | Pids]) ->
Laps = get(laps),
CheckPids = get(pids),
{Events, N} = get_pid_events(Pid),
- ?line ok = check_event_flow(Events),
- ?line ok = check_event_ts(Events),
+ ok = check_event_flow(Events),
+ ok = check_event_ts(Events),
IsMember = lists:member(Pid, CheckPids),
case Pid of
Master ->
io:format("Expected ~p and got ~p profile events from ~p: ok~n", [Laps*2+2, N, Pid]),
- ?line N = Laps*2 + 2,
+ N = Laps*2 + 2,
check_events(Pids);
Pid when IsMember == true ->
io:format("Expected ~p and got ~p profile events from ~p: ok~n", [Laps*2, N, Pid]),
- ?line N = Laps*2,
+ N = Laps*2,
check_events(Pids);
Pid ->
check_events(Pids)
@@ -448,6 +471,12 @@ has_runnable_event(Events) ->
end
end, Events).
+has_profiler_pid_event([], _) -> false;
+has_profiler_pid_event([{profile, Pid, _Activity, _MFA, _TS}|Events], Pid) -> true;
+has_profiler_pid_event([_|Events], Pid) ->
+ has_profiler_pid_event(Events, Pid).
+
+
wait(Time) -> receive after Time -> ok end.
%%%
diff --git a/erts/emulator/test/system_profile_SUITE_data/echo_drv.c b/erts/emulator/test/system_profile_SUITE_data/echo_drv.c
index d968ff06f9..e0b6ff804c 100644
--- a/erts/emulator/test/system_profile_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/system_profile_SUITE_data/echo_drv.c
@@ -9,11 +9,9 @@ static EchoDrvData echo_drv_data, *echo_drv_data_p;
static EchoDrvData *echo_drv_start(ErlDrvPort port, char *command);
static void echo_drv_stop(EchoDrvData *data_p);
-static void echo_drv_output(EchoDrvData *data_p, char *buf, int len);
+static void echo_drv_output(ErlDrvData drv_data, char *buf,
+ ErlDrvSizeT len);
static void echo_drv_finish(void);
-static int echo_drv_control(EchoDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen);
static ErlDrvEntry echo_drv_entry = {
NULL, /* init */
@@ -25,10 +23,21 @@ static ErlDrvEntry echo_drv_entry = {
"echo_drv",
echo_drv_finish,
NULL, /* handle */
- echo_drv_control,
+ NULL, /* control */
NULL, /* timeout */
NULL, /* outputv */
- NULL /* ready_async */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
+
};
DRIVER_INIT(echo_drv)
@@ -51,16 +60,11 @@ static void echo_drv_stop(EchoDrvData *data_p) {
echo_drv_data_p = NULL;
}
-static void echo_drv_output(EchoDrvData *data_p, char *buf, int len) {
+static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
+ EchoDrvData* data_p = (EchoDrvData *) drv_data;
driver_output(data_p->erlang_port, buf, len);
}
static void echo_drv_finish() {
echo_drv_data_p = NULL;
}
-
-static int echo_drv_control(EchoDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen) {
- return 0;
-}
diff --git a/erts/emulator/test/time_SUITE.erl b/erts/emulator/test/time_SUITE.erl
index bd48a0a7db..4d12e3449c 100644
--- a/erts/emulator/test/time_SUITE.erl
+++ b/erts/emulator/test/time_SUITE.erl
@@ -32,6 +32,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2, univ_to_local/1, local_to_univ/1,
bad_univ_to_local/1, bad_local_to_univ/1,
+ univ_to_seconds/1, seconds_to_univ/1,
consistency/1,
now_unique/1, now_update/1, timestamp/1]).
@@ -59,7 +60,9 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[univ_to_local, local_to_univ, local_to_univ_utc,
- bad_univ_to_local, bad_local_to_univ, consistency,
+ bad_univ_to_local, bad_local_to_univ,
+ univ_to_seconds, seconds_to_univ,
+ consistency,
{group, now}, timestamp].
groups() ->
@@ -162,6 +165,30 @@ bad_test_local_to_univ([Local|Rest]) ->
bad_test_local_to_univ([]) ->
ok.
+
+%% Test universaltime to seconds conversions
+univ_to_seconds(Config) when is_list(Config) ->
+ test_univ_to_seconds(ok_utc_seconds()).
+
+test_univ_to_seconds([{Datetime, Seconds}|DSs]) ->
+ io:format("universaltime = ~p -> seconds = ~p", [Datetime, Seconds]),
+ Seconds = erlang:universaltime_to_posixtime(Datetime),
+ test_univ_to_seconds(DSs);
+test_univ_to_seconds([]) ->
+ ok.
+
+%% Test seconds to universaltime conversions
+seconds_to_univ(Config) when is_list(Config) ->
+ test_seconds_to_univ(ok_utc_seconds()).
+
+test_seconds_to_univ([{Datetime, Seconds}|DSs]) ->
+ io:format("universaltime = ~p <- seconds = ~p", [Datetime, Seconds]),
+ Datetime = erlang:posixtime_to_universaltime(Seconds),
+ test_seconds_to_univ(DSs);
+test_seconds_to_univ([]) ->
+ ok.
+
+
%% Test that the different time functions return
%% consistent results. (See the test case for assumptions
%% and limitations.)
@@ -453,6 +480,32 @@ dst_dates() ->
{1998, 06, 3},
{1999, 06, 4}].
+%% exact UTC {date(), time()} values and the corresponding seconds since 1 Jan 1970
+%% negative seconds are ok
+%% generated with date --date='1979-05-28 12:30:35 UTC' +%s
+ok_utc_seconds() -> [
+ { {{1970, 1, 1},{ 0, 0, 0}}, 0 },
+ { {{1970, 1, 1},{ 0, 0, 1}}, 1 },
+ { {{1969,12,31},{23,59,59}}, -1 },
+ { {{1920,12,31},{23,59,59}}, -1546300801 },
+ { {{1600,02,19},{15,14,08}}, -11671807552 },
+ { {{1979,05,28},{12,30,35}}, 296742635 },
+ { {{1999,12,31},{23,59,59}}, 946684799 },
+ { {{2000, 1, 1},{ 0, 0, 0}}, 946684800 },
+ { {{2000, 1, 1},{ 0, 0, 1}}, 946684801 },
+
+ { {{2038, 1,19},{03,14,07}}, 2147483647 }, % Sint32 full - 1
+ { {{2038, 1,19},{03,14,08}}, 2147483648 }, % Sint32 full
+ { {{2038, 1,19},{03,14,09}}, 2147483649 }, % Sint32 full + 1
+
+ { {{2106, 2, 7},{ 6,28,14}}, 4294967294 }, % Uint32 full 0xFFFFFFFF - 1
+ { {{2106, 2, 7},{ 6,28,15}}, 4294967295 }, % Uint32 full 0xFFFFFFFF
+ { {{2106, 2, 7},{ 6,28,16}}, 4294967296 }, % Uint32 full 0xFFFFFFFF + 1
+ { {{2012,12, 6},{16,28,08}}, 1354811288 },
+ { {{2412,12, 6},{16,28,08}}, 13977592088 }
+ ].
+
+
%% The following dates should not be near the end or beginning of
%% a month, because they will be used to test when the dates are
%% different in UTC and local time.
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index 091e960610..32e2a98e3c 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -767,8 +767,8 @@ exception_test(Opts, Func0, Args0) ->
end,
?line R1 = exc_slave(ExcOpts, Func, Args),
- ?line Stack2 = [{?MODULE,exc_top,3},{?MODULE,slave,2}],
- ?line Stack3 = [{?MODULE,exc,2}|Stack2],
+ ?line Stack2 = [{?MODULE,exc_top,3,[]},{?MODULE,slave,2,[]}],
+ ?line Stack3 = [{?MODULE,exc,2,[]}|Stack2],
?line Rs =
case x_exc_top(ExcOpts, Func, Args) of % Emulation
{crash,{Reason,Stack}}=R when is_list(Stack) ->
@@ -789,21 +789,29 @@ exception_test(Opts, Func0, Args0) ->
end,
?line expect({nm}).
-exception_validate(R1, [R2|Rs]) ->
+exception_validate(R0, Rs0) ->
+ R = clean_location(R0),
+ Rs = [clean_location(E) || E <- Rs0],
+ exception_validate_1(R, Rs).
+
+exception_validate_1(R1, [R2|Rs]) ->
case [R1|R2] of
[R|R] ->
ok;
- [{crash,{badarg,[{lists,reverse,[L1a,L1b]}|T]}}|
- {crash,{badarg,[{lists,reverse,[L2a,L2b]}|T]}}] ->
+ [{crash,{badarg,[{lists,reverse,[L1a,L1b],_}|T]}}|
+ {crash,{badarg,[{lists,reverse,[L2a,L2b],_}|T]}}] ->
same({crash,{badarg,[{lists,reverse,
- [lists:reverse(L1b, L1a),[]]}|T]}},
+ [lists:reverse(L1b, L1a),[]],[]}|T]}},
{crash,{badarg,[{lists,reverse,
- [lists:reverse(L2b, L2a),[]]}|T]}});
+ [lists:reverse(L2b, L2a),[]],[]}|T]}});
_ when is_list(Rs), Rs =/= [] ->
exception_validate(R1, Rs)
end.
-
+clean_location({crash,{Reason,Stk0}}) ->
+ Stk = [{M,F,A,[]} || {M,F,A,_} <- Stk0],
+ {crash,{Reason,Stk}};
+clean_location(Term) -> Term.
%%% Tracee target functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
@@ -1057,10 +1065,10 @@ x_exc_exception(_Rtt, M, F, _, Arity, CR) ->
x_exc_stacktrace() ->
x_exc_stacktrace(erlang:get_stacktrace()).
%% Truncate stacktrace to below exc/2
-x_exc_stacktrace([{?MODULE,x_exc,4}|_]) -> [];
-x_exc_stacktrace([{?MODULE,x_exc_func,4}|_]) -> [];
-x_exc_stacktrace([{?MODULE,x_exc_body,4}|_]) -> [];
-x_exc_stacktrace([{?MODULE,exc,2}|_]) -> [];
+x_exc_stacktrace([{?MODULE,x_exc,4,_}|_]) -> [];
+x_exc_stacktrace([{?MODULE,x_exc_func,4,_}|_]) -> [];
+x_exc_stacktrace([{?MODULE,x_exc_body,4,_}|_]) -> [];
+x_exc_stacktrace([{?MODULE,exc,2,_}|_]) -> [];
x_exc_stacktrace([H|T]) ->
[H|x_exc_stacktrace(T)].
diff --git a/erts/emulator/test/trace_port_SUITE.erl b/erts/emulator/test/trace_port_SUITE.erl
index 0026da4979..f81cab3114 100644
--- a/erts/emulator/test/trace_port_SUITE.erl
+++ b/erts/emulator/test/trace_port_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -77,7 +77,8 @@ end_per_testcase(_Func, Config) ->
call_trace(doc) -> "Test sending call trace messages to a port.";
call_trace(Config) when is_list(Config) ->
- case test_server:is_native(?MODULE) of
+ case test_server:is_native(?MODULE) orelse
+ test_server:is_native(lists) of
true ->
{skip,"Native code"};
false ->
@@ -128,7 +129,8 @@ bs_sum_c(<<>>, Acc) -> Acc.
return_trace(doc) -> "Test the new return trace.";
return_trace(Config) when is_list(Config) ->
- case test_server:is_native(?MODULE) of
+ case test_server:is_native(?MODULE) orelse
+ test_server:is_native(lists) of
true ->
{skip,"Native code"};
false ->
diff --git a/erts/emulator/test/trace_port_SUITE_data/echo_drv.c b/erts/emulator/test/trace_port_SUITE_data/echo_drv.c
index 15c4ca11fe..a8d4ede4fe 100644
--- a/erts/emulator/test/trace_port_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/trace_port_SUITE_data/echo_drv.c
@@ -25,12 +25,14 @@ static EchoDrvData echo_drv_data, *echo_drv_data_p;
**/
static EchoDrvData *echo_drv_start(ErlDrvPort port, char *command);
-static void echo_drv_stop(EchoDrvData *data_p);
-static void echo_drv_output(EchoDrvData *data_p, char *buf, int len);
+static void echo_drv_stop(ErlDrvData drv_data);
+static void echo_drv_output(ErlDrvData drv_data, char *buf,
+ ErlDrvSizeT len);
static void echo_drv_finish(void);
-static int echo_drv_control(EchoDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen);
+static ErlDrvSSizeT echo_drv_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen);
static ErlDrvEntry echo_drv_entry = {
NULL, /* init */
@@ -45,11 +47,19 @@ static ErlDrvEntry echo_drv_entry = {
echo_drv_control,
NULL, /* timeout */
NULL, /* outputv */
- NULL /* ready_async */
+ NULL, /* ready_async */
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0,
+ NULL,
+ NULL,
+ NULL
};
-
-
/* -------------------------------------------------------------------------
** Entry functions
**/
@@ -75,7 +85,8 @@ static void echo_drv_stop(EchoDrvData *data_p) {
echo_drv_data_p = NULL;
}
-static void echo_drv_output(EchoDrvData *data_p, char *buf, int len) {
+static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
+ EchoDrvData* data_p = (EchoDrvData *) drv_data;
driver_output(data_p->erlang_port, buf, len);
switch (data_p->heavy) {
case heavy_off:
@@ -95,9 +106,11 @@ static void echo_drv_finish() {
echo_drv_data_p = NULL;
}
-static int echo_drv_control(EchoDrvData *data_p, unsigned int command,
- char *buf, int len,
- char **rbuf, int rlen) {
+static ErlDrvSSizeT echo_drv_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen) {
+ EchoDrvData* data_p = (EchoDrvData *) drv_data;
switch (command) {
case 'h':
data_p->heavy = heavy_set;
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index e7c57142c0..8fe2402ca8 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -2,7 +2,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1998-2010. All Rights Reserved.
+# Copyright Ericsson AB 1998-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -28,6 +28,7 @@ my $verbose = 0;
my $hot = 1;
my $num_file_opcodes = 0;
my $wordsize = 32;
+my %defs; # Defines (from command line).
# This is shift counts and mask for the packer.
my $WHOLE_WORD = '';
@@ -67,6 +68,10 @@ my $max_gen_operands = 8;
# Must be even. The beam_load.c file must be updated, too.
my $max_spec_operands = 6;
+# The maximum number of primitive genop_types.
+
+my $max_genop_types = 16;
+
my %gen_opnum;
my %num_specific;
my %gen_to_spec;
@@ -92,6 +97,12 @@ my %unnumbered;
my %is_transformed;
#
+# Pre-processor.
+#
+my @if_val;
+my @if_line;
+
+#
# Code transformations.
#
my $te_max_vars = 0; # Max number of variables ever needed.
@@ -101,12 +112,14 @@ my %match_engine_ops; # All opcodes for the match engine.
my %gen_transform_offset;
my @transformations;
my @call_table;
+my %call_table;
my @pred_table;
+my %pred_table;
# Operand types for generic instructions.
my $compiler_types = "uiaxyfhz";
-my $loader_types = "nprvlq";
+my $loader_types = "nprvlqo";
my $genop_types = $compiler_types . $loader_types;
#
@@ -142,34 +155,67 @@ my %arg_size = ('r' => 0, # x(0) - x register zero
my %type_bit;
my @tag_type;
+sub define_type_bit {
+ my($tag,$val) = @_;
+ defined $type_bit{$tag} and
+ sanity("the tag '$tag' has already been defined with the value ",
+ $type_bit{$tag});
+ $type_bit{$tag} = $val;
+}
+
{
my($bit) = 1;
my(%bit);
foreach (split('', $genop_types)) {
push(@tag_type, $_);
- $type_bit{$_} = $bit;
+ define_type_bit($_, $bit);
$bit{$_} = $bit;
$bit *= 2;
}
# Composed types.
- $type_bit{'d'} = $type_bit{'x'} | $type_bit{'y'} | $type_bit{'r'};
- $type_bit{'c'} = $type_bit{'i'} | $type_bit{'a'} | $type_bit{'n'} | $type_bit{'q'};
- $type_bit{'s'} = $type_bit{'d'} | $type_bit{'i'} | $type_bit{'a'} | $type_bit{'n'};
- $type_bit{'j'} = $type_bit{'f'} | $type_bit{'p'};
+ define_type_bit('d', $type_bit{'x'} | $type_bit{'y'} | $type_bit{'r'});
+ define_type_bit('c', $type_bit{'i'} | $type_bit{'a'} |
+ $type_bit{'n'} | $type_bit{'q'});
+ define_type_bit('s', $type_bit{'d'} | $type_bit{'i'} |
+ $type_bit{'a'} | $type_bit{'n'});
+ define_type_bit('j', $type_bit{'f'} | $type_bit{'p'});
# Aliases (for matching purposes).
- $type_bit{'I'} = $type_bit{'u'};
- $type_bit{'t'} = $type_bit{'u'};
- $type_bit{'A'} = $type_bit{'u'};
- $type_bit{'L'} = $type_bit{'u'};
- $type_bit{'b'} = $type_bit{'u'};
- $type_bit{'N'} = $type_bit{'u'};
- $type_bit{'U'} = $type_bit{'u'};
- $type_bit{'e'} = $type_bit{'u'};
- $type_bit{'P'} = $type_bit{'u'};
- $type_bit{'Q'} = $type_bit{'u'};
+ define_type_bit('I', $type_bit{'u'});
+ define_type_bit('t', $type_bit{'u'});
+ define_type_bit('A', $type_bit{'u'});
+ define_type_bit('L', $type_bit{'u'});
+ define_type_bit('b', $type_bit{'u'});
+ define_type_bit('N', $type_bit{'u'});
+ define_type_bit('U', $type_bit{'u'});
+ define_type_bit('e', $type_bit{'u'});
+ define_type_bit('P', $type_bit{'u'});
+ define_type_bit('Q', $type_bit{'u'});
+}
+
+#
+# Pre-define the 'fail' instruction. It is used internally
+# by the 'try_me_else_fail' instruction.
+#
+$match_engine_ops{'TOP_fail'} = 1;
+
+#
+# Sanity checks.
+#
+
+{
+ if (@tag_type > $max_genop_types) {
+ sanity("\$max_genop_types is $max_genop_types, ",
+ "but there are ", scalar(@tag_type),
+ " primitive tags defined\n");
+ }
+
+ foreach my $tag (@tag_type) {
+ sanity("tag '$tag': primitive tags must be named with lowercase letters")
+ unless $tag =~ /^[a-z]$/;
+ }
}
#
@@ -184,6 +230,7 @@ while (@ARGV && $ARGV[0] =~ /^-(.*)/) {
($outdir = shift), next if /^outdir/;
($wordsize = shift), next if /^wordsize/;
($verbose = 1), next if /^v/;
+ ($defs{$1} = $2), next if /^D(\w+)=(\w+)/;
die "$0: Bad option: -$_\n";
}
@@ -200,7 +247,43 @@ while (<>) {
}
next if /^\s*$/;
next if /^\#/;
-
+
+ #
+ # Handle %if.
+ #
+ if (/^\%if (\w+)/) {
+ my $name = $1;
+ my $val = $defs{$name};
+ defined $val or error("'$name' is undefined");
+ push @if_val, $val;
+ push @if_line, $.;
+ next;
+ } elsif (/^\%unless (\w+)/) {
+ my $name = $1;
+ my $val = $defs{$name};
+ defined $val or error("'$name' is undefined");
+ push @if_val, !$val;
+ push @if_line, $.;
+ next;
+ } elsif (/^\%else$/) {
+ unless (@if_line) {
+ error("%else without a preceding %if/%unless");
+ }
+ $if_line[$#if_line] = $.;
+ $if_val[$#if_val] = !$if_val[$#if_val];
+ next;
+ } elsif (/^\%endif$/) {
+ unless (@if_line) {
+ error("%endif without a preceding %if/%unless/%else");
+ }
+ pop @if_val;
+ pop @if_line;
+ next;
+ }
+ if (@if_val and not $if_val[$#if_val]) {
+ next;
+ }
+
#
# Handle assignments.
#
@@ -310,7 +393,13 @@ while (<>) {
$unnumbered{$name,$arity} = 1;
}
} continue {
- close(ARGV) if eof(ARGV);
+ if (eof(ARGV)) {
+ close(ARGV);
+ if (@if_line) {
+ error("Unterminated %if/%unless/%else at " .
+ "line $if_line[$#if_line]\n");
+ }
+ }
}
$num_file_opcodes = @gen_opname;
@@ -436,12 +525,12 @@ sub emulator_output {
#
my(@bits) = (0) x ($max_spec_operands/2);
- my($shift) = 16;
my($i);
for ($i = 0; $i < $max_spec_operands && defined $args[$i]; $i++) {
my $t = $args[$i];
if (defined $type_bit{$t}) {
- $bits[int($i/2)] |= $type_bit{$t} << (16*($i%2));
+ my $shift = $max_genop_types * ($i % 2);
+ $bits[int($i/2)] |= $type_bit{$t} << $shift;
}
}
@@ -753,6 +842,10 @@ sub error {
die $where, @message, "\n";
}
+sub sanity {
+ die "internal error: ", @_, "\n";
+}
+
sub comment {
my($lang, @comments) = @_;
my($prefix);
@@ -1269,7 +1362,8 @@ sub tr_gen {
foreach $ref (@g) {
my($line, $orig_transform, $from_ref, $to_ref) = @$ref;
- my $so_far = tr_gen_from($line, @$from_ref);
+ my $used_ref = used_vars($from_ref, $to_ref);
+ my $so_far = tr_gen_from($line, $used_ref, @$from_ref);
tr_gen_to($line, $orig_transform, $so_far, @$to_ref);
}
@@ -1278,9 +1372,22 @@ sub tr_gen {
#
my($offset) = 0;
print "Uint op_transform[] = {\n";
- foreach $key (keys %gen_transform) {
+ foreach $key (sort keys %gen_transform) {
$gen_transform_offset{$key} = $offset;
- foreach $instr (@{$gen_transform{$key}}) {
+ my @instr = @{$gen_transform{$key}};
+
+ #
+ # If the last instruction is 'fail', remove it and
+ # convert the previous 'try_me_else' to 'try_me_else_fail'.
+ #
+ if (is_instr($instr[$#instr], 'fail')) {
+ pop(@instr);
+ my $i = $#instr;
+ $i-- while !is_instr($instr[$i], 'try_me_else');
+ $instr[$i] = make_op('', 'try_me_else_fail');
+ }
+
+ foreach $instr (@instr) {
my($size, $instr_ref, $comment) = @$instr;
my($op, @args) = @$instr_ref;
print " ";
@@ -1307,8 +1414,48 @@ sub tr_gen {
print "};\n\n";
}
+sub used_vars {
+ my($from_ref,$to_ref) = @_;
+ my %used;
+ my %seen;
+
+ foreach my $ref (@$from_ref) {
+ my($name,$arity,@ops) = @$ref;
+ if ($name =~ /^[.]/) {
+ foreach my $var (@ops) {
+ $used{$var} = 1;
+ }
+ } else {
+ # Any variable that is used at least twice on the
+ # left-hand side is used. (E.g. "move R R".)
+ foreach my $op (@ops) {
+ my($var, $type, $type_val) = @$op;
+ next if $var eq '';
+ $used{$var} = 1 if $seen{$var};
+ $seen{$var} = 1;
+ }
+ }
+ }
+
+ foreach my $ref (@$to_ref) {
+ my($name, $arity, @ops) = @$ref;
+ if ($name =~ /^[.]/) {
+ foreach my $var (@ops) {
+ $used{$var} = 1;
+ }
+ } else {
+ foreach my $op (@ops) {
+ my($var, $type, $type_val) = @$op;
+ next if $var eq '';
+ $used{$var} = 1;
+ }
+ }
+ }
+ \%used;
+}
+
sub tr_gen_from {
- my($line, @tr) = @_;
+ my($line,$used_ref,@tr) = @_;
my(%var) = ();
my(%var_type);
my($var_num) = 0;
@@ -1318,25 +1465,30 @@ sub tr_gen_from {
my(@fix_pred_funcs);
my($op, $ref); # Loop variables.
my $where = "left side of transformation in line $line: ";
+ my %var_used = %$used_ref;
+ my $may_fail = 0;
+ my $is_first = 1;
foreach $ref (@tr) {
my($name, $arity, @ops) = @$ref;
my($key) = "$name/$arity";
my($opnum);
+ $may_fail = 1 unless $is_first;
+ $is_first = 0;
+
#
# A name starting with a period is a C pred function to be called.
#
if ($name =~ /^\.(\w+)/) {
$name = $1;
+ $may_fail = 1;
my $var;
my(@args);
- my $next_instr = pop(@code); # Get rid of 'next_instr'
push(@fix_pred_funcs, scalar(@code));
push(@code, [$name, @ops]);
- push(@code, $next_instr);
next;
}
@@ -1348,17 +1500,21 @@ sub tr_gen_from {
unless defined $gen_opnum{$name,$arity};
$opnum = $gen_opnum{$name,$arity};
- push(@code, &make_op("$name/$arity", 'is_op', $opnum));
+ push(@code, make_op("$name/$arity", 'next_instr', $opnum));
$min_window++;
foreach $op (@ops) {
my($var, $type, $type_val, $cond, $val) = @$op;
+ my $ignored_var = "$var (ignored)";
if ($type ne '' && $type ne '*') {
+ $may_fail = 1;
+
#
# The is_bif, is_not_bif, and is_func instructions have
# their own built-in type test and don't need to
# be guarded with a type test instruction.
#
+ $ignored_var = '';
unless ($cond eq 'is_bif' or
$cond eq 'is_not_bif' or
$cond eq 'is_func') {
@@ -1372,7 +1528,7 @@ sub tr_gen_from {
push(@code, &make_op($types, 'is_type', $type_mask));
} else {
$cond = '';
- push(@code, &make_op($types, 'is_type_eq',
+ push(@code, &make_op("$types== $val", 'is_type_eq',
$type_mask, $val));
}
}
@@ -1380,46 +1536,55 @@ sub tr_gen_from {
if ($cond eq 'is_func') {
my($m, $f, $a) = split(/:/, $val);
+ $ignored_var = '';
+ $may_fail = 1;
push(@code, &make_op('', "$cond", "am_$m",
"am_$f", $a));
} elsif ($cond ne '') {
+ $ignored_var = '';
+ $may_fail = 1;
push(@code, &make_op('', "$cond", $val));
}
if ($var ne '') {
if (defined $var{$var}) {
+ $ignored_var = '';
+ $may_fail = 1;
push(@code, &make_op($var, 'is_same_var', $var{$var}));
} elsif ($type eq '*') {
#
# Reserve a hole for a 'rest_args' instruction.
#
+ $ignored_var = '';
push(@fix_rest_args, scalar(@code));
push(@code, $var);
- } else {
+ } elsif ($var_used{$var}) {
+ $ignored_var = '';
$var_type{$var} = 'scalar';
$var{$var} = $var_num;
$var_num++;
push(@code, &make_op($var, 'set_var', $var{$var}));
}
}
- if (is_set_var_instr($code[$#code])) {
+ if (is_instr($code[$#code], 'set_var')) {
my $ref = pop @code;
my $comment = $ref->[2];
my $var = $ref->[1][1];
push(@code, make_op($comment, 'set_var_next_arg', $var));
} else {
- push(@code, &make_op('', 'next_arg'));
+ push(@code, &make_op($ignored_var, 'next_arg'));
}
}
- push(@code, &make_op('', 'next_instr'));
- pop(@code) if $code[$#code]->[1][0] eq 'next_arg';
+
+ # Remove redundant 'next_arg' instructions before the end
+ # of the instruction.
+ pop(@code) while is_instr($code[$#code], 'next_arg');
}
#
# Insert the commit operation.
#
- pop(@code); # Get rid of 'next_instr'
- push(@code, &make_op('', 'commit'));
+ push(@code, make_op($may_fail ? '' : 'always reached', 'commit'));
#
# If there is an rest_args instruction, we must insert its correct
@@ -1449,9 +1614,8 @@ sub tr_gen_from {
push(@args, "var+$var{$var}");
}
}
- splice(@code, $index, 1, &make_op("$name()",
- 'pred', scalar(@pred_table)));
- push(@pred_table, [$name, @args]);
+ my $pi = tr_next_index(\@pred_table, \%pred_table, $name, @args);
+ splice(@code, $index, 1, make_op("$name()", 'pred', $pi));
}
$te_max_vars = $var_num
@@ -1468,6 +1632,10 @@ sub tr_gen_to {
my($op, $ref); # Loop variables.
my($where) = "right side of transformation in line $line: ";
+ my $last_instr = $code[$#code];
+ my $cannot_fail = is_instr($last_instr, 'commit') &&
+ (get_comment($last_instr) =~ /^always/);
+
foreach $ref (@tr) {
my($name, $arity, @ops) = @$ref;
@@ -1489,9 +1657,10 @@ sub tr_gen_to {
push(@args, "var+$var{$var}");
}
}
- pop(@code); # Get rid of 'next_instr'
- push(@code, &make_op("$name()", 'call', scalar(@call_table)));
- push(@call_table, [$name, @args]);
+ pop(@code); # Get rid of 'commit' instruction
+ my $index = tr_next_index(\@call_table, \%call_table,
+ $name, @args);
+ push(@code, make_op("$name()", 'call_end', $index));
last;
}
@@ -1508,27 +1677,27 @@ sub tr_gen_to {
# Create code to build the generic instruction.
#
- push(@code, &make_op('', 'new_instr'));
- push(@code, &make_op("$name/$arity", 'store_op', $opnum, $arity));
+ push(@code, make_op("$name/$arity", 'new_instr', $opnum));
foreach $op (@ops) {
my($var, $type, $type_val) = @$op;
if ($var ne '') {
&error($where, "variable '$var' unbound")
unless defined $var{$var};
- push(@code, &make_op($var, 'store_var', $var{$var}));
+ push(@code, &make_op($var, 'store_var_next_arg', $var{$var}));
} elsif ($type ne '') {
push(@code, &make_op('', 'store_type', "TAG_$type"));
if ($type_val) {
push(@code, &make_op('', 'store_val', $type_val));
}
+ push(@code, make_op('', 'next_arg'));
}
- push(@code, &make_op('', 'next_arg'));
}
- pop(@code) if $code[$#code]->[1][0] eq 'next_arg';
+ pop(@code) if is_instr($code[$#code], 'next_arg');
}
- push(@code, &make_op('', 'end'));
+ push(@code, make_op('', 'end'))
+ unless is_instr($code[$#code], 'call_end');
#
# Chain together all codes segments having the same first operation.
@@ -1540,11 +1709,20 @@ sub tr_gen_to {
$min_window{$key} = $min_window
if $min_window{$key} > $min_window;
- pop(@{$gen_transform{$key}})
+ my $prev_last;
+ $prev_last = pop(@{$gen_transform{$key}})
if defined @{$gen_transform{$key}}; # Fail
- my(@prefix) = (&make_op($comment), &make_op('', 'try_me_else', &tr_code_len(@code)));
- unshift(@code, @prefix);
- push(@{$gen_transform{$key}}, @code, &make_op('', 'fail'));
+
+ if ($prev_last && !is_instr($prev_last, 'fail')) {
+ error("Line $line: A previous transformation shadows '$orig_transform'");
+ }
+ unless ($cannot_fail) {
+ unshift(@code, make_op('', 'try_me_else',
+ tr_code_len(@code)));
+ push(@code, make_op(""), make_op("$key", 'fail'));
+ }
+ unshift(@code, make_op($comment));
+ push(@{$gen_transform{$key}}, @code);
}
sub tr_code_len {
@@ -1562,21 +1740,38 @@ sub make_op {
[scalar(@op), [@op], $comment];
}
-sub is_set_var_instr {
- my($ref) = @_;
+sub is_instr {
+ my($ref,$op) = @_;
return 0 unless ref($ref) eq 'ARRAY';
- $ref->[1][0] eq 'set_var';
+ $ref->[1][0] eq $op;
+}
+
+sub get_comment {
+ my($ref,$op) = @_;
+ return '' unless ref($ref) eq 'ARRAY';
+ $ref->[2];
+}
+
+sub tr_next_index {
+ my($lref,$href,$name,@args) = @_;
+ my $code = "RVAL = $name(" . join(', ', 'st', @args) . "); break;\n";
+ my $index;
+
+ if (defined $$href{$code}) {
+ $index = $$href{$code};
+ } else {
+ $index = scalar(@$lref);
+ push(@$lref, $code);
+ $$href{$code} = $index;
+ }
+ $index;
}
sub tr_gen_call {
my(@call_table) = @_;
my($i);
- print "\n";
for ($i = 0; $i < @call_table; $i++) {
- my $ref = $call_table[$i];
- my($name, @args) = @$ref;
- print "case $i: RVAL = $name(", join(', ', 'st', @args), "); break;\n";
+ print "case $i: $call_table[$i]";
}
- print "\n";
}
diff --git a/erts/emulator/utils/make_preload b/erts/emulator/utils/make_preload
index d0671e998d..13019d4062 100755
--- a/erts/emulator/utils/make_preload
+++ b/erts/emulator/utils/make_preload
@@ -2,7 +2,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1999-2009. All Rights Reserved.
+# Copyright Ericsson AB 1999-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -39,6 +39,7 @@ use File::Basename;
my $gen_rc = 0;
my $gen_old = 0;
my $windres = 0;
+my $msys = 0;
my $file;
my $progname = basename($0);
@@ -49,6 +50,8 @@ while (@ARGV && $ARGV[0] =~ /^-(\w+)/) {
$gen_rc = 1;
} elsif ($opt eq '-windres') {
$windres = 1;
+ } elsif ($opt eq '-msys') {
+ $msys = 1;
} elsif ($opt eq '-old') {
$gen_old = 1;
} else {
@@ -68,7 +71,12 @@ foreach $file (@ARGV) {
unless $file =~ /\.beam$/;
my $module = basename($file, ".beam");
if ($gen_rc) {
- my ($win_file) = split("\n", `(cygpath -d $file 2>/dev/null || cygpath -w $file)`);
+ my $win_file;
+ if ($msys) {
+ ($win_file) = split("\n", `(msys2win_path.sh $file)`);
+ } else {
+ ($win_file) = split("\n", `(cygpath -d $file 2>/dev/null || cygpath -w $file)`);
+ }
$win_file =~ s&\\&\\\\&g;
print "$num ERLANG_CODE \"$win_file\"\n";
push(@modules, " ", -s $file, "L, $num, ",
@@ -88,6 +96,7 @@ foreach $file (@ARGV) {
print "unsigned char preloaded_$module", "[] = {\n";
for ($i = 0; $i < length($_); $i++) {
if ($i % 8 == 0 && $comment ne '') {
+ $comment =~ s@/\*@..@g; # Comment start -- avoid warning.
$comment =~ s@\*/@..@g; # Comment terminator.
print " /* $comment */\n ";
$comment = '';
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 918ef62094..91efb4c023 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -181,8 +181,7 @@ for ($i = 0; $i < @bif; $i++) {
print "\n";
for ($i = 0; $i < @bif; $i++) {
- my $arity = $bif[$i]->[2];
- my $args = join(', ', 'Process*', ('Eterm') x $arity);
+ my $args = join(', ', 'Process*', 'Eterm*');
print "Eterm $bif[$i]->[3]($args);\n";
print "Eterm wrap_$bif[$i]->[3]($args, UWord *I);\n";
}
@@ -219,28 +218,10 @@ for ($i = 0; $i < @bif; $i++) {
next if $bif[$i]->[3] eq $bif[$i]->[4]; # Skip unwrapped bifs
my $arity = $bif[$i]->[2];
my $func = $bif[$i]->[3];
- my $arg;
print "Eterm\n";
- print "wrap_$func(Process* p";
- for ($arg = 1; $arg <= $arity; $arg++) {
- print ", Eterm arg$arg";
- }
- print ", UWord *I)\n";
+ print "wrap_$func(Process* p, Eterm* args, UWord* I)\n";
print "{\n";
- print " return erts_bif_trace($i, p";
- for ($arg = 1; $arg <= 3; $arg++) {
- if ($arg <= $arity) {
- print ", arg$arg";
- #} elsif ($arg == ($arity + 1)) {
- # # Place I in correct position
- # print ", (Eterm) I";
- } else {
- print ", 0";
- }
- }
- # I is always last, as well as in the correct position
- # Note that "last" and "correct position" may be the same...
- print ", I);\n";
+ print " return erts_bif_trace($i, p, args, I);\n";
print "}\n\n";
}
@@ -261,19 +242,9 @@ for ($i = 0; $i < @bif; $i++) {
my $orig_func = $1;
$orig_func = $implementation[$i] if $implementation[$i];
print "Eterm\n";
- print "$func(Process* p";
- for ($arg = 1; $arg <= $arity; $arg++) {
- print ", Eterm arg$arg";
- }
- print ")\n";
+ print "$func(Process* p, Eterm* BIF__ARGS)\n";
print "{\n";
- print " return $orig_func(p";
- for ($arg = 1; $arg <= 3; $arg++) {
- if ($arg <= $arity) {
- print ", arg$arg";
- }
- }
- print ");\n";
+ print " return $orig_func(p, BIF__ARGS);\n";
print "}\n\n";
}
diff --git a/erts/emulator/valgrind/suppress.patched.3.6.0 b/erts/emulator/valgrind/suppress.patched.3.6.0
new file mode 100644
index 0000000000..8cf4cba2c8
--- /dev/null
+++ b/erts/emulator/valgrind/suppress.patched.3.6.0
@@ -0,0 +1,350 @@
+# Valgrind suppression file updated to support the patched
+# Valgrind used in daily builds on ahmed.
+
+{
+ libc internal error
+ Memcheck:Addr8
+ obj:/lib64/ld-2.3.5.so
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:_dl_start
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr4
+ fun:__sigjmp_save
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__sigsetjmp
+ fun:__libc_start_main
+ obj:*
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:chkio_drv_timeout
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:io_ready_exit_drv_control
+ fun:erts_port_control
+ fun:port_control_3
+ fun:process_main
+}
+{
+ Leak in libc putenv
+ Memcheck:Leak
+ fun:malloc
+ fun:realloc
+ fun:__add_to_environ
+ fun:putenv
+ fun:erts_sys_putenv
+ fun:os_putenv_2
+ fun:process_main
+}
+{
+Leak in libc putenv
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_sys_putenv
+fun:os_putenv_2
+fun:process_main
+}
+{
+ erroneous warning
+ Memcheck:Leak
+ fun:malloc
+ fun:erts_sys_alloc
+ ...
+ fun:fix_core_alloc
+ fun:erts_init_fix_alloc
+ fun:erts_alloc_init
+ fun:early_init
+ fun:erl_start
+}
+{
+ pthread internal error
+ Memcheck:Param
+ futex(utime)
+ fun:__lll_mutex_unlock_wake
+}
+{
+ libc internal error
+ Memcheck:Param
+ socketcall.sendto(msg)
+ ...
+ fun:getifaddrs
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+PossiblyLost
+fun:realloc
+fun:erts_sys_realloc
+...
+fun:erts_realloc_fnf
+fun:erts_bin_realloc_fnf
+fun:driver_realloc_binary
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:erts_bin_drv_alloc_fnf
+fun:driver_alloc_binary
+}
+{
+pthread leak or erroneous valgrind warning
+Memcheck:Leak
+fun:calloc
+fun:allocate_dtv
+fun:_dl_allocate_tls
+fun:pthread_create@@GLIBC_2.2.5
+}
+{
+pthread leak or erroneous valgrind warning
+Memcheck:Leak
+fun:calloc
+fun:_dl_allocate_tls
+fun:pthread_create@@GLIBC_2.2.5
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_slow
+fun:deflate
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_fast
+fun:deflate
+}
+{
+zlib; ok according to zlib (this one popped up with valgrind-3.6.0)
+Memcheck:Cond
+fun:deflate_slow
+fun:deflate
+fun:zlib_deflate
+fun:zlib_ctl
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_init_scheduling
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:init_db
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; sometimes pointer into block
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:driver_alloc
+fun:get_bufstk
+fun:alloc_buffer
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_nnmod
+ fun:BN_mod_inverse
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+}
+{
+ Crypto internal...
+ Memcheck:Value8
+ fun:BN_mod_exp_mont_consttime
+ fun:generate_key
+ fun:dh_generate_key_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+ fun:start_thread
+ fun:clone
+}
+
+{
+erts_bits_init_state; Why is this needed?
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_bits_init_state
+fun:erts_init_scheduling
+fun:erl_init
+fun:erl_start
+fun:main
+}
+
+{
+Prebuilt constant terms in os_info_init
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:os_info_init
+fun:erts_bif_info_init
+fun:erl_init
+fun:erl_start
+fun:main
+}
+
+{
+Permanent cache aligned malloc for array of mseg allocators
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_mseg_init
+fun:erts_alloc_init
+fun:early_init
+fun:erl_start
+fun:main
+}
+
+{
+Early permanent cache aligned erl_process:aux_work_tmo
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:aux_work_timeout_early_init
+fun:erts_early_init_scheduling
+fun:early_init
+fun:erl_start
+fun:main
+}
+
+{
+Early permanent cache aligned ts_event_pool
+Memcheck:Leak
+PossiblyLost
+fun:malloc
+fun:erts_sys_alloc
+fun:erts_alloc_fnf
+fun:ethr_std_alloc
+fun:ts_event_pool
+fun:init_ts_event_alloc
+fun:ethr_late_init_common__
+fun:ethr_late_init
+fun:erts_thr_late_init
+fun:early_init
+fun:erl_start
+fun:main
+}
+
diff --git a/erts/emulator/valgrind/suppress.standard b/erts/emulator/valgrind/suppress.standard
new file mode 100644
index 0000000000..26e34e3757
--- /dev/null
+++ b/erts/emulator/valgrind/suppress.standard
@@ -0,0 +1,308 @@
+{
+ libc internal error
+ Memcheck:Addr8
+ obj:/lib64/ld-2.3.5.so
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:_dl_start
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr4
+ fun:__sigjmp_save
+ fun:__libc_start_main
+ obj:*
+}
+{
+ libc internal error
+ Memcheck:Addr8
+ fun:__sigsetjmp
+ fun:__libc_start_main
+ obj:*
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:chkio_drv_timeout
+}
+{
+ Intentional error in testcase
+ Memcheck:Param
+ pipe(filedes)
+ fun:pipe
+ fun:io_ready_exit_drv_control
+ fun:erts_port_control
+ fun:port_control_3
+ fun:process_main
+}
+{
+ Leak in libc putenv
+ Memcheck:Leak
+ fun:malloc
+ fun:realloc
+ fun:__add_to_environ
+ fun:putenv
+ fun:erts_sys_putenv
+ fun:os_putenv_2
+ fun:process_main
+}
+{
+Leak in libc putenv
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_sys_putenv
+fun:os_putenv_2
+fun:process_main
+}
+{
+ erroneous warning
+ Memcheck:Leak
+ fun:malloc
+ fun:erts_sys_alloc
+ fun:fix_core_alloc
+ fun:erts_init_fix_alloc
+ fun:erts_alloc_init
+ fun:early_init
+ fun:erl_start
+}
+{
+ pthread internal error
+ Memcheck:Param
+ futex(utime)
+ fun:__lll_mutex_unlock_wake
+}
+{
+ libc internal error
+ Memcheck:Param
+ socketcall.sendto(msg)
+ ...
+ fun:getifaddrs
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+fun:realloc
+fun:erts_sys_realloc
+...
+fun:erts_realloc_fnf
+fun:erts_bin_realloc_fnf
+fun:driver_realloc_binary
+}
+{
+inet_drv; pointer inside allocated block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:erts_bin_drv_alloc_fnf
+fun:driver_alloc_binary
+}
+{
+pthread leak or erroneous valgrind warning
+Memcheck:Leak
+fun:calloc
+fun:allocate_dtv
+fun:_dl_allocate_tls
+fun:pthread_create@@GLIBC_2.2.5
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_slow
+fun:deflate
+}
+{
+zlib; ok according to zlib developers
+Memcheck:Cond
+fun:longest_match
+fun:deflate_fast
+fun:deflate
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:erts_init_scheduling
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:init_db
+fun:erl_init
+fun:erl_start
+fun:main
+}
+{
+No leak; sometimes pointer into block
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc_fnf
+fun:driver_alloc
+fun:get_bufstk
+fun:alloc_buffer
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Cond
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/crypto.valgrind.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/libcrypto.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/openssl.*
+}
+{
+ Crypto internal...
+Memcheck:Value8
+obj:*/ssleay.*
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+}
+{
+ Crypto internal...
+ Memcheck:Cond
+ fun:memset
+ fun:BN_lshift
+ fun:BN_div
+ fun:BN_nnmod
+ fun:BN_mod_inverse
+ fun:BN_MONT_CTX_set
+ fun:BN_is_prime_fasttest_ex
+ fun:BN_generate_prime_ex
+ fun:DH_generate_parameters_ex
+ fun:DH_generate_parameters
+ fun:dh_generate_parameters_nif
+ fun:process_main
+}
+{
+ Crypto internal...
+ Memcheck:Value8
+ fun:BN_mod_exp_mont_consttime
+ fun:generate_key
+ fun:dh_generate_key_nif
+ fun:process_main
+ fun:sched_thread_func
+ fun:thr_wrapper
+ fun:start_thread
+ fun:clone
+}
+
+{
+Prebuilt constant terms in os_info_init (PossiblyLost)
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+...
+fun:erts_alloc
+fun:os_info_init
+fun:erts_bif_info_init
+fun:erl_init
+fun:erl_start
+fun:main
+}
+
+{
+Permanent cache aligned malloc for array of mseg allocators
+Memcheck:Leak
+fun:malloc
+fun:erts_mseg_init
+fun:erts_alloc_init
+fun:early_init
+fun:erl_start
+fun:main
+}
+
+{
+Early permanent cache aligned erl_process:aux_work_tmo
+Memcheck:Leak
+fun:malloc
+fun:aux_work_timeout_early_init
+fun:erts_early_init_scheduling
+fun:early_init
+fun:erl_start
+fun:main
+}
+
+{
+Early permanent cache aligned ts_event_pool
+Memcheck:Leak
+fun:malloc
+fun:erts_sys_alloc
+fun:erts_alloc_fnf
+fun:ethr_std_alloc
+fun:ts_event_pool
+fun:init_ts_event_alloc
+fun:ethr_late_init_common__
+fun:ethr_late_init
+fun:erts_thr_late_init
+fun:early_init
+fun:erl_start
+fun:main
+}
+
diff --git a/erts/emulator/zlib/Makefile.in b/erts/emulator/zlib/Makefile.in
deleted file mode 100644
index b44a87551d..0000000000
--- a/erts/emulator/zlib/Makefile.in
+++ /dev/null
@@ -1,102 +0,0 @@
-# Makefile for zlib
-# Copyright (C) 1995-1996 Jean-loup Gailly.
-# For conditions of distribution and use, see copyright notice in zlib.h
-
-# %ExternalCopyright%
-
-# To compile and test, type:
-# ./configure; make test
-# The call of configure is optional if you don't have special requirements
-
-# To install /usr/local/lib/libz.* and /usr/local/include/zlib.h, type:
-# make install
-# To install in $HOME instead of /usr/local, use:
-# make install prefix=$HOME
-
-ARFLAGS = rc
-CFLAGS = $(subst -O2, -O3, @CFLAGS@ @DEFS@ @EMU_THR_DEFS@)
-#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7
-#CFLAGS=-g -DDEBUG
-#CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \
-# -Wstrict-prototypes -Wmissing-prototypes
-
-VER=1.0.4
-
-O = adler32.o compress.o crc32.o uncompr.o deflate.o trees.o \
- zutil.o inflate.o inftrees.o inffast.o
-OBJS = $(O:%=$(OBJDIR)/%)
-
-
-#### Begin OTP targets
-
-include $(ERL_TOP)/make/target.mk
-
-ifeq ($(TYPE),gcov)
-CFLAGS = -O0 -fprofile-arcs -ftest-coverage @DEBUG_CFLAGS@ @DEFS@ @EMU_THR_DEFS@
-else # gcov
-ifeq ($(TYPE),debug)
-CFLAGS = @DEBUG_CFLAGS@ @DEFS@ @EMU_THR_DEFS@
-endif # debug
-endif # gcov
-
-# On windows we *need* a separate zlib during debug build
-OBJDIR= $(ERL_TOP)/erts/emulator/zlib/obj/$(TARGET)/$(TYPE)
-
-include $(ERL_TOP)/make/$(TARGET)/otp.mk
-
-ifeq ($(TARGET), win32)
-LIBRARY=$(OBJDIR)/z.lib
-else
-LIBRARY=$(OBJDIR)/libz.a
-endif
-
-all: $(LIBRARY)
-
-# ----------------------------------------------------
-# Release Target
-# ----------------------------------------------------
-include $(ERL_TOP)/make/otp_release_targets.mk
-
-release_spec: opt
-
-tests release_tests:
-
-docs release_docs release_docs_spec:
-
-clean:
- rm -f $(OBJS) $(OBJDIR)/libz.a
-
-#### end OTP targets
-
-ifeq ($(TARGET), win32)
-$(LIBRARY): $(OBJS)
- $(AR) -out:$@ $(OBJS)
-else
-$(LIBRARY): $(OBJS)
- $(AR) $(ARFLAGS) $@ $(OBJS)
- -@ ($(RANLIB) $@ || true) 2>/dev/null
-endif
-
-$(OBJDIR)/%.o: %.c
- $(CC) -c $(CFLAGS) -o $@ $<
-
-# DO NOT DELETE THIS LINE -- make depend depends on it.
-
-adler32.o: zlib.h zconf.h
-compress.o: zlib.h zconf.h
-crc32.o: zlib.h zconf.h
-deflate.o: deflate.h zutil.h zlib.h zconf.h
-example.o: zlib.h zconf.h
-gzio.o: zutil.h zlib.h zconf.h
-infblock.o: infblock.h inftrees.h infcodes.h infutil.h zutil.h zlib.h zconf.h
-infcodes.o: zutil.h zlib.h zconf.h
-infcodes.o: inftrees.h infblock.h infcodes.h infutil.h inffast.h
-inffast.o: zutil.h zlib.h zconf.h inftrees.h
-inffast.o: infblock.h infcodes.h infutil.h inffast.h
-inflate.o: zutil.h zlib.h zconf.h infblock.h
-inftrees.o: zutil.h zlib.h zconf.h inftrees.h
-infutil.o: zutil.h zlib.h zconf.h infblock.h inftrees.h infcodes.h infutil.h
-minigzip.o: zlib.h zconf.h
-trees.o: deflate.h zutil.h zlib.h zconf.h
-uncompr.o: zlib.h zconf.h
-zutil.o: zutil.h zlib.h zconf.h
diff --git a/erts/emulator/zlib/zlib.mk b/erts/emulator/zlib/zlib.mk
new file mode 100644
index 0000000000..fa1f159fae
--- /dev/null
+++ b/erts/emulator/zlib/zlib.mk
@@ -0,0 +1,74 @@
+#-*-makefile-*- ; force emacs to enter makefile-mode
+# ----------------------------------------------------
+# Make include file for zlib
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2011-2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# ----------------------------------------------------
+# Copyright for zlib itself see copyright notice in zlib.h
+
+ZLIB_FILES = \
+ adler32 \
+ compress \
+ crc32 \
+ uncompr \
+ deflate \
+ trees \
+ zutil \
+ inflate \
+ inftrees \
+ inffast
+
+# On windows we *need* a separate zlib during debug build
+ZLIB_OBJDIR = $(ERL_TOP)/erts/emulator/zlib/obj/$(TARGET)/$(TYPE)
+
+ZLIB_OBJS = $(ZLIB_FILES:%=$(ZLIB_OBJDIR)/%.o)
+ZLIB_SRC = $(ZLIB_FILES:%=zlib/%.c)
+
+ifeq ($(TARGET), win32)
+ZLIB_LIBRARY = $(ZLIB_OBJDIR)/z.lib
+else
+ZLIB_LIBRARY = $(ZLIB_OBJDIR)/libz.a
+endif
+
+
+ifeq ($(TYPE),gcov)
+ZLIB_CFLAGS = -O0 -fprofile-arcs -ftest-coverage $(DEBUG_CFLAGS) $(DEFS) $(THR_DEFS)
+else # gcov
+ifeq ($(TYPE),debug)
+ZLIB_CFLAGS = $(DEBUG_CFLAGS) $(DEFS) $(THR_DEFS)
+else # debug
+ZLIB_CFLAGS = $(subst -O2, -O3, $(CONFIGURE_CFLAGS) $(DEFS) $(THR_DEFS))
+#ZLIB_CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7
+#ZLIB_CFLAGS=-g -DDEBUG
+#ZLIB_CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \
+# -Wstrict-prototypes -Wmissing-prototypes
+endif # debug
+endif # gcov
+
+ifeq ($(TARGET), win32)
+$(ZLIB_LIBRARY): $(ZLIB_OBJS)
+ $(AR) -out:$@ $(ZLIB_OBJS)
+else
+$(ZLIB_LIBRARY): $(ZLIB_OBJS)
+ $(AR) $(ARFLAGS) $@ $(ZLIB_OBJS)
+ -@ ($(RANLIB) $@ || true) 2>/dev/null
+endif
+
+$(ZLIB_OBJDIR)/%.o: zlib/%.c
+ $(CC) -c $(ZLIB_CFLAGS) -o $@ $<
diff --git a/erts/epmd/src/Makefile.in b/erts/epmd/src/Makefile.in
index 2f5296a5e8..5767edc346 100644
--- a/erts/epmd/src/Makefile.in
+++ b/erts/epmd/src/Makefile.in
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1998-2010. All Rights Reserved.
+# Copyright Ericsson AB 1998-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,7 @@ PURIFY =
TYPEMARKER = .debug
TYPE_FLAGS = -DDEBUG @DEBUG_FLAGS@
else
+
ifeq ($(TYPE),purify)
PURIFY = purify
TYPEMARKER =
@@ -33,6 +34,8 @@ else
TYPE_FLAGS = -O2 -DPURIFY
endif
else
+
+override TYPE = opt
PURIFY =
TYPEMARKER =
ifeq ($(findstring ose,$(TARGET)),ose)
@@ -65,6 +68,8 @@ ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal$(ERTS_LIB_TYPE
endif
endif
+ERTS_LIB = $(ERL_TOP)/erts/lib_src/obj/$(TARGET)/$(TYPE)/MADE
+
CC = @CC@
WFLAGS = @WFLAGS@
CFLAGS = @CFLAGS@ @DEFS@ $(TYPE_FLAGS) $(WFLAGS) $(ERTS_INCL)
@@ -106,7 +111,7 @@ EPMD_OBJS = $(OBJDIR)/epmd.o \
#---------------------------------
-all: erts_lib $(BINDIR)/$(EPMD)
+all: $(BINDIR)/$(EPMD)
docs:
@@ -122,13 +127,13 @@ clean:
# Objects & executables
#
-$(BINDIR)/$(EPMD): $(EPMD_OBJS)
+$(BINDIR)/$(EPMD): $(EPMD_OBJS) $(ERTS_LIB)
$(PURIFY) $(LD) $(LDFLAGS) -o $@ $(EPMD_OBJS) $(LIBS)
$(OBJDIR)/%.o: %.c epmd.h epmd_int.h
$(CC) $(CFLAGS) $(EPMD_FLAGS) -o $@ -c $<
-erts_lib:
+$(ERTS_LIB):
cd $(ERL_TOP)/erts/lib_src && $(MAKE) $(TYPE)
include $(ERL_TOP)/make/otp_release_targets.mk
@@ -139,4 +144,3 @@ release_spec: all
release_docs_spec:
-
diff --git a/erts/epmd/src/epmd.c b/erts/epmd/src/epmd.c
index 08576d923f..2267f9b12b 100644
--- a/erts/epmd/src/epmd.c
+++ b/erts/epmd/src/epmd.c
@@ -324,7 +324,11 @@ static void run_daemon(EpmdVars *g)
}
/* move cwd to root to make sure we are not on a mounted filesystem */
- chdir("/");
+ if (chdir("/") < 0)
+ {
+ dbg_perror(g,"epmd: chdir() failed");
+ epmd_cleanup_exit(g,1);
+ }
umask(0);
diff --git a/erts/epmd/src/epmd_cli.c b/erts/epmd/src/epmd_cli.c
index ac55ba6bb6..74408e3ebe 100644
--- a/erts/epmd/src/epmd_cli.c
+++ b/erts/epmd/src/epmd_cli.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -104,7 +104,10 @@ void epmd_call(EpmdVars *g,int what)
fd = conn_to_epmd(g);
put_int16(1,buf);
buf[2] = what;
- write(fd,buf,3);
+ if (write(fd, buf, 3) != 3) {
+ printf("epmd: Can't write to epmd\n");
+ epmd_cleanup_exit(g,1);
+ }
if (read(fd,(char *)&i,4) != 4) {
if (!g->silent)
printf("epmd: no response from local epmd\n");
diff --git a/erts/epmd/src/epmd_int.h b/erts/epmd/src/epmd_int.h
index 2a0de4df9c..14d05c3f19 100644
--- a/erts/epmd/src/epmd_int.h
+++ b/erts/epmd/src/epmd_int.h
@@ -2,7 +2,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -240,6 +240,14 @@
#define put_int16(i, s) {((unsigned char*)(s))[0] = ((i) >> 8) & 0xff; \
((unsigned char*)(s))[1] = (i) & 0xff;}
+#if defined(__GNUC__)
+# define EPMD_INLINE __inline__
+#elif defined(__WIN32__)
+# define EPMD_INLINE __inline
+#else
+# define EPMD_INLINE
+#endif
+
/* ************************************************************************ */
/* Structures used by server */
@@ -295,6 +303,7 @@ typedef struct {
unsigned delay_write;
int max_conn;
int active_conn;
+ int select_fd_top;
char *progname;
Connection *conn;
Nodes nodes;
diff --git a/erts/epmd/src/epmd_srv.c b/erts/epmd/src/epmd_srv.c
index 4d9b454f97..da575affa1 100644
--- a/erts/epmd/src/epmd_srv.c
+++ b/erts/epmd/src/epmd_srv.c
@@ -80,6 +80,13 @@ static int reply(EpmdVars*,int,char *,int);
static void dbg_print_buf(EpmdVars*,char *,int);
static void print_names(EpmdVars*);
+static EPMD_INLINE void select_fd_set(EpmdVars* g, int fd)
+{
+ FD_SET(fd, &g->orig_read_mask);
+ if (fd >= g->select_fd_top) {
+ g->select_fd_top = fd + 1;
+ }
+}
void run(EpmdVars *g)
{
@@ -95,7 +102,8 @@ void run(EpmdVars *g)
dbg_printf(g,2,"try to initiate listening port %d", g->port);
- if (g->addresses != NULL)
+ if (g->addresses != NULL && /* String contains non-separator characters if: */
+ g->addresses[strspn(g->addresses," ,")] != '\000')
{
char *tmp;
char *token;
@@ -171,6 +179,7 @@ void run(EpmdVars *g)
g->max_conn -= num_sockets;
FD_ZERO(&g->orig_read_mask);
+ g->select_fd_top = 0;
for (i = 0; i < num_sockets; i++)
{
@@ -232,14 +241,14 @@ void run(EpmdVars *g)
dbg_perror(g,"failed to listen on socket");
epmd_cleanup_exit(g,1);
}
- FD_SET(listensock[i],&g->orig_read_mask);
+ select_fd_set(g, listensock[i]);
}
dbg_tty_printf(g,2,"entering the main select() loop");
select_again:
while(1)
- {
+ {
fd_set read_mask = g->orig_read_mask;
struct timeval timeout;
int ret;
@@ -251,7 +260,8 @@ void run(EpmdVars *g)
timeout.tv_sec = (g->packet_timeout < IDLE_TIMEOUT) ? 1 : IDLE_TIMEOUT;
timeout.tv_usec = 0;
- if ((ret = select(g->max_conn,&read_mask,(fd_set *)0,(fd_set *)0,&timeout)) < 0) {
+ if ((ret = select(g->select_fd_top,
+ &read_mask, (fd_set *)0,(fd_set *)0,&timeout)) < 0) {
dbg_perror(g,"error in select ");
switch (errno) {
case EAGAIN:
@@ -821,7 +831,7 @@ static int conn_open(EpmdVars *g,int fd)
s = &g->conn[i];
/* From now on we want to know if there are data to be read */
- FD_SET(fd, &g->orig_read_mask);
+ select_fd_set(g, fd);
s->fd = fd;
s->open = EPMD_TRUE;
@@ -886,6 +896,7 @@ int epmd_conn_close(EpmdVars *g,Connection *s)
dbg_tty_printf(g,2,"closing connection on file descriptor %d",s->fd);
FD_CLR(s->fd,&g->orig_read_mask);
+ /* we don't bother lowering g->select_fd_top */
close(s->fd); /* Sometimes already closed but close anyway */
s->open = EPMD_FALSE;
if (s->buf != NULL) { /* Should never be NULL but test anyway */
@@ -1115,7 +1126,7 @@ static Node *node_reg2(EpmdVars *g,
node->extralen = extralen;
memcpy(node->extra,extra,extralen);
strcpy(node->symname,name);
- FD_SET(fd,&g->orig_read_mask);
+ select_fd_set(g, fd);
if (highvsn == 0) {
dbg_tty_printf(g,1,"registering '%s:%d', port %d",
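The select_fd_set() helper added above keeps g->select_fd_top at the highest registered descriptor plus one, which is exactly the nfds value select() expects as its first argument; passing it instead of g->max_conn means the kernel only scans descriptors that were actually registered, and (as the comment in epmd_conn_close() notes) the top value is simply never lowered again. A minimal sketch of the same bookkeeping in isolation, with illustrative names rather than the epmd ones:

#include <sys/select.h>

struct fd_tracker {
    fd_set read_mask;   /* descriptors select() should watch */
    int    fd_top;      /* highest added fd, plus one */
};

static void tracker_init(struct fd_tracker *t)
{
    FD_ZERO(&t->read_mask);
    t->fd_top = 0;
}

/* Same idea as select_fd_set(): remember both the set and the nfds bound. */
static void tracker_add(struct fd_tracker *t, int fd)
{
    FD_SET(fd, &t->read_mask);
    if (fd >= t->fd_top)
        t->fd_top = fd + 1;            /* select() wants highest fd + 1 */
}

static int tracker_wait(struct fd_tracker *t, struct timeval *tmo)
{
    fd_set readable = t->read_mask;    /* select() overwrites its fd sets */
    return select(t->fd_top, &readable, NULL, NULL, tmo);
}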
diff --git a/erts/etc/common/Makefile.in b/erts/etc/common/Makefile.in
index 4754328c0b..28c5e5ccad 100644
--- a/erts/etc/common/Makefile.in
+++ b/erts/etc/common/Makefile.in
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2010. All Rights Reserved.
+# Copyright Ericsson AB 1996-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -22,13 +22,14 @@ include $(ERL_TOP)/make/target.mk
ERTS_LIB_TYPEMARKER=.$(TYPE)
USING_MINGW=@MIXED_CYGWIN_MINGW@
-USING_VC=@MIXED_CYGWIN_VC@
+USING_VC=@MIXED_VC@
ifeq ($(TYPE),debug)
PURIFY =
TYPEMARKER = .debug
TYPE_FLAGS = -DDEBUG @DEBUG_FLAGS@
else
+
ifeq ($(TYPE),purify)
PURIFY = purify
TYPEMARKER =
@@ -38,6 +39,8 @@ else
TYPE_FLAGS = -g -O2 -DPURIFY
endif
else
+
+override TYPE=opt
PURIFY =
TYPEMARKER =
ERTS_LIB_TYPEMARKER=
@@ -101,6 +104,8 @@ else
ERTS_INTERNAL_LIBS=-L../../lib/internal/$(TARGET) -lerts_internal$(ERTS_LIB_TYPEMARKER) @ERTS_INTERNAL_X_LIBS@ -lm
endif
+ERTS_LIB = $(ERL_TOP)/erts/lib_src/obj/$(TARGET)/$(TYPE)/MADE
+
# ----------------------------------------------------
# Release directory specification
# ----------------------------------------------------
@@ -161,9 +166,8 @@ ERLSRV_OBJECTS= \
$(OBJDIR)/erlsrv_main.o \
$(OBJDIR)/erlsrv_util.o \
$(OBJDIR)/erlsrv_logmess.res
-MC_OUTPUTS= \
- $(OBJDIR)/erlsrv_logmess.h $(OBJDIR)/erlsrv_logmess.rc
- MT_FLAG="-MT"
+MC_OUTPUTS=$(OBJDIR)/erlsrv_logmess.h $(OBJDIR)/erlsrv_logmess.rc
+MT_FLAG="-MT"
else
ERLRES_OBJ=erl_res.o
ERLSRV_OBJECTS= \
@@ -173,9 +177,8 @@ ERLSRV_OBJECTS= \
$(OBJDIR)/erlsrv_main.o \
$(OBJDIR)/erlsrv_util.o \
$(OBJDIR)/erlsrv_logmess.o
-MC_OUTPUTS= \
- $(OBJDIR)/erlsrv_logmess.h $(OBJDIR)/erlsrv_logmess.res
- MT_FLAG="-MD"
+MC_OUTPUTS=$(OBJDIR)/erlsrv_logmess.h $(OBJDIR)/erlsrv_logmess.res
+MT_FLAG="-MD"
endif
INET_GETHOST = $(BINDIR)/inet_gethost.exe
INSTALL_EMBEDDED_PROGS += $(BINDIR)/typer.exe $(BINDIR)/dialyzer.exe $(BINDIR)/erlc.exe $(BINDIR)/start_erl.exe $(BINDIR)/escript.exe $(BINDIR)/ct_run.exe
@@ -232,14 +235,17 @@ endif
endif
endif
-etc: erts_lib $(ENTRY_OBJ) $(INSTALL_PROGS) $(INSTALL_LIBS) $(TEXTFILES) $(INSTALL_TOP_BIN)
+.PHONY: etc
+etc: $(ENTRY_OBJ) $(INSTALL_PROGS) $(INSTALL_LIBS) $(TEXTFILES) $(INSTALL_TOP_BIN)
# erlexec needs the erts_internal library...
-erts_lib:
+$(ERTS_LIB):
cd $(ERL_TOP)/erts/lib_src && $(MAKE) $(TYPE)
+.PHONY: docs
docs:
+.PHONY: clean
clean:
ifneq ($(INSTALL_PROGS),)
rm -f $(INSTALL_PROGS)
@@ -278,85 +284,9 @@ endif
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/vxcall.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/erl.o
rm -f $(ERL_TOP)/erts/obj*/$(TARGET)/werl.o
+ rm -f $(TEXTFILES)
rm -f *~ core
-#
-# Objects & executables
-#
-#$(OBJDIR)/%.o: %.c
-# $(CC) $(CFLAGS) -o $@ -c $<
-#
-#$(OBJDIR)/%.o: ../unix/%.c
-# $(CC) $(CFLAGS) -o $@ -c $<
-#
-#$(BINDIR)/%: $(OBJDIR)/%.o
-# $(PURIFY) $(LD) $(LDFLAGS) -o $@ $< $(LIBS)
-
-$(OBJDIR)/inet_gethost.o: inet_gethost.c
- $(CC) $(CFLAGS) -o $@ -c inet_gethost.c
-
-$(BINDIR)/inet_gethost@EXEEXT@: $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ)
- $(PURIFY) $(LD) $(LDFLAGS) $(ENTRY_LDFLAGS) -o $@ $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ) $(LIBS) $(ERTS_INTERNAL_LIBS)
-
-$(BINDIR)/run_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/run_erl.o
- $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/run_erl.o $(LIBS)
-
-$(OBJDIR)/run_erl.o: ../unix/run_erl.c
- $(CC) $(CFLAGS) -o $@ -c ../unix/run_erl.c
-
-$(BINDIR)/to_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/to_erl.o
- $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/to_erl.o
-
-$(OBJDIR)/to_erl.o: ../unix/to_erl.c
- $(CC) $(CFLAGS) -o $@ -c ../unix/to_erl.c
-
-$(BINDIR)/dyn_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/dyn_erl.o
- $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/dyn_erl.o
-
-$(OBJDIR)/dyn_erl.o: ../unix/dyn_erl.c
- $(CC) $(CFLAGS) -o $@ -c ../unix/dyn_erl.c
-
-$(OBJDIR)/safe_string.o: ../unix/safe_string.c
- $(CC) $(CFLAGS) -o $@ -c ../unix/safe_string.c
-
-ifneq ($(TARGET),win32)
-$(BINDIR)/$(ERLEXEC): $(OBJDIR)/$(ERLEXEC).o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/$(ERLEXEC).o $(ERTS_INTERNAL_LIBS)
-
-$(OBJDIR)/$(ERLEXEC).o: $(ERLEXECDIR)/$(ERLEXEC).c
- $(CC) -I$(EMUDIR) $(CFLAGS) -o $@ -c $(ERLEXECDIR)/$(ERLEXEC).c
-endif
-$(BINDIR)/erlc@EXEEXT@: $(OBJDIR)/erlc.o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/erlc.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
-
-$(OBJDIR)/erlc.o: erlc.c
- $(CC) $(CFLAGS) -o $@ -c erlc.c
-
-$(BINDIR)/dialyzer@EXEEXT@: $(OBJDIR)/dialyzer.o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/dialyzer.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
-
-$(OBJDIR)/dialyzer.o: dialyzer.c
- $(CC) $(CFLAGS) -o $@ -c dialyzer.c
-
-$(BINDIR)/typer@EXEEXT@: $(OBJDIR)/typer.o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/typer.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
-
-$(OBJDIR)/typer.o: typer.c
- $(CC) $(CFLAGS) -o $@ -c typer.c
-
-$(BINDIR)/escript@EXEEXT@: $(OBJDIR)/escript.o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/escript.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
-
-$(OBJDIR)/escript.o: escript.c
- $(CC) $(CFLAGS) -o $@ -c escript.c
-
-$(BINDIR)/ct_run@EXEEXT@: $(OBJDIR)/ct_run.o
- $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/ct_run.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
-
-$(OBJDIR)/ct_run.o: ct_run.c
- $(CC) $(CFLAGS) -o $@ -c ct_run.c
-
-
#------------------------------------------------------------------------
# Windows specific targets
# The windows platform is quite different from the others. erl/werl are small C programs
@@ -367,7 +297,7 @@ $(OBJDIR)/ct_run.o: ct_run.c
ifeq ($(TARGET),win32)
-$(BINDIR)/$(ERLEXEC): $(OBJDIR)/erlexec.o $(OBJDIR)/win_erlexec.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ)
+$(BINDIR)/$(ERLEXEC): $(OBJDIR)/erlexec.o $(OBJDIR)/win_erlexec.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ) $(ERTS_LIB)
$(LD) -dll $(LDFLAGS) -o $@ $(OBJDIR)/erlexec.o $(OBJDIR)/win_erlexec.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ) $(ERTS_INTERNAL_LIBS)
$(BINDIR)/erl@EXEEXT@: $(OBJDIR)/erl.o $(OBJDIR)/init_file.o $(OBJDIR)/$(ERLRES_OBJ)
@@ -382,58 +312,75 @@ $(BINDIR)/start_erl@EXEEXT@: $(OBJDIR)/start_erl.o
$(BINDIR)/Install@EXEEXT@: $(OBJDIR)/Install.o $(OBJDIR)/init_file.o
$(LD) $(LDFLAGS) -o $@ $(OBJDIR)/Install.o $(OBJDIR)/init_file.o
+# The service expects to be compiled with $(MT_FLAG) flag.
$(BINDIR)/erlsrv@EXEEXT@: $(ERLSRV_OBJECTS)
$(LD) $(LDFLAGS) $(MT_FLAG) -o $@ $(ERLSRV_OBJECTS)
-# The service expects to be compiled with $(MT_FLAG) flag.
-
-$(OBJDIR)/%.o: $(WINETC)/erlsrv/%.c $(ERLSRV_HEADERS)
- $(CC) $(CFLAGS) $(MT_FLAG) -o $@ -c $<
-
-$(OBJDIR)/erlsrv_util.o: $(WINETC)/erlsrv/erlsrv_util.c $(ERLSRV_HEADERS) \
-$(OBJDIR)/erlsrv_logmess.h
- $(CC) $(CFLAGS) -I$(OBJDIR) $(MT_FLAG) -o $@ -c $<
+# To fix a spooky parallel make build problem on Windows there are some
+# false dependencies on the $(MC), $(RC) and .o rules. The theory is
+# that the $(MC) and/or $(RC) compilers temporarily create and destroy
+# a toplevel file vc100.pdb, and that simultaneous ordinary compilations
+# (which have been explicitly ordered by flags not to use .pdb files)
+# get upset when that file vanishes from under their feet.
+#
+# The false dependencies are targeted to make make (pun intended) run
+# the $(MC) and $(RC) compilations to completion before all others.
+
+LOGMESS_GENERATED = $(OBJDIR)/LOGMESS-GENERATED
+$(MC_OUTPUTS): $(LOGMESS_GENERATED)
+$(LOGMESS_GENERATED): $(WINETC)/erlsrv/erlsrv_logmess.mc
+ $(MC) -o $(OBJDIR) $(WINETC)/erlsrv/erlsrv_logmess.mc && \
+ echo $? >$(LOGMESS_GENERATED)
+
+$(OBJDIR)/$(ERLRES_OBJ): $(WINETC)/erl.rc $(WINETC)/erlang.ico \
+ $(WINETC)/erl_icon.ico $(WINETC)/hrl_icon.ico \
+ $(WINETC)/beam_icon.ico $(LOGMESS_GENERATED)
+ $(RC) -o $@ -I$(WINETC) $(WINETC)/erl.rc
ifeq ($(USING_VC), yes)
-$(OBJDIR)/erlsrv_logmess.res: $(OBJDIR)/erlsrv_logmess.rc
+RC_GENERATED = $(OBJDIR)/erlsrv_logmess.res
+$(RC_GENERATED): $(OBJDIR)/erlsrv_logmess.rc $(OBJDIR)/$(ERLRES_OBJ)
$(RC) -o $(OBJDIR)/erlsrv_logmess.res -I$(OBJDIR) $(OBJDIR)/erlsrv_logmess.rc
else
-$(OBJDIR)/erlsrv_logmess.o: $(OBJDIR)/erlsrv_logmess.res
+RC_GENERATED = $(OBJDIR)/erlsrv_logmess.o
+$(RC_GENERATED): $(OBJDIR)/erlsrv_logmess.res $(OBJDIR)/$(ERLRES_OBJ)
$(RC) -o $(OBJDIR)/erlsrv_logmess.o -I$(OBJDIR) $(OBJDIR)/erlsrv_logmess.res
endif
-$(MC_OUTPUTS): $(WINETC)/erlsrv/erlsrv_logmess.mc
- $(MC) -o $(OBJDIR) $(WINETC)/erlsrv/erlsrv_logmess.mc
+# The service expects to be compiled with $(MT_FLAG) flag.
+$(OBJDIR)/%.o: $(WINETC)/erlsrv/%.c $(ERLSRV_HEADERS) $(RC_GENERATED)
+ $(CC) $(CFLAGS) $(MT_FLAG) -o $@ -c $<
-$(OBJDIR)/werl.o: $(WINETC)/erl.c
+$(OBJDIR)/erlsrv_util.o: $(WINETC)/erlsrv/erlsrv_util.c $(ERLSRV_HEADERS) \
+ $(OBJDIR)/erlsrv_logmess.h $(RC_GENERATED)
+ $(CC) $(CFLAGS) -I$(OBJDIR) $(MT_FLAG) -o $@ -c $<
+
+$(OBJDIR)/werl.o: $(WINETC)/erl.c $(WINETC)/init_file.h $(RC_GENERATED)
$(CC) $(CFLAGS) -DBUILD_TYPE=\"-$(TYPE)\" -DERL_RUN_SHARED_LIB=1 \
-DWIN32_WERL -o $@ -c $(WINETC)/erl.c
-$(OBJDIR)/erl.o: $(WINETC)/erl.c
+$(OBJDIR)/erl.o: $(WINETC)/erl.c $(WINETC)/init_file.h $(RC_GENERATED)
$(CC) $(CFLAGS) -DBUILD_TYPE=\"-$(TYPE)\" -DERL_RUN_SHARED_LIB=1 \
-o $@ -c $(WINETC)/erl.c
-$(OBJDIR)/erlexec.o: $(ERLEXECDIR)/erlexec.c
+$(OBJDIR)/erlexec.o: $(ERLEXECDIR)/erlexec.c $(RC_GENERATED)
$(CC) $(CFLAGS) -DBUILD_TYPE=\"-$(TYPE)\" -DERL_RUN_SHARED_LIB=1 \
-o $@ -c $(ERLEXECDIR)/erlexec.c
-$(OBJDIR)/win_erlexec.o: $(WINETC)/win_erlexec.c
+$(OBJDIR)/win_erlexec.o: $(WINETC)/win_erlexec.c $(RC_GENERATED)
$(CC) $(CFLAGS) -DBUILD_TYPE=\"-$(TYPE)\" -DERL_RUN_SHARED_LIB=1 \
-o $@ -c $(WINETC)/win_erlexec.c
-$(OBJDIR)/init_file.o: $(WINETC)/init_file.c $(WINETC)/init_file.h
+$(OBJDIR)/init_file.o: $(WINETC)/init_file.c $(WINETC)/init_file.h $(RC_GENERATED)
$(CC) $(CFLAGS) -o $@ -c $(WINETC)/init_file.c
-$(OBJDIR)/Install.o: $(WINETC)/Install.c $(WINETC)/init_file.h
+$(OBJDIR)/Install.o: $(WINETC)/Install.c $(WINETC)/init_file.h $(RC_GENERATED)
$(CC) $(CFLAGS) -o $@ -c $(WINETC)/Install.c
-$(OBJDIR)/$(ERLRES_OBJ): $(WINETC)/erl.rc $(WINETC)/erlang.ico $(WINETC)/erl_icon.ico $(WINETC)/hrl_icon.ico $(WINETC)/beam_icon.ico
- $(RC) -o $@ -I$(WINETC) $(WINETC)/erl.rc
-
-$(OBJDIR)/start_erl.o: $(WINETC)/start_erl.c
+$(OBJDIR)/start_erl.o: $(WINETC)/start_erl.c $(RC_GENERATED)
$(CC) $(CFLAGS) -o $@ -c $(WINETC)/start_erl.c
-$(ENTRY_OBJ): $(ENTRY_SRC)
+$(ENTRY_OBJ): $(ENTRY_SRC) $(RC_GENERATED)
$(CC) $(CFLAGS) -o $@ -c $(ENTRY_SRC)
Install.ini: ../$(TARGET)/Install.src ../../vsn.mk $(TARGET)/Makefile
@@ -442,6 +389,8 @@ Install.ini: ../$(TARGET)/Install.src ../../vsn.mk $(TARGET)/Makefile
../$(TARGET)/Install.src > Install.ini
+else
+RC_GENERATED =
endif
#---------------------------------------------------------
# End of windows specific targets.
@@ -469,7 +418,7 @@ $(BINDIR)/heart@EXEEXT@: $(OBJDIR)/heart.o $(ENTRY_OBJ)
$(LD) $(LDFLAGS) $(ENTRY_LDFLAGS) -o $@ $(OBJDIR)/heart.o \
$(ENTRY_OBJ) $(WINDSOCK)
-$(OBJDIR)/heart.o: heart.c
+$(OBJDIR)/heart.o: heart.c $(RC_GENERATED)
$(CC) $(CFLAGS) -o $@ -c heart.c
endif
@@ -497,6 +446,84 @@ $(OBJDIR)/vxcall.o: $(VXETC)/vxcall.c
+#
+# Objects & executables
+#
+#$(OBJDIR)/%.o: %.c
+# $(CC) $(CFLAGS) -o $@ -c $<
+#
+#$(OBJDIR)/%.o: ../unix/%.c
+# $(CC) $(CFLAGS) -o $@ -c $<
+#
+#$(BINDIR)/%: $(OBJDIR)/%.o
+# $(PURIFY) $(LD) $(LDFLAGS) -o $@ $< $(LIBS)
+
+$(OBJDIR)/inet_gethost.o: inet_gethost.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c inet_gethost.c
+
+$(BINDIR)/inet_gethost@EXEEXT@: $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ) $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) $(ENTRY_LDFLAGS) -o $@ $(OBJDIR)/inet_gethost.o $(ENTRY_OBJ) $(LIBS) $(ERTS_INTERNAL_LIBS)
+
+$(BINDIR)/run_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/run_erl.o
+ $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/run_erl.o $(LIBS)
+
+$(OBJDIR)/run_erl.o: ../unix/run_erl.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c ../unix/run_erl.c
+
+$(BINDIR)/to_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/to_erl.o
+ $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/to_erl.o
+
+$(OBJDIR)/to_erl.o: ../unix/to_erl.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c ../unix/to_erl.c
+
+$(BINDIR)/dyn_erl: $(OBJDIR)/safe_string.o $(OBJDIR)/dyn_erl.o
+ $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/safe_string.o $(OBJDIR)/dyn_erl.o
+
+$(OBJDIR)/dyn_erl.o: ../unix/dyn_erl.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c ../unix/dyn_erl.c
+
+$(OBJDIR)/safe_string.o: ../unix/safe_string.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c ../unix/safe_string.c
+
+ifneq ($(TARGET),win32)
+$(BINDIR)/$(ERLEXEC): $(OBJDIR)/$(ERLEXEC).o $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/$(ERLEXEC).o $(ERTS_INTERNAL_LIBS)
+
+$(OBJDIR)/$(ERLEXEC).o: $(ERLEXECDIR)/$(ERLEXEC).c $(RC_GENERATED)
+ $(CC) -I$(EMUDIR) $(CFLAGS) -o $@ -c $(ERLEXECDIR)/$(ERLEXEC).c
+endif
+$(BINDIR)/erlc@EXEEXT@: $(OBJDIR)/erlc.o $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/erlc.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
+
+$(OBJDIR)/erlc.o: erlc.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c erlc.c
+
+$(BINDIR)/dialyzer@EXEEXT@: $(OBJDIR)/dialyzer.o $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/dialyzer.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
+
+$(OBJDIR)/dialyzer.o: dialyzer.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c dialyzer.c
+
+$(BINDIR)/typer@EXEEXT@: $(OBJDIR)/typer.o $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/typer.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
+
+$(OBJDIR)/typer.o: typer.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c typer.c
+
+$(BINDIR)/escript@EXEEXT@: $(OBJDIR)/escript.o $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/escript.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
+
+$(OBJDIR)/escript.o: escript.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c escript.c
+
+$(BINDIR)/ct_run@EXEEXT@: $(OBJDIR)/ct_run.o $(ERTS_LIB)
+ $(PURIFY) $(LD) $(LDFLAGS) -o $@ $(OBJDIR)/ct_run.o -L$(OBJDIR) $(LIBS) $(ERTS_INTERNAL_LIBS)
+
+$(OBJDIR)/ct_run.o: ct_run.c $(RC_GENERATED)
+ $(CC) $(CFLAGS) -o $@ -c ct_run.c
+
+
+
Install: ../unix/Install.src ../../vsn.mk $(TARGET)/Makefile
sed -e 's;%I_VSN%;$(VSN);' \
-e 's;%EMULATOR%;$(EMULATOR);' \
@@ -515,6 +542,7 @@ erl.src: ../unix/erl.src.src ../../vsn.mk $(TARGET)/Makefile
# ----------------------------------------------------
include $(ERL_TOP)/make/otp_release_targets.mk
+.PHONY: release_spec
release_spec: etc
ifneq ($(INSTALL_OBJS),)
$(INSTALL_DIR) $(RELEASE_PATH)/erts-$(VSN)/obj
@@ -561,9 +589,5 @@ ifneq ($(INSTALL_INCLUDES),)
$(INSTALL_DATA) $(INSTALL_INCLUDES) $(RELEASE_PATH)/erts-$(VSN)/include
endif
+.PHONY: release_docs_spec
release_docs_spec:
-
-
-
-
-
diff --git a/erts/etc/common/erlc.c b/erts/etc/common/erlc.c
index 35c360a99d..23f009ff4d 100644
--- a/erts/etc/common/erlc.c
+++ b/erts/etc/common/erlc.c
@@ -185,6 +185,7 @@ main(int argc, char** argv)
* Push initial arguments.
*/
+ PUSH("+sbtu");
PUSH("-noinput");
PUSH2("-mode", "minimal");
PUSH2("-boot", "start_clean");
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index 7e04724f5a..19b3bb82ef 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -64,8 +64,10 @@
static const char plusM_au_allocs[]= {
'u', /* all alloc_util allocators */
'B', /* binary_alloc */
+ 'C', /* sbmbc_alloc */
'D', /* std_alloc */
'E', /* ets_alloc */
+ 'F', /* fix_alloc */
'H', /* eheap_alloc */
'L', /* ll_alloc */
'R', /* driver_alloc */
@@ -93,6 +95,8 @@ static char *plusM_au_alloc_switches[] = {
"rsbcst",
"sbct",
"smbcs",
+ "sbmbcs",
+ "sbmbct",
NULL
};
@@ -107,8 +111,6 @@ static char *plusM_other_switches[] = {
"Mamcbf",
"Mrmcbf",
"Mmcs",
- "Mcci",
- "Fe",
"Ye",
"Ym",
"Ytp",
@@ -119,6 +121,7 @@ static char *plusM_other_switches[] = {
/* +s arguments with values */
static char *pluss_val_switches[] = {
"bt",
+ "cl",
"ct",
"wt",
"ss",
diff --git a/erts/etc/common/escript.c b/erts/etc/common/escript.c
index 6ed79c91e3..9e80ec6656 100644
--- a/erts/etc/common/escript.c
+++ b/erts/etc/common/escript.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -377,7 +377,8 @@ main(int argc, char** argv)
last_opt = argv;
#ifdef __WIN32__
- if (_stricmp(basename, "escript.exe") == 0) {
+ if ( (_stricmp(basename, "escript.exe") == 0)
+ ||(_stricmp(basename, "escript") == 0)) {
#else
if (strcmp(basename, "escript") == 0) {
#endif
diff --git a/erts/etc/common/heart.c b/erts/etc/common/heart.c
index 7a5746e630..755e308219 100644
--- a/erts/etc/common/heart.c
+++ b/erts/etc/common/heart.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -685,14 +685,16 @@ do_terminate(reason)
print_error("Would reboot. Terminating.");
else {
kill_old_erlang();
- system(command);
+ /* suppress gcc warning with 'if' */
+ if(system(command));
print_error("Executed \"%s\". Terminating.",command);
}
free_env_val(command);
}
else {
kill_old_erlang();
- system((char*)&cmd[0]);
+ /* suppress gcc warning with 'if' */
+ if(system((char*)&cmd[0]));
print_error("Executed \"%s\". Terminating.",cmd);
}
}
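Several hunks in this commit address the same gcc diagnostic: on modern glibc, write() and system() are declared warn_unused_result, so their return values can no longer be dropped silently. Where failure matters the patch checks the value (epmd_cli.c above, to_erl.c below); where nothing sensible can be done it deliberately consumes the value with an empty if, as heart.c does here. A small illustration of both idioms, with made-up names rather than code from the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void send_or_die(int fd, const char *buf, size_t len)
{
    /* Idiom 1: a real check, when the caller can report the error and bail. */
    if (write(fd, buf, len) != (ssize_t)len) {
        perror("write");
        exit(1);
    }
}

static void log_best_effort(const char *msg)
{
    /* Idiom 2: deliberately discard the result; the empty 'if' consumes the
     * return value and silences -Wunused-result when no recovery exists. */
    if (write(2, msg, strlen(msg)));
}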
diff --git a/erts/etc/common/inet_gethost.c b/erts/etc/common/inet_gethost.c
index 77bfd5e2bc..e923233ce9 100644
--- a/erts/etc/common/inet_gethost.c
+++ b/erts/etc/common/inet_gethost.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1141,14 +1141,12 @@ static Worker *pick_worker(void)
static Worker *pick_worker_greedy(AddrByte *domainbuff)
{
int i;
- int ql = 0;
int found = -1;
for (i=0; i < num_busy_workers; ++i) {
if (domaineq(busy_workers[i].domain, domainbuff)) {
if ((found < 0) || (busy_workers[i].que_size <
busy_workers[found].que_size)) {
found = i;
- ql = busy_workers[i].que_size;
}
}
}
@@ -1945,12 +1943,14 @@ static int worker_loop(void)
}
m = NULL;
#else
- write(1, reply, data_size); /* No signals expected */
+ /* expect no signals */
+ if (write(1, reply, data_size) < 0)
+ goto fail;
#endif
} /* for (;;) */
-#ifdef WIN32
fail:
+#ifdef WIN32
if (m != NULL) {
FREE(m);
}
@@ -1959,8 +1959,8 @@ static int worker_loop(void)
if (reply) {
FREE(reply);
}
- return 1;
#endif
+ return 1;
}
static int map_netdb_error(int netdb_code)
@@ -2561,7 +2561,8 @@ static void debugf(char *format, ...)
WriteFile(debug_console_allocated,buff,strlen(buff),&res,NULL);
}
#else
- write(2,buff,strlen(buff));
+ /* suppress warning with 'if' */
+ if(write(2,buff,strlen(buff)));
#endif
va_end(ap);
}
@@ -2583,7 +2584,8 @@ static void warning(char *format, ...)
WriteFile(GetStdHandle(STD_ERROR_HANDLE),buff,strlen(buff),&res,NULL);
}
#else
- write(2,buff,strlen(buff));
+ /* suppress warning with 'if' */
+ if(write(2,buff,strlen(buff)));
#endif
va_end(ap);
}
@@ -2605,7 +2607,8 @@ static void fatal(char *format, ...)
WriteFile(GetStdHandle(STD_ERROR_HANDLE),buff,strlen(buff),&res,NULL);
}
#else
- write(2,buff,strlen(buff));
+ /* suppress warning with 'if' */
+ if(write(2,buff,strlen(buff)));
#endif
va_end(ap);
#ifndef WIN32
diff --git a/erts/etc/unix/etp-commands b/erts/etc/unix/etp-commands
index 6a01e0b7e0..1c886620bb 100644
--- a/erts/etc/unix/etp-commands
+++ b/erts/etc/unix/etp-commands
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2005-2009. All Rights Reserved.
+# Copyright Ericsson AB 2005-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -1883,6 +1883,28 @@ document etp-ets-tables
%---------------------------------------------------------------------------
end
+define etp-ets-obj
+# Args: DbTerm*
+#
+ set $etp_ets_obj_i = 1
+ while $etp_ets_obj_i <= (($arg0)->tpl[0] >> 6)
+ if $etp_ets_obj_i == 1
+ printf "{"
+ else
+ printf ", "
+ end
+ set $etp_ets_elem = ($arg0)->tpl[$etp_ets_obj_i]
+ if ($etp_ets_elem & 3) == 0
+ printf "<compressed>"
+ else
+ etp-1 $etp_ets_elem 0
+ end
+ set $etp_ets_obj_i++
+ end
+ printf "}"
+end
+
+
define etp-ets-tabledump
# Args: int tableindex
#
@@ -1896,10 +1918,10 @@ define etp-ets-tabledump
if $etp_ets_tabledump_t->common.status & 0x130
# Hash table
set $etp_ets_tabledump_h = $etp_ets_tabledump_t->hash
- printf "%% nitems=%d\n", $etp_ets_tabledump_t->common.nitems
- while $etp_ets_tabledump_i < $etp_ets_tabledump_h->nactive
- set $etp_ets_tabledump_l = $etp_ets_tabledump_h->seg \
- [$etp_ets_tabledump_i>>8][$etp_ets_tabledump_i&0xFF]
+ printf "%% nitems=%d\n", (long) $etp_ets_tabledump_t->common.nitems
+ while $etp_ets_tabledump_i < (long) $etp_ets_tabledump_h->nactive
+ set $etp_ets_tabledump_seg = ((struct segment**)$etp_ets_tabledump_h->segtab)[$etp_ets_tabledump_i>>8]
+ set $etp_ets_tabledump_l = $etp_ets_tabledump_seg->buckets[$etp_ets_tabledump_i&0xFF]
if $etp_ets_tabledump_l
printf "%% Slot %d:\n", $etp_ets_tabledump_i
while $etp_ets_tabledump_l
@@ -1909,7 +1931,7 @@ define etp-ets-tabledump
printf "["
end
set $etp_ets_tabledump_n++
- etp-1 ((Eterm)($etp_ets_tabledump_l->dbterm.tpl)|0x2) 0
+ etp-ets-obj &($etp_ets_tabledump_l->dbterm)
if $etp_ets_tabledump_l->hvalue == ((unsigned long)-1)
printf "% *\n"
else
diff --git a/erts/etc/unix/to_erl.c b/erts/etc/unix/to_erl.c
index 11fa3ff4cc..754b349338 100644
--- a/erts/etc/unix/to_erl.c
+++ b/erts/etc/unix/to_erl.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2011. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -352,7 +352,10 @@ int main(int argc, char **argv)
* to trigger the version handshaking between to_erl and run_erl
* at the start of every new to_erl-session.
*/
- write(wfd, "\022", 1);
+
+ if (write(wfd, "\022", 1) < 0) {
+ fprintf(stderr, "Error in writing ^R to FIFO.\n");
+ }
/*
* read and write
@@ -412,7 +415,7 @@ int main(int argc, char **argv)
if (len) {
#ifdef DEBUG
- write(1, buf, len);
+ if(write(1, buf, len));
#endif
if (write_all(wfd, buf, len) != len) {
fprintf(stderr, "Error in writing to FIFO.\n");
diff --git a/erts/etc/win32/Install.c b/erts/etc/win32/Install.c
index 6e60512f6d..d680b67dd6 100644
--- a/erts/etc/win32/Install.c
+++ b/erts/etc/win32/Install.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2003-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2003-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -213,6 +213,9 @@ int main(int argc, char **argv)
fprintf(stderr,"Cannot continue installation, bailing out.\n");
exit(1);
}
+
+ /* OBS!!! If the format of the init file is changed, do not forget
+ to update release_handler:write_ini_file(...) */
ini_file = create_init_file();
ini_section = create_init_section("erlang");
add_init_section(ini_file,ini_section);
diff --git a/erts/etc/win32/cygwin_tools/vc/emu_cc.sh b/erts/etc/win32/cygwin_tools/vc/emu_cc.sh
index c74c35111b..6c179aed00 100755
--- a/erts/etc/win32/cygwin_tools/vc/emu_cc.sh
+++ b/erts/etc/win32/cygwin_tools/vc/emu_cc.sh
@@ -2,7 +2,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2002-2009. All Rights Reserved.
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -17,6 +17,11 @@
#
# %CopyrightEnd%
#
+if [ X"$CONFIG_SUBTYPE" = X"win64" ]; then
+ GCC="x86_64-w64-mingw32-gcc.exe"
+else
+ GCC="gcc"
+fi
TOOLDIR=$ERL_TOP/erts/etc/win32/cygwin_tools/vc
COFFIX=$TOOLDIR/coffix
WTOOLDIR=`(cygpath -d $TOOLDIR 2>/dev/null || cygpath -w $TOOLDIR)`
@@ -71,7 +76,7 @@ if [ $SKIP_COFFIX = false ]; then
if [ "X$EMU_CC_SH_DEBUG_LOG" != "X" ]; then
echo "gcc -o $TEMPFILE -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer $CMD" >> $EMU_CC_SH_DEBUG_LOG 2>&1
fi
- eval gcc -o $TEMPFILE -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer $CMD
+ eval $GCC -o $TEMPFILE -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer $CMD
RES=$?
if [ $RES = 0 ]; then
$COFFIX.exe -e `(cygpath -d $TEMPFILE 2>/dev/null || cygpath -w $TEMPFILE)`
@@ -85,6 +90,6 @@ if [ $SKIP_COFFIX = false ]; then
rm -f $TEMPFILE
exit $RES
else
- eval gcc -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer -fno-tree-copyrename $CMD 2>/dev/null
+ eval $GCC -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer -fno-tree-copyrename $CMD 2>/dev/null
exit $?
fi
diff --git a/erts/etc/win32/erlsrv/erlsrv_interactive.c b/erts/etc/win32/erlsrv/erlsrv_interactive.c
index 13e029b364..736eabac79 100644
--- a/erts/etc/win32/erlsrv/erlsrv_interactive.c
+++ b/erts/etc/win32/erlsrv/erlsrv_interactive.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -135,7 +135,12 @@ void print_last_error(void){
fprintf(stderr,"Error: %s",mes);
LocalFree(mes);
}
-
+
+static int get_last_error(void)
+{
+ return (last_error) ? last_error : GetLastError();
+}
+
static BOOL install_service(void){
SC_HANDLE scm;
SC_HANDLE service;
@@ -508,7 +513,7 @@ int do_usage(char *arg0){
"\t[{-sn[ame] | -n[ame]} [<nodename>]]\n"
"\t[-d[ebugtype] [{new|reuse|console}]]\n"
"\t[-ar[gs] [<limited erl arguments>]]\n\n"
- "%s {start | stop | disable | enable} <servicename>\n\n"
+ "%s {start | start_disabled | stop | disable | enable} <servicename>\n\n"
"%s remove <servicename>\n\n"
"%s rename <servicename> <servicename>\n\n"
"%s list [<servicename>]\n\n"
@@ -561,6 +566,45 @@ int do_manage(int argc,char **argv){
return 0;
}
}
+ if(!_stricmp(action,"start_disabled")){
+ if(!enable_service()){
+ fprintf(stderr,"%s: Failed to enable service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ return 1;
+ }
+ if(!start_service() && get_last_error() != ERROR_SERVICE_ALREADY_RUNNING){
+ fprintf(stderr,"%s: Failed to start service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ goto failure_starting;
+ }
+
+ if(!wait_service_trans(SERVICE_STOPPED, SERVICE_START_PENDING,
+ SERVICE_RUNNING, 60)){
+ fprintf(stderr,"%s: Failed to start service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ goto failure_starting;
+ }
+
+ if(!disable_service()){
+ fprintf(stderr,"%s: Failed to disable service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ return 1;
+ }
+ printf("%s: Service %s started.\n",
+ argv[0],service_name);
+ return 0;
+ failure_starting:
+ if(!disable_service()){
+ fprintf(stderr,"%s: Failed to disable service %s.\n",
+ argv[0],service_name);
+ print_last_error();
+ }
+ return 1;
+ }
if(!_stricmp(action,"stop")){
if(!stop_service()){
fprintf(stderr,"%s: Failed to stop service %s.\n",
@@ -841,6 +885,7 @@ int do_add_or_set(int argc, char **argv){
argv[0], service_name);
return 0;
}
+
int do_rename(int argc, char **argv){
RegEntry *current = empty_reg_tab();
RegEntry *dummy = empty_reg_tab();
@@ -1129,35 +1174,131 @@ void read_arguments(int *pargc, char ***pargv){
*pargc = argc;
*pargv = argv;
}
+
+/* Create a free-for-all ACL to set on the semaphore */
+PACL get_acl(PSECURITY_DESCRIPTOR secdescp)
+{
+ DWORD acl_length = 0;
+ PSID auth_users_sidp = NULL;
+ PACL aclp = NULL;
+ SID_IDENTIFIER_AUTHORITY ntauth = SECURITY_NT_AUTHORITY;
+
+ if(!InitializeSecurityDescriptor(secdescp, SECURITY_DESCRIPTOR_REVISION)) {
+ return NULL;
+ }
+
+ if(!AllocateAndInitializeSid(&ntauth,
+ 1,
+ SECURITY_AUTHENTICATED_USER_RID,
+ 0, 0, 0, 0, 0, 0, 0,
+ &auth_users_sidp)) {
+ return NULL;
+ }
+
+ acl_length = sizeof(ACL) +
+ sizeof(ACCESS_ALLOWED_ACE) - sizeof(DWORD) +
+ GetLengthSid(auth_users_sidp);
+
+ if((aclp = (PACL) HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, acl_length)) == NULL) {
+ FreeSid(auth_users_sidp);
+ return NULL;
+ }
+
+ if(!InitializeAcl(aclp, acl_length, ACL_REVISION)) {
+ FreeSid(auth_users_sidp);
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return NULL;
+ }
+
+ if(!AddAccessAllowedAce(aclp, ACL_REVISION, SEMAPHORE_ALL_ACCESS, auth_users_sidp)) {
+ FreeSid(auth_users_sidp);
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return NULL;
+ }
+
+ if(!SetSecurityDescriptorDacl(secdescp, TRUE, aclp, FALSE)) {
+ FreeSid(auth_users_sidp);
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return NULL;
+ }
+ return aclp;
+}
+
+static HANDLE lock_semaphore = NULL;
+
+int take_lock(void) {
+ SECURITY_ATTRIBUTES attr;
+ PACL aclp;
+ SECURITY_DESCRIPTOR secdesc;
+
+ if ((aclp = get_acl(&secdesc)) == NULL) {
+ return -1;
+ }
+
+ memset(&attr,0,sizeof(attr));
+ attr.nLength = sizeof(attr);
+ attr.lpSecurityDescriptor = &secdesc;
+ attr.bInheritHandle = FALSE;
+
+ if ((lock_semaphore = CreateSemaphore(&attr, 1, 1, ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE)) == NULL) {
+ return -1;
+ }
+
+ if (WaitForSingleObject(lock_semaphore,INFINITE) != WAIT_OBJECT_0) {
+ return -1;
+ }
+
+ HeapFree(GetProcessHeap(), 0, aclp);
+ return 0;
+}
+
+void release_lock(void) {
+ ReleaseSemaphore(lock_semaphore,1,NULL);
+}
+
int interactive_main(int argc, char **argv){
char *action = argv[1];
-
+ int res;
+
+ if (take_lock() != 0) {
+ fprintf(stderr,"%s: unable to acquire global lock (%s).\n",argv[0],
+ ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE);
+ return 1;
+ }
+
if(!_stricmp(action,"readargs")){
- read_arguments(&argc,&argv);
- action = argv[1];
+ read_arguments(&argc,&argv);
+ action = argv[1];
}
if(!_stricmp(action,"set") || !_stricmp(action,"add"))
- return do_add_or_set(argc,argv);
- if(!_stricmp(action,"rename"))
- return do_rename(argc,argv);
- if(!_stricmp(action,"remove"))
- return do_remove(argc,argv);
- if(!_stricmp(action,"list"))
- return do_list(argc,argv);
- if(!_stricmp(action,"start") ||
- !_stricmp(action,"stop") ||
- !_stricmp(action,"enable") ||
- !_stricmp(action,"disable"))
- return do_manage(argc,argv);
- if(_stricmp(action,"?") &&
- _stricmp(action,"/?") &&
- _stricmp(action,"-?") &&
- *action != 'h' &&
- *action != 'H')
+ res = do_add_or_set(argc,argv);
+ else if(!_stricmp(action,"rename"))
+ res = do_rename(argc,argv);
+ else if(!_stricmp(action,"remove"))
+ res = do_remove(argc,argv);
+ else if(!_stricmp(action,"list"))
+ res = do_list(argc,argv);
+ else if(!_stricmp(action,"start") ||
+ !_stricmp(action,"start_disabled") ||
+ !_stricmp(action,"stop") ||
+ !_stricmp(action,"enable") ||
+ !_stricmp(action,"disable"))
+ res = do_manage(argc,argv);
+ else if(_stricmp(action,"?") &&
+ _stricmp(action,"/?") &&
+ _stricmp(action,"-?") &&
+ *action != 'h' &&
+ *action != 'H') {
fprintf(stderr,"%s: action %s not implemented.\n",argv[0],action);
- do_usage(argv[0]);
- return 1;
+ do_usage(argv[0]);
+ res = 1;
+ } else {
+ do_usage(argv[0]);
+ res = 0;
+ }
+ release_lock();
+ return res;
}
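The new take_lock()/release_lock() pair turns a named Windows semaphore with initial and maximum count 1 into a system-wide mutex, so concurrent erlsrv invocations serialize their registry and service-control work; the free-for-all DACL built by get_acl() lets any authenticated user open the same semaphore object. A stripped-down sketch of that locking pattern, using default security and a made-up name instead of ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE:

#include <windows.h>
#include <stdio.h>

#define DEMO_LOCK_NAME "{00000000-0000-0000-0000-demo-lock}"

int main(void)
{
    /* Initial count 1, maximum count 1: the semaphore acts as a mutex
     * shared by every process that opens the same name. */
    HANDLE sem = CreateSemaphoreA(NULL, 1, 1, DEMO_LOCK_NAME);
    if (sem == NULL)
        return 1;

    if (WaitForSingleObject(sem, INFINITE) != WAIT_OBJECT_0)
        return 1;

    printf("holding the global lock; doing serialized work\n");

    ReleaseSemaphore(sem, 1, NULL);
    CloseHandle(sem);
    return 0;
}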
diff --git a/erts/etc/win32/erlsrv/erlsrv_interactive.h b/erts/etc/win32/erlsrv/erlsrv_interactive.h
index deacf81899..23e69e508d 100644
--- a/erts/etc/win32/erlsrv/erlsrv_interactive.h
+++ b/erts/etc/win32/erlsrv/erlsrv_interactive.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -19,6 +19,8 @@
#ifndef _ERLSRV_INTERACTIVE_H
#define _ERLSRV_INTERACTIVE_H
+#define ERLSRV_INTERACTIVE_GLOBAL_SEMAPHORE "{468d6954-e355-415f-968f-d257cb0feef4}"
+
int interactive_main(int argc, char **argv);
#endif /* _ERLSRV_INTERACTIVE_H */
diff --git a/erts/etc/win32/msys_tools/erl b/erts/etc/win32/msys_tools/erl
new file mode 100644
index 0000000000..525253fd84
--- /dev/null
+++ b/erts/etc/win32/msys_tools/erl
@@ -0,0 +1,42 @@
+#! /bin/sh
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Note! This shellscript expects to be run in an MSYS environment,
+# it converts erl command lines to native Windows erl commands, which
+# basically means running msys2win_path.sh on whatever is a path...
+
+CMD=""
+for x in "$@"; do
+ case "$x" in
+ -I/*|-o/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ z=`echo $x | sed 's,^-\([Io]\)\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -$z\"$MPATH\"";;
+ /*)
+ MPATH=`msys2win_path.sh -m $x`;
+ CMD="$CMD \"$MPATH\"";;
+ *)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD \"$y\"";;
+ esac
+done
+ERL_TOP=`msys2win_path.sh -m $ERL_TOP`
+export ERL_TOP
+eval erl.exe $CMD
diff --git a/erts/etc/win32/msys_tools/erlc b/erts/etc/win32/msys_tools/erlc
new file mode 100644
index 0000000000..3f53ef7f4f
--- /dev/null
+++ b/erts/etc/win32/msys_tools/erlc
@@ -0,0 +1,59 @@
+#! /bin/sh
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+
+CMD=""
+ECHO_ONLY=false
+for x in "$@"; do
+ case "$x" in
+ --echo_only)
+ ECHO_ONLY=true;;
+ -I/*|-o/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ z=`echo $x | sed 's,^-\([Io]\)\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -$z$MPATH";;
+ -pa/*)
+ y=`echo $x | sed 's,^-pa\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -pa $MPATH";;
+ /*)
+ MPATH=`msys2win_path.sh -m $x`;
+ CMD="$CMD \"$MPATH\"";;
+# Needed for +'{preproc_flags,whatever}'
+ +{preproc_flags,*})
+ y=`echo $x | sed 's,^+{preproc_flags\,"\(.*\)"},\1,g'`;
+ z=`eval $0 --echo_only $y`;
+ case "$z" in # Don't "doubledoublequote"
+ \"*\")
+ CMD="$CMD +'{preproc_flags,$z}'";;
+ *)
+ CMD="$CMD +'{preproc_flags,\"$z\"}'";;
+ esac;;
+ *)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD \"$y\"";;
+ esac
+done
+if [ $ECHO_ONLY = true ]; then
+ echo $CMD
+else
+ eval erlc.exe $CMD
+fi
diff --git a/erts/etc/win32/msys_tools/javac.sh b/erts/etc/win32/msys_tools/javac.sh
new file mode 100644
index 0000000000..2d884bc2c8
--- /dev/null
+++ b/erts/etc/win32/msys_tools/javac.sh
@@ -0,0 +1,65 @@
+#! /bin/sh
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Note! This shellscript expects to be run in an MSYS environment,
+# it converts javac command lines to native Windows javac commands, which
+# basically means running msys2win_path.sh on whatever is a path...
+
+CMD=""
+save_IFS=$IFS
+IFS=":"
+NEWCLASSPATH=""
+for x in $CLASSPATH; do
+ TMP=`msys2win_path.sh -m $x`
+ if [ -z "$NEWCLASSPATH" ]; then
+ NEWCLASSPATH="$TMP"
+ else
+ NEWCLASSPATH="$NEWCLASSPATH;$TMP"
+ fi
+done
+IFS=$save_IFS
+CLASSPATH="$NEWCLASSPATH"
+export CLASSPATH
+#echo "CLASSPATH=$CLASSPATH"
+SAVE="$@"
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -I/*|-o/*|-d/*)
+ y=`echo $x | sed 's,^-[Iod]\(/.*\),\1,g'`;
+ z=`echo $x | sed 's,^-\([Iod]\)\(/.*\),\1,g'`;
+ #echo "Foooo:$z"
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -$z\"$MPATH\"";;
+ -d|-I|-o)
+ shift;
+ MPATH=`msys2win_path.sh -m $1`;
+ CMD="$CMD $x $MPATH";;
+ /*)
+ #echo "absolute:"$x;
+ MPATH=`msys2win_path.sh -m $x`;
+ CMD="$CMD \"$MPATH\"";;
+ *)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD \"$y\"";;
+ esac
+ shift
+done
+#echo javac.exe "$CMD"
+eval javac.exe "$CMD"
diff --git a/erts/emulator/zlib/Makefile b/erts/etc/win32/msys_tools/make_bootstrap_ini.sh
index def8e1aa47..b61965f546 100644
--- a/erts/emulator/zlib/Makefile
+++ b/erts/etc/win32/msys_tools/make_bootstrap_ini.sh
@@ -1,7 +1,8 @@
+#! /bin/bash
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2009. All Rights Reserved.
+# Copyright Ericsson AB 2003-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -16,8 +17,28 @@
#
# %CopyrightEnd%
#
-#
-# Invoke with GNU make or clearmake -C gnu.
-#
+# Create a local init-file for erlang in the build environment.
+if [ -z "$1" ]; then
+ echo "error: $0: No rootdir given"
+ exit 1
+else
+ RDIR=$1
+fi
+if [ -z "$2" ]; then
+ echo "error: $0: No bindir given"
+ exit 1
+else
+ BDIR=$2
+fi
+
+DRDIR=`msys2win_path.sh $RDIR | sed 's,\\\,\\\\\\\\,g'`
+DBDIR=`msys2win_path.sh $BDIR | sed 's,\\\,\\\\\\\\,g'`
+
+
+cat > $RDIR/bin/erl.ini <<EOF
+[erlang]
+Bindir=$DBDIR
+Progname=erl
+Rootdir=$DRDIR
+EOF
-include $(ERL_TOP)/make/run_make.mk
diff --git a/erts/emulator/pcre/Makefile b/erts/etc/win32/msys_tools/make_local_ini.sh
index 72eea01130..6c5d84c4f5 100644
--- a/erts/emulator/pcre/Makefile
+++ b/erts/etc/win32/msys_tools/make_local_ini.sh
@@ -1,7 +1,8 @@
+#! /bin/bash
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2008-2009. All Rights Reserved.
+# Copyright Ericsson AB 2003-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -16,11 +17,25 @@
#
# %CopyrightEnd%
#
-#
-# Invoke with GNU make or clearmake -C gnu.
-#
+# Create a local init-file for erlang in the build environment.
+if [ -z "$1" ]; then
+ if [ -z $ERL_TOP ]; then
+ echo "error: $0: No rootdir available"
+ exit 1
+ else
+ RDIR=$ERL_TOP
+ fi
+else
+ RDIR=$1
+fi
-include $(ERL_TOP)/make/run_make.mk
+DDIR=`msys2win_path.sh $RDIR | sed 's,\\\,\\\\\\\\,g'`
+
+
+cat > $RDIR/bin/erl.ini <<EOF
+[erlang]
+Bindir=$DDIR\\\\bin\\\\win32
+Progname=erl
+Rootdir=$DDIR
+EOF
-table:
- $(MAKE) -f $(TARGET)/Makefile $@ \ No newline at end of file
diff --git a/erts/etc/win32/msys_tools/msys2win_path.sh b/erts/etc/win32/msys_tools/msys2win_path.sh
new file mode 100644
index 0000000000..679a9d6a8f
--- /dev/null
+++ b/erts/etc/win32/msys_tools/msys2win_path.sh
@@ -0,0 +1,44 @@
+#! /bin/bash
+MIXED=false
+ABSOLUTE=false
+done=false
+while [ $done = false ]; do
+ case "$1" in
+ -m)
+ MIXED=true;
+ shift;;
+ -a)
+ ABSOLUTE=true;
+ shift;;
+ *)
+ done=true;;
+ esac
+done
+if [ -z "$1" ]; then
+ echo "Usage: $0 <path>" >&2
+ exit 1;
+fi
+MSYS_PATH=`win2msys_path.sh "$1"` # Clean up spaces
+if [ $ABSOLUTE = true ]; then
+ MSYS_DIR=`dirname "$MSYS_PATH"`
+ MSYS_FILE=`basename "$MSYS_PATH"`
+ if [ X"$MSYS_FILE" = X".." ]; then
+ MSYS_DIR="$MSYS_DIR/$MSYS_FILE"
+ WIN_ADD=""
+ else
+ WIN_ADD="/$MSYS_FILE"
+ fi
+ SAVEDIR=`pwd`
+ cd $MSYS_DIR
+ CURRENT=`pwd`
+ cd $SAVEDIR
+ WINPATH=`cmd //C echo $CURRENT`
+ WINPATH="$WINPATH$WIN_ADD"
+else
+ WINPATH=`cmd //c echo $MSYS_PATH`
+fi
+if [ $MIXED = true ]; then
+ echo $WINPATH
+else
+ echo $WINPATH | sed 's,/,\\,g'
+fi \ No newline at end of file
diff --git a/erts/etc/win32/msys_tools/reg_query.sh b/erts/etc/win32/msys_tools/reg_query.sh
new file mode 100644
index 0000000000..ae6d5c3218
--- /dev/null
+++ b/erts/etc/win32/msys_tools/reg_query.sh
@@ -0,0 +1,24 @@
+#! /bin/sh
+BAT_FILE=/tmp/w$$.bat
+if [ -z "$1" -o -z "$2" ]; then
+ echo "Usage:" "$0" '<key> <valuename>'
+ exit 1
+fi
+BACKED=`echo "$1" | sed 's,/,\\\\,g'`
+# We need to get the 64bit part of the registry, hence we need to execute
+# a 64bit reg.exe, but c:\windows\system32 is redirected to 32bit versions
+# if we are in the 32bit virtual environment, which is why we need to use the
+# sysnative trick to get to the 64bit executable of reg.exe (ouch!)
+if [ -d $WINDIR/sysnative ]; then
+ REG_CMD="$WINDIR\\sysnative\\reg.exe"
+else
+ REG_CMD="reg"
+fi
+cat > $BAT_FILE <<EOF
+@echo off
+$REG_CMD query "$BACKED" /v "$2"
+EOF
+#echo $BAT_FILE
+#cat $BAT_FILE
+RESULT=`cmd //C $BAT_FILE`
+echo $RESULT | sed "s,.*$2 REG_[^ ]* ,,"
diff --git a/erts/etc/win32/msys_tools/vc/ar.sh b/erts/etc/win32/msys_tools/vc/ar.sh
new file mode 100644
index 0000000000..f4c61e1d92
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/ar.sh
@@ -0,0 +1,47 @@
+#! /bin/sh
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+CMD=""
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -out:)
+ shift
+ case "$1" in
+ /*)
+ MPATH=`msys2win_path.sh -m $1`;;
+ *)
+ MPATH=$1;;
+ esac
+ CMD="$CMD -out:\"$MPATH\"";;
+ -out:/*)
+ y=`echo $x | sed 's,^-out:\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -out:\"$MPATH\"";;
+ /*)
+ MPATH=`msys2win_path.sh -m $x`;
+ CMD="$CMD \"$MPATH\"";;
+ *)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD \"$y\"";;
+ esac
+ shift
+done
+
+eval lib.exe $CMD
diff --git a/erts/etc/win32/msys_tools/vc/cc.sh b/erts/etc/win32/msys_tools/vc/cc.sh
new file mode 100644
index 0000000000..38b3d2ee81
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/cc.sh
@@ -0,0 +1,320 @@
+#! /bin/sh
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Icky cl wrapper that does its best to behave like a Unixish cc.
+# Made to work for Erlang builds and to make configure happy, not really
+# general I suspect.
+# set -x
+# Save the command line for debug outputs
+
+SAVE="$@"
+
+# Constants
+COMMON_CFLAGS="-nologo -D__WIN32__ -DWIN32 -DWINDOWS -D_WIN32 -DNT -D_CRT_SECURE_NO_DEPRECATE"
+
+# Variables
+# The stdout and stderr for the compiler
+MSG_FILE=/tmp/cl.exe.$$.1
+ERR_FILE=/tmp/cl.exe.$$.2
+
+# "Booleans" determined during "command line parsing"
+# If the stdlib option is explicitly passed to this program
+MD_FORCED=false
+# If we're preprocessing (only), i.e. -E
+PREPROCESSING=false
+# If we're generating dependencies (implies preprocessing)
+DEPENDENCIES=false
+# If this is supposed to be a debug build
+DEBUG_BUILD=false
+# If this is supposed to be an optimized build (there can only be one...)
+OPTIMIZED_BUILD=false
+# If we're linking or only compiling
+LINKING=true
+
+# This data is accumulated during command line "parsing"
+# The standard library option, default multithreaded dynamic
+MD=-MD
+# Flags for debug compilation
+DEBUG_FLAGS=""
+# Flags for optimization
+OPTIMIZE_FLAGS=""
+# The specified output filename (if any), may be either object or exe.
+OUTFILE=""
+# Unspecified command line options for the compiler
+CMD=""
+# All the c source files, in unix style
+SOURCES=""
+# All the options to pass to the linker, kept in Unix style
+LINKCMD=""
+
+
+# Loop through the parameters and set the above variables accordingly
+# Also convert some cygwin filenames to "mixed style" dito (understood by the
+# compiler very well), except for anything passed to the linker, that script
+# handles those and the sources, which are also kept unixish for now
+
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -Wall)
+ ;;
+ -c)
+ LINKING=false;;
+ #CMD="$CMD -c";;
+ -MM)
+ PREPROCESSING=true;
+ LINKING=false;
+ DEPENDENCIES=true;;
+ -E)
+ PREPROCESSING=true;
+ LINKING=false;; # Obviously...
+ #CMD="$CMD -E";;
+ -Owx)
+ # Optimization hardcoded for wxErlang, needs to disable debugging too
+ OPTIMIZE_FLAGS="-Ob2ity -Gs -Zi";
+ DEBUG_FLAGS="";
+ DEBUG_BUILD=false;
+ if [ $MD_FORCED = false ]; then
+ MD=-MD;
+ fi
+ OPTIMIZED_BUILD=true;;
+ -O*)
+ # Optimization hardcoded, needs to disable debugging too
+ OPTIMIZE_FLAGS="-Ox -Zi";
+ DEBUG_FLAGS="";
+ DEBUG_BUILD=false;
+ if [ $MD_FORCED = false ]; then
+ MD=-MD;
+ fi
+ OPTIMIZED_BUILD=true;;
+ -g|-ggdb)
+ if [ $OPTIMIZED_BUILD = false ];then
+ # Hardcoded windows debug flags
+ DEBUG_FLAGS="-Z7";
+ if [ $MD_FORCED = false ]; then
+ MD=-MDd;
+ fi
+ LINKCMD="$LINKCMD -g";
+ DEBUG_BUILD=true;
+ fi;;
+ # Allow forcing of stdlib
+ -mt|-MT)
+ MD="-MT";
+ MD_FORCED=true;;
+ -md|-MD)
+ MD="-MD";
+ MD_FORCED=true;;
+ -ml|-ML)
+ MD="-ML";
+ MD_FORCED=true;;
+ -mdd|-MDD|-MDd)
+ MD="-MDd";
+ MD_FORCED=true;;
+ -mtd|-MTD|-MTd)
+ MD="-MTd";
+ MD_FORCED=true;;
+ -mld|-MLD|-MLd)
+ MD="-MLd";
+ MD_FORCED=true;;
+ -o)
+ shift;
+ OUTFILE="$1";;
+ -o*)
+ y=`echo $x | sed 's,^-[Io]\(.*\),\1,g'`;
+ OUTFILE="$y";;
+ -I/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ z=`echo $x | sed 's,^-\([Io]\)\(/.*\),\1,g'`;
+ MPATH=`echo $y`;
+ CMD="$CMD -$z\"$MPATH\"";;
+ -I*)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD $y";;
+ -D*)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD $y";;
+ -EH*)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD $y";;
+ -TP|-Tp)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD $y";;
+ -l*)
+ y=`echo $x | sed 's,^-l\(.*\),\1,g'`;
+ LINKCMD="$LINKCMD $x";;
+ /*.c)
+ SOURCES="$SOURCES $x";;
+ *.c)
+ SOURCES="$SOURCES $x";;
+ /*.cc)
+ SOURCES="$SOURCES $x";;
+ *.cc)
+ SOURCES="$SOURCES $x";;
+ /*.cpp)
+ SOURCES="$SOURCES $x";;
+ *.cpp)
+ SOURCES="$SOURCES $x";;
+ /*.o)
+ LINKCMD="$LINKCMD $x";;
+ *.o)
+ LINKCMD="$LINKCMD $x";;
+ *)
+ # Try to quote uninterpreted options
+ y=`echo $x | sed 's,",\\\",g'`;
+ LINKCMD="$LINKCMD $y";;
+ esac
+ shift
+done
+
+#Return code from compiler, linker.sh and finally this script...
+RES=0
+
+# Accumulated object names
+ACCUM_OBJECTS=""
+
+# A temporary object file location
+TMPOBJDIR=/tmp/tmpobj$$
+mkdir $TMPOBJDIR
+
+# Compile
+for x in $SOURCES; do
+ start_time=`date '+%s'`
+ # Compile each source
+ if [ $LINKING = false ]; then
+ # We should have an output defined, which is a directory
+ # or an object file
+ case $OUTFILE in
+ /*.o)
+ # Simple output, SOURCES should be one single
+ # Simple output, SOURCES should be a single file
+ if [ $n -gt 1 ]; then
+ echo "cc.sh:Error, multiple sources, one object output.";
+ exit 1;
+ else
+ output_filename=`echo $OUTFILE`;
+ fi;;
+ *.o)
+ # Relative path needs no translation
+ n=`echo $SOURCES | wc -w`
+ if [ $n -gt 1 ]; then
+ echo "cc.sh:Error, multiple sources, one object output."
+ exit 1
+ else
+ output_filename=$OUTFILE
+ fi;;
+ /*)
+ # Absolute directory
+ o=`echo $x | sed 's,.*/,,' | sed 's,\.c$,.o,'`
+ output_filename=`echo $OUTFILE`
+ output_filename="$output_filename/${o}";;
+ *)
+ # Relative_directory or empty string (.//x.o is valid)
+ o=`echo $x | sed 's,.*/,,' | sed 's,\.cp*$,.o,'`
+ output_filename="./${OUTFILE}/${o}";;
+ esac
+ else
+ # We are linking, which means we build objects in a temporary
+ # directory and link from there. We should retain the basename
+ # of each source to make examining the exe easier...
+ o=`echo $x | sed 's,.*/,,' | sed 's,\.c$,.o,'`
+ output_filename=$TMPOBJDIR/$o
+ ACCUM_OBJECTS="$ACCUM_OBJECTS $output_filename"
+ fi
+ # Now we know enough, lets try a compilation...
+ MPATH=`echo $x`
+ if [ $PREPROCESSING = true ]; then
+ output_flag="-E"
+ else
+ output_flag="-c -Fo`cmd //C echo ${output_filename}`"
+ fi
+ params="$COMMON_CFLAGS $MD $DEBUG_FLAGS $OPTIMIZE_FLAGS \
+ $CMD ${output_flag} $MPATH"
+ if [ "X$CC_SH_DEBUG_LOG" != "X" ]; then
+ echo cc.sh "$SAVE" >>$CC_SH_DEBUG_LOG
+ echo cl.exe $params >>$CC_SH_DEBUG_LOG
+ fi
+ eval cl.exe $params >$MSG_FILE 2>$ERR_FILE
+ RES=$?
+ if test $PREPROCESSING = false; then
+ cat $ERR_FILE >&2
+ tail -n +2 $MSG_FILE
+ else
+ tail -n +2 $ERR_FILE >&2
+ if test $DEPENDENCIES = true; then
+ if test `grep -v $x $MSG_FILE | grep -c '#line'` != "0"; then
+ o=`echo $x | sed 's,.*/,,' | sed 's,\.cp*$,.o,'`
+ echo -n $o':'
+# cat $MSG_FILE | grep '#line' | grep -v $x | awk -F\" '{printf("%s\n",$2)}' | sort -u | grep -v " " | xargs -n 1 win2msys_path.sh | awk '{printf("\\\n %s ",$0)}'
+ cat $MSG_FILE | grep '#line' | grep -v $x | awk -F\" '{printf("%s\n",$2)}' | sort -u | grep -v " " | sed 's,^\([A-Za-z]\):[\\/]*,/\1/,;s,\\\\*,/,g'| awk '{printf("\\\n %s ",$0)}'
+ echo
+ echo
+ after_sed=`date '+%s'`
+ echo Made dependencies for $x':' `expr $after_sed '-' $start_time` 's' >&2
+ fi
+ else
+ cat $MSG_FILE
+ fi
+ fi
+ rm -f $ERR_FILE $MSG_FILE
+ if [ $RES != 0 ]; then
+ rm -rf $TMPOBJDIR
+ exit $RES
+ fi
+done
+
+# If we got here, we succeeded in compiling (if there were anything to compile)
+# The output filename should name an executable if we're linking
+if [ $LINKING = true ]; then
+ case $OUTFILE in
+ "")
+ # Use the first source name to name the executable
+ first_source=""
+ for x in $SOURCES; do first_source=$x; break; done;
+ if [ -n "$first_source" ]; then
+ e=`echo $x | sed 's,.*/,,' | sed 's,\.c$,.exe,'`;
+ out_spec="-o $e";
+ else
+ out_spec="";
+ fi;;
+ *)
+ out_spec="-o $OUTFILE";;
+ esac
+ # Decide which standard library to link against
+ case $MD in
+ -ML)
+ stdlib="-lLIBC";;
+ -MLd)
+ stdlib="-lLIBCD";;
+ -MD)
+ stdlib="-lMSVCRT";;
+ -MDd)
+ stdlib="-lMSVCRTD";;
+ -MT)
+ stdlib="-lLIBCMT";;
+ -MTd)
+ stdlib="-lLIBCMTD";;
+ esac
+ # And finally call the next script to do the linking...
+ params="$out_spec $LINKCMD $stdlib"
+ eval ld.sh $ACCUM_OBJECTS $params
+ RES=$?
+fi
+rm -rf $TMPOBJDIR
+
+exit $RES
diff --git a/erts/etc/win32/msys_tools/vc/coffix.c b/erts/etc/win32/msys_tools/vc/coffix.c
new file mode 100644
index 0000000000..1773b222fe
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/coffix.c
@@ -0,0 +1,161 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+** This mini tool fixes an incompatibility between
+** Microsoft's tools, who dont like the virtual size being put in
+** the physical address field, but rely on the raw size field for
+** sizing the ".bss" section.
+** This fixes some of the problems with linking gcc compiled objects
+** together with MSVC dito.
+**
+** Courtesy DJ Delorie for describing the COFF file format on
+** http://www.delorie.com/djgpp/doc/coff/
+** The coff structures are fetched from Microsofts headers though.
+*/
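/*
** Editor's illustrative sketch (not part of the patch): what the fix below
** amounts to for a typical gcc-produced ".bss" section header. The numeric
** value is made up for the example.
**
**   before:  scnhdr.Misc.PhysicalAddress = 0x00000400;   virtual size, misplaced
**            scnhdr.SizeOfRawData        = 0x00000000;
**
**   after:   scnhdr.SizeOfRawData        = 0x00000400;   what MS link expects
**            scnhdr.Misc.PhysicalAddress = 0x00000000;
**
** dump_edit() below performs exactly this swap when the '-e' option is given.
*/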
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+
+#include <windows.h>
+#include <winnt.h> /* Structure definitions for PE (COFF) */
+
+static int dump_edit(char *filename, int edit);
+static int v_printf(char *format, ...);
+
+
+char *progname;
+int verbouse = 0;
+
+int main(int argc, char **argv)
+{
+ int findex = 1;
+ int edit = 0;
+ int ret;
+
+ progname = argv[0];
+ if (argc == 1) {
+ fprintf(stderr,"Format : %s [-e] [-v] <filename>\n", progname);
+ return 1;
+ }
+ for (findex = 1;
+ findex < argc && (*argv[findex] == '-' || *argv[findex] == '/');
+ ++findex)
+ switch (argv[findex][1]) {
+ case 'e':
+ case 'E':
+ edit = 1;
+ break;
+ case 'v':
+ case 'V':
+ verbouse = 1;
+ break;
+ default:
+ fprintf(stderr, "%s: unknown option %s\n", progname, argv[findex]);
+ break;
+ }
+ if (findex == argc) {
+ fprintf(stderr,"%s: No filenames given.\n", progname);
+ return 1;
+ }
+ for(; findex < argc; ++findex)
+ if ((ret = dump_edit(argv[findex],edit)) != 0)
+ return ret;
+ return 0;
+}
+
+int dump_edit(char *filename, int edit)
+{
+ FILE *f = fopen(filename, (edit) ? "r+b" : "rb");
+ IMAGE_FILE_HEADER filhdr;
+ IMAGE_SECTION_HEADER scnhdr;
+ int i;
+
+ if (f == NULL) {
+ fprintf(stderr, "%s: cannot open %s.\n", progname, filename);
+ return 1;
+ }
+
+ if (fread(&filhdr, sizeof(filhdr), 1, f) == 0) {
+ fprintf(stderr,"%s: Could not read COFF header from %s,"
+ " is this a PE (COFF) file?\n", progname, filename);
+ fclose(f);
+ return 1;
+ }
+ v_printf("File: %s\n", filename);
+ v_printf("Magic number: 0x%08x\n", filhdr.Machine);
+ v_printf("Number of sections: %d\n",filhdr.NumberOfSections);
+
+ if (fseek(f, (long) filhdr.SizeOfOptionalHeader, SEEK_CUR) != 0) {
+ fprintf(stderr,"%s: Could not read COFF optional header from %s,"
+ " is this a PE (COFF) file?\n", progname, filename);
+ fclose(f);
+ return 1;
+ }
+
+ for (i = 0; i < filhdr.NumberOfSections; ++i) {
+ if (fread(&scnhdr, sizeof(scnhdr), 1, f) == 0) {
+ fprintf(stderr,"%s: Could not read section header from %s,"
+ " is this a PE (COFF) file?\n", progname, filename);
+ fclose(f);
+ return 1;
+ }
+ v_printf("Section %s:\n", scnhdr.Name);
+ v_printf("Physical address: 0x%08x\n", scnhdr.Misc.PhysicalAddress);
+ v_printf("Size: 0x%08x\n", scnhdr.SizeOfRawData);
+ if (scnhdr.Misc.PhysicalAddress != 0 &&
+ scnhdr.SizeOfRawData == 0) {
+ printf("Section header %s in file %s will confuse MSC linker, "
+ "virtual size is 0x%08x and raw size is 0\n",
+ scnhdr.Name, filename, scnhdr.Misc.PhysicalAddress,
+ scnhdr.SizeOfRawData);
+ if (edit) {
+ scnhdr.SizeOfRawData = scnhdr.Misc.PhysicalAddress;
+ scnhdr.Misc.PhysicalAddress = 0;
+ if (fseek(f, (long) -((long)sizeof(scnhdr)), SEEK_CUR) != 0 ||
+ fwrite(&scnhdr, sizeof(scnhdr), 1, f) == 0) {
+ fprintf(stderr,"%s: could not edit file %s.\n",
+ progname, filename);
+ fclose(f);
+ return 1;
+ }
+ printf("Edited object, virtual size is now 0, and "
+ "raw size is 0x%08x.\n", scnhdr.SizeOfRawData);
+ } else {
+ printf("Specify option '-e' to correct the problem.\n");
+ }
+ }
+ }
+ fclose(f);
+ return 0;
+}
+
+
+static int v_printf(char *format, ...)
+{
+ va_list ap;
+ int ret = 0;
+ if (verbouse) {
+ va_start(ap, format);
+ ret = vfprintf(stdout, format, ap);
+ va_end(ap);
+ }
+ return ret;
+}
+
diff --git a/erts/etc/win32/msys_tools/vc/emu_cc.sh b/erts/etc/win32/msys_tools/vc/emu_cc.sh
new file mode 100644
index 0000000000..68ce4e359f
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/emu_cc.sh
@@ -0,0 +1,95 @@
+#! /bin/sh
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+if [ X"$CONFIG_SUBTYPE" = X"win64" ]; then
+ GCC="x86_64-w64-mingw32-gcc.exe"
+else
+ GCC="gcc"
+fi
+TOOLDIR=$ERL_TOP/erts/etc/win32/msys_tools/vc
+COFFIX=$TOOLDIR/coffix
+WTOOLDIR0=`win2msys_path.sh "$TOOLDIR"`
+WTOOLDIR=`cmd //C echo $WTOOLDIR0`
+# Do primitive 'make'
+newer_exe=`find $TOOLDIR -newer $COFFIX.c -name coffix.exe -print`
+if [ -z $newer_exe ]; then
+ echo recompiling $COFFIX.exe
+ cl.exe -Fe${WTOOLDIR}/coffix.exe ${WTOOLDIR}/coffix.c
+ rm -f $COFFIX.obj coffix.obj $COFFIX.pdb coffix.pdb
+fi
+
+# Try to find out the output filename and remove it from command line
+CMD=""
+OUTFILE=""
+INFILE=""
+SKIP_COFFIX=false
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -o/*)
+ OUTFILE=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;;
+ -o)
+ shift
+ OUTFILE=$1;;
+ -MM)
+ SKIP_COFFIX=true
+ CMD="$CMD \"$x\"";;
+ *.c)
+ INFILE="$INFILE $x";
+ CMD="$CMD \"$x\"";;
+ *)
+ CMD="$CMD \"$x\"";;
+ esac
+ shift
+done
+if [ -z "$INFILE" ]; then
+ echo 'emu_cc.sh: please give an input filename for the compiler' >&2
+ exit 1
+fi
+if [ -z "$OUTFILE" ]; then
+ OUTFILE=`echo $INFILE | sed 's,\.c$,.o,'`
+fi
+
+if [ $SKIP_COFFIX = false ]; then
+ n=`echo $INFILE | wc -w`;
+ if [ $n -gt 1 ]; then
+ echo "emu_cc.sh:Error, multiple sources, one object output.";
+ exit 1;
+ fi
+ TEMPFILE=/tmp/tmp_emu_cc$$.o
+ if [ "X$EMU_CC_SH_DEBUG_LOG" != "X" ]; then
+ echo "$GCC -o $TEMPFILE -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer $CMD" >> $EMU_CC_SH_DEBUG_LOG 2>&1
+ fi
+ eval $GCC -o $TEMPFILE -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer $CMD
+ RES=$?
+ if [ $RES = 0 ]; then
+ $COFFIX.exe -e `win2msys_path.sh $TEMPFILE`
+ RES=$?
+ if [ $RES = 0 ]; then
+ cp $TEMPFILE $OUTFILE
+ else
+ echo "emu_cc.sh: fatal: coffix failed!" >&2
+ fi
+ fi
+ rm -f $TEMPFILE
+ exit $RES
+else
+ eval $GCC -D__WIN32__ -DWIN32 -DWINDOWS -fomit-frame-pointer -fno-tree-copyrename $CMD 2>/dev/null
+ exit $?
+fi
diff --git a/erts/etc/win32/msys_tools/vc/ld.sh b/erts/etc/win32/msys_tools/vc/ld.sh
new file mode 100644
index 0000000000..0fcbf6f7d9
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/ld.sh
@@ -0,0 +1,198 @@
+#! /bin/sh
+# set -x
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Save the command line for debug outputs
+
+SAVE="$@"
+kernel_libs="kernel32.lib advapi32.lib"
+gdi_libs="gdi32.lib user32.lib comctl32.lib comdlg32.lib shell32.lib"
+DEFAULT_LIBRARIES="$kernel_libs $gdi_libs"
+
+CMD=""
+STDLIB=MSVCRT.LIB
+DEBUG_BUILD=false
+STDLIB_FORCED=false
+BUILD_DLL=false
+OUTPUT_FILENAME=""
+
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -dll| -DLL)
+ BUILD_DLL=true;;
+ -L/*|-L.*)
+ y=`echo $x | sed 's,^-L\(.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -libpath:\"$MPATH\"";;
+ -lMSVCRT|-lmsvcrt)
+ STDLIB_FORCED=true;
+ STDLIB=MSVCRT.LIB;;
+ -lMSVCRTD|-lmsvcrtd)
+ STDLIB_FORCED=true;
+ STDLIB=MSVCRTD.LIB;;
+ -lLIBCMT|-llibcmt)
+ STDLIB_FORCED=true;
+ STDLIB=LIBCMT.LIB;;
+ -lLIBCMTD|-llibcmtd)
+ STDLIB_FORCED=true;
+ STDLIB=LIBCMTD.LIB;;
+ -lsocket)
+ DEFAULT_LIBRARIES="$DEFAULT_LIBRARIES WS2_32.LIB IPHLPAPI.LIB";;
+ -l*)
+ y=`echo $x | sed 's,^-l\(.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD \"${MPATH}.lib\"";;
+ -g)
+ DEBUG_BUILD=true;;
+ -pdb:none|-incremental:no)
+ ;;
+ -implib:*)
+ y=`echo $x | sed 's,^-implib:\(.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -implib:\"${MPATH}\"";;
+ -def:*)
+ y=`echo $x | sed 's,^-def:\(.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -def:\"${MPATH}\"";;
+ -o)
+ shift
+ MPATH=`msys2win_path.sh -m $1`;
+ OUTPUT_FILENAME="$MPATH";;
+ -o/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ OUTPUT_FILENAME="$MPATH";;
+ /*)
+ MPATH=`msys2win_path.sh -m $x`;
+ CMD="$CMD \"$MPATH\"";;
+ *)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD \"$y\"";;
+ esac
+ shift
+done
+if [ $DEBUG_BUILD = true ]; then
+ linktype="-debug -pdb:none"
+ if [ $STDLIB_FORCED = false ]; then
+ STDLIB=MSVCRTD.LIB
+ fi
+fi
+# Generate a PDB
+linkadd_pdb=""
+case "$OUTPUT_FILENAME" in
+ *.exe|*.EXE)
+ fn=`echo "$OUTPUT_FILENAME" | sed 's,[eE][xX][eE]$,,g'`;
+ linkadd_pdb="-pdb:\"${fn}pdb\"";;
+ *.dll|*.DLL)
+ fn=`echo "$OUTPUT_FILENAME" | sed 's,[dD][lL][lL]$,,g'`;
+ linkadd_pdb="-pdb:\"${fn}pdb\"";;
+ "")
+ linkadd_pdb="-pdb:\"a.pdb\"";;
+ *)
+ linkadd_pdb="-pdb:\"${OUTPUT_FILENAME}.pdb\"";;
+esac
+
+ linktype="-debug $linkadd_pdb"
+
+CHMOD_FILE=""
+
+if [ $BUILD_DLL = true ];then
+ case "$OUTPUT_FILENAME" in
+ *.exe|*.EXE)
+ echo "Warning, output set to .exe when building DLL" >&2
+ CHMOD_FILE="$OUTPUT_FILENAME";
+ CMD="-dll -out:\"$OUTPUT_FILENAME\" $CMD";
+ OUTPUTRES="${OUTPUT_FILENAME}\;2";
+ MANIFEST="${OUTPUT_FILENAME}.manifest";;
+ *.dll|*.DLL)
+ CMD="-dll -out:\"$OUTPUT_FILENAME\" $CMD";
+ OUTPUTRES="${OUTPUT_FILENAME}\;2";
+ MANIFEST="${OUTPUT_FILENAME}.manifest";;
+ "")
+ CMD="-dll -out:\"a.dll\" $CMD";
+ OUTPUTRES="a.dll\;2";
+ MANIFEST="a.dll.manifest";;
+ *)
+ CMD="-dll -out:\"${OUTPUT_FILENAME}.dll\" $CMD";
+ OUTPUTRES="${OUTPUT_FILENAME}.dll\;2";
+ MANIFEST="${OUTPUT_FILENAME}.dll.manifest";;
+ esac
+else
+ case "$OUTPUT_FILENAME" in
+ *.exe|*.EXE)
+ CHMOD_FILE="$OUTPUT_FILENAME";
+ CMD="-out:\"$OUTPUT_FILENAME\" $CMD";
+ OUTPUTRES="${OUTPUT_FILENAME}\;1"
+ MANIFEST="${OUTPUT_FILENAME}.manifest";;
+ *.dll|*.DLL)
+ echo "Warning, output set to .dll when building EXE" >&2
+ CMD="-out:\"$OUTPUT_FILENAME\" $CMD";
+ OUTPUTRES="${OUTPUT_FILENAME}\;1";
+ MANIFEST="${OUTPUT_FILENAME}.manifest";;
+ "")
+ CHMOD_FILE="a.exe";
+ CMD="-out:\"a.exe\" $CMD";
+ OUTPUTRES="a.exe\;1";
+ MANIFEST="a.exe.manifest";;
+ *)
+ CMD="-out:\"${OUTPUT_FILENAME}.exe\" $CMD";
+ OUTPUTRES="${OUTPUT_FILENAME}.exe\;1";
+ MANIFEST="${OUTPUT_FILENAME}.exe.manifest";;
+ esac
+fi
+
+p=$$
+CMD="$linktype -nologo -incremental:no $CMD $STDLIB $DEFAULT_LIBRARIES"
+if [ "X$LD_SH_DEBUG_LOG" != "X" ]; then
+ echo ld.sh "$SAVE" >>$LD_SH_DEBUG_LOG
+ echo link.exe $CMD >>$LD_SH_DEBUG_LOG
+fi
+eval link.exe "$CMD" >/tmp/link.exe.${p}.1 2>/tmp/link.exe.${p}.2
+RES=$?
+
+CMANIFEST=`win2msys_path.sh $MANIFEST`
+if [ "$RES" = "0" -a -f "$CMANIFEST" ]; then
+ # Add stuff to manifest to turn off "virtualization"
+ sed -n -i '1h;1!H;${;g;s,<trustInfo.*</trustInfo>.,,g;p;}' $CMANIFEST 2>/dev/null
+ sed -i "s/<\/assembly>/ <ms_asmv2:trustInfo xmlns:ms_asmv2=\"urn:schemas-microsoft-com:asm.v2\">\n <ms_asmv2:security>\n <ms_asmv2:requestedPrivileges>\n <ms_asmv2:requestedExecutionLevel level=\"AsInvoker\" uiAccess=\"false\"\/>\n <\/ms_asmv2:requestedPrivileges>\n <\/ms_asmv2:security>\n <\/ms_asmv2:trustInfo>\n<\/assembly>/" $CMANIFEST 2>/dev/null
+
+ eval mt.exe -nologo -manifest "$MANIFEST" -outputresource:"$OUTPUTRES" >>/tmp/link.exe.${p}.1 2>>/tmp/link.exe.${p}.2
+ RES=$?
+ if [ "$RES" != "0" ]; then
+ REMOVE=`echo "$OUTPUTRES" | sed 's,\\\;[12]$,,g'`
+ CREMOVE=`cygpath $REMOVE`
+ rm -f "$CREMOVE"
+ fi
+ rm -f "$CMANIFEST"
+fi
+
+# This works around some strange behaviour
+# in cygwin 1.7 Beta on Windows 7 with a samba drive.
+# Configure will think the compiler failed if test -x fails,
+# which it might do as we might not be the owner of the
+# file.
+if [ '!' -z "$CHMOD_FILE" -a -s "$CHMOD_FILE" -a '!' -x "$CHMOD_FILE" ]; then
+ chmod +x $CHMOD_FILE
+fi
+
+tail -n +2 /tmp/link.exe.${p}.2 >&2
+cat /tmp/link.exe.${p}.1
+rm -f /tmp/link.exe.${p}.2 /tmp/link.exe.${p}.1
+exit $RES
diff --git a/erts/etc/win32/msys_tools/vc/mc.sh b/erts/etc/win32/msys_tools/vc/mc.sh
new file mode 100644
index 0000000000..27d985f73e
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/mc.sh
@@ -0,0 +1,87 @@
+#! /bin/sh
+# set -x
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Save the command line for debug outputs
+SAVE="$@"
+CMD=""
+OUTPUT_DIRNAME=""
+
+# Find the correct mc.exe. This could be done by the configure script,
+# but as we seldom use the message compiler, it might as well be done here...
+MCC=""
+save_ifs=$IFS
+IFS=:
+for p in $PATH; do
+ if [ -f $p/mc.exe ]; then
+ if [ -n "`$p/mc.exe -? 2>&1 >/dev/null </dev/null \
+ | grep -i \"message compiler\"`" ]; then
+ MCC=`echo "$p/mc.exe" | sed 's/ /\\\\ /g'`
+ fi
+ fi
+done
+IFS=$save_ifs
+
+if [ -z "$MCC" ]; then
+ echo 'mc.exe not found!' >&2
+ exit 1
+fi
+
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -o)
+ shift
+ OUTPUT_DIRNAME="$1";;
+ -o/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ OUTPUT_DIRNAME="$y";;
+ -I)
+ shift
+ MPATH=`msys2win_path.sh -m $1`;
+ CMD="$CMD -I\"$MPATH\"";;
+ -I/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -I\"$MPATH\"";;
+ *)
+ MPATH=`msys2win_path.sh -m -a $x`;
+ CMD="$CMD \"$MPATH\"";;
+ esac
+ shift
+done
+p=$$
+if [ "X$MC_SH_DEBUG_LOG" != "X" ]; then
+ echo mc.sh "$SAVE" >>$MC_SH_DEBUG_LOG
+ echo mc.exe $CMD >>$MC_SH_DEBUG_LOG
+fi
+if [ -n "$OUTPUT_DIRNAME" ]; then
+ cd $OUTPUT_DIRNAME
+ RES=$?
+ if [ "$RES" != "0" ]; then
+ echo "mc.sh: Error: could not cd to $OUTPUT_DIRNAME">&2
+ exit $RES
+ fi
+fi
+eval $MCC "$CMD" >/tmp/mc.exe.${p}.1 2>/tmp/mc.exe.${p}.2
+RES=$?
+tail -n +2 /tmp/mc.exe.${p}.2 >&2
+cat /tmp/mc.exe.${p}.1
+rm -f /tmp/mc.exe.${p}.2 /tmp/mc.exe.${p}.1
+exit $RES
diff --git a/erts/etc/win32/msys_tools/vc/rc.sh b/erts/etc/win32/msys_tools/vc/rc.sh
new file mode 100644
index 0000000000..dfa9e324db
--- /dev/null
+++ b/erts/etc/win32/msys_tools/vc/rc.sh
@@ -0,0 +1,86 @@
+#! /bin/sh
+# set -x
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2011. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Save the command line for debug outputs
+SAVE="$@"
+CMD=""
+OUTPUT_FILENAME=""
+
+# Find the correct rc.exe. This could be done by the configure script,
+# but as we seldom use the resource compiler, it might as well be done here...
+RCC=""
+save_ifs=$IFS
+IFS=:
+for p in $PATH; do
+ if [ -f $p/rc.exe ]; then
+ if [ -n "`$p/rc.exe -? 2>&1 | grep -i "resource compiler"`" ]; then
+ RCC=`echo "$p/rc.exe" | sed 's/ /\\\\ /g'`
+ fi
+ fi
+done
+IFS=$save_ifs
+
+if [ -z "$RCC" ]; then
+ echo 'rc.exe not found!' >&2
+ exit 1
+fi
+
+while test -n "$1" ; do
+ x="$1"
+ case "$x" in
+ -o)
+ shift
+ MPATH=`msys2win_path.sh -m $1`;
+ OUTPUT_FILENAME="$MPATH";;
+ -o/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ OUTPUT_FILENAME="$MPATH";;
+ -I)
+ shift
+ MPATH=`msys2win_path.sh -m $1`;
+ CMD="$CMD -I\"$MPATH\"";;
+ -I/*)
+ y=`echo $x | sed 's,^-[Io]\(/.*\),\1,g'`;
+ MPATH=`msys2win_path.sh -m $y`;
+ CMD="$CMD -I\"$MPATH\"";;
+ /*)
+ MPATH=`msys2win_path.sh -m $x`;
+ CMD="$CMD \"$MPATH\"";;
+ *)
+ y=`echo $x | sed 's,",\\\",g'`;
+ CMD="$CMD \"$y\"";;
+ esac
+ shift
+done
+p=$$
+if [ -n "$OUTPUT_FILENAME" ]; then
+ CMD="-Fo$OUTPUT_FILENAME $CMD"
+fi
+if [ "X$RC_SH_DEBUG_LOG" != "X" ]; then
+ echo rc.sh "$SAVE" >>$RC_SH_DEBUG_LOG
+ echo rc.exe $CMD >>$RC_SH_DEBUG_LOG
+fi
+eval $RCC "$CMD" >/tmp/rc.exe.${p}.1 2>/tmp/rc.exe.${p}.2
+RES=$?
+tail -n +2 /tmp/rc.exe.${p}.2 >&2
+cat /tmp/rc.exe.${p}.1
+rm -f /tmp/rc.exe.${p}.2 /tmp/rc.exe.${p}.1
+exit $RES
diff --git a/erts/etc/win32/msys_tools/win2msys_path.sh b/erts/etc/win32/msys_tools/win2msys_path.sh
new file mode 100644
index 0000000000..6558a15ecd
--- /dev/null
+++ b/erts/etc/win32/msys_tools/win2msys_path.sh
@@ -0,0 +1,38 @@
+#! /bin/bash
+if [ -z "$1" ]; then
+ echo "Usage: $0 <path>" >&2
+ exit 1;
+fi
+
+MSYS_PATH=`echo "$1" | sed 's,^\([a-zA-Z]\):\\\\,/\L\1/,;s,\\\\,/,g'`
+if [ -z "$MSYS_PATH" ]; then
+ echo "$0: Could not translate $1 to msys format" >&2
+ exit 2;
+fi
+
+DELBLANK=`echo "$MSYS_PATH" | sed 's, ,,g'`
+
+if [ "X$DELBLANK" != "X$MSYS_PATH" ]; then
+ if [ -d "$MSYS_PATH" ]; then
+ C1=`(cd "$MSYS_PATH" && cmd //C "for %i in (".") do @echo %~fsi")`
+ MSYS_PATH=`echo "$C1" | sed 's,^\([a-zA-Z]\):\\\\,/\L\1/,;s,\\\\,/,g'`
+ else
+ MSYS_DIR=`dirname "$MSYS_PATH"`
+ MSYS_FILE=`basename "$MSYS_PATH"`
+ if [ -d "$MSYS_DIR" ]; then
+ C1=`(cd "$MSYS_DIR" && cmd //C "for %i in (".") do @echo %~fsi")`
+ BAT_FILE=/tmp/w$$.bat
+ # I simply cannot get the quoting right for this,
+ # need an intermediate bat file
+ cat > $BAT_FILE <<EOF
+@echo off
+for %%i in ("$MSYS_FILE") do @echo %%~snxi
+EOF
+ C2=`(cd "$MSYS_DIR" && cmd //C $BAT_FILE)`
+ rm -f $BAT_FILE
+ MSYS_PATH=`echo "$C1/$C2" | sed 's,^\([a-zA-Z]\):\\\\,/\L\1/,;s,\\\\,/,g;s," ", ,g'`
+ fi
+ fi
+fi
+echo $MSYS_PATH
+exit 0
\ No newline at end of file
diff --git a/erts/etc/win32/nsis/Makefile b/erts/etc/win32/nsis/Makefile
index ae2343b420..6a93c5153d 100644
--- a/erts/etc/win32/nsis/Makefile
+++ b/erts/etc/win32/nsis/Makefile
@@ -40,10 +40,29 @@ clean:
include $(ERL_TOP)/make/otp_release_targets.mk
TARGET_DIR = $(RELEASE_PATH)
-WTESTROOT=$(shell (cygpath -d $(RELEASE_PATH) 2>/dev/null || cygpath -w $(RELEASE_PATH)))
-WTARGET_DIR=$(shell (cygpath -d $(TARGET_DIR) 2>/dev/null || cygpath -d $(TARGET_DIR)))
+
+ifeq ($(MSYSTEM),MINGW32)
+
+ MAKENSISFLAGS = //V2
+ WTESTROOT=$(shell (msys2win_path.sh $(RELEASE_PATH)))
+ WTARGET_DIR=$(shell (msys2win_path.sh $(TARGET_DIR)))
+
+else
+
+ MAKENSISFLAGS = /V2
+ WTESTROOT=$(shell (cygpath -d $(RELEASE_PATH) 2>/dev/null || cygpath -w $(RELEASE_PATH)))
+ WTARGET_DIR=$(shell (cygpath -d $(TARGET_DIR) 2>/dev/null || cygpath -d $(TARGET_DIR)))
+
+endif
+
+ifeq ($(CONFIG_SUBTYPE),win64)
+ WINTYPE=win64
+else
+ WINTYPE=win32
+endif
REDIST_FILE=$(shell (sh ./find_redist.sh || echo ""))
+REDIST_TARGET=$(shell (sh ./find_redist.sh -n || echo ""))
REDIST_DLL_VERSION=$(shell (sh ./dll_version_helper.sh || echo ""))
REDIST_DLL_NAME=$(shell (sh ./dll_version_helper.sh -n || echo ""))
@@ -65,16 +84,18 @@ release_spec:
echo '!define ERTS_VERSION "$(VSN)"' >> $(VERSION_HEADER);\
echo '!define TESTROOT "$(WTESTROOT)"' >> $(VERSION_HEADER);\
echo '!define OUTFILEDIR "$(WTARGET_DIR)"' >> $(VERSION_HEADER);\
+ echo '!define WINTYPE "$(WINTYPE)"' >> $(VERSION_HEADER);\
if [ -f $(CUSTOM_MODERN) ];\
then \
echo '!define HAVE_CUSTOM_MODERN 1' >> $(VERSION_HEADER); \
fi;\
if [ '!' -z "$(REDIST_FILE)" -a '!' -z "$(REDIST_DLL_VERSION)" ];\
then \
- cp $(REDIST_FILE) $(RELEASE_PATH)/vcredist_x86.exe;\
+ cp $(REDIST_FILE) $(RELEASE_PATH)/$(REDIST_TARGET);\
echo '!define HAVE_REDIST_FILE 1' >> $(VERSION_HEADER); \
echo '!define REDIST_DLL_VERSION "$(REDIST_DLL_VERSION)"' >> $(VERSION_HEADER);\
echo '!define REDIST_DLL_NAME "$(REDIST_DLL_NAME)"' >> $(VERSION_HEADER);\
+ echo '!define REDIST_EXECUTABLE "$(REDIST_TARGET)"' >> $(VERSION_HEADER);\
fi;\
if [ -f $(RELEASE_PATH)/docs/doc/index.html ];\
then \
diff --git a/erts/etc/win32/nsis/dll_version_helper.sh b/erts/etc/win32/nsis/dll_version_helper.sh
index eecd4a72b5..96e4532b7a 100755
--- a/erts/etc/win32/nsis/dll_version_helper.sh
+++ b/erts/etc/win32/nsis/dll_version_helper.sh
@@ -36,11 +36,11 @@ int main(void)
}
EOF
-cl /MD hello.c > /dev/null 2>&1
+cl -MD hello.c > /dev/null 2>&1
if [ '!' -f hello.exe.manifest ]; then
# Gah - VC 2010 changes the way it handles DLL's and manifests... Again...
# need another way of getting the version
- DLLNAME=`dumpbin.exe /imports hello.exe | egrep MSVCR.*dll`
+ DLLNAME=`dumpbin.exe -imports hello.exe | egrep MSVCR.*dll`
DLLNAME=`echo $DLLNAME`
cat > helper.c <<EOF
#include <windows.h>
@@ -59,11 +59,7 @@ int main(void)
char *vs_verinfo;
unsigned int vs_ver_size;
- struct LANGANDCODEPAGE {
- WORD language;
- WORD codepage;
- } *translate;
-
+ WORD *translate;
unsigned int tr_size;
if (!(versize = GetFileVersionInfoSize(REQ_MODULE,&dummy))) {
@@ -79,10 +75,10 @@ int main(void)
fprintf(stderr,"No translation info in %s!\n",REQ_MODULE);
exit(3);
}
- n = tr_size/sizeof(translate);
+ n = tr_size/(2*sizeof(*translate));
for(i=0; i < n; ++i) {
sprintf(buff,"\\\\StringFileInfo\\\\%04x%04x\\\\FileVersion",
- translate[i].language,translate[i].codepage);
+ translate[i*2],translate[i*2+1]);
if (VerQueryValue(versinfo,buff,&vs_verinfo,&vs_ver_size)) {
printf("%s\n",(char *) vs_verinfo);
return 0;
@@ -92,7 +88,7 @@ int main(void)
return 0;
}
EOF
- cl /MD helper.c version.lib > /dev/null 2>&1
+ cl -MD helper.c version.lib > /dev/null 2>&1
if [ '!' -f helper.exe ]; then
echo "Failed to build helper program." >&2
exit 1
diff --git a/erts/etc/win32/nsis/erlang20.nsi b/erts/etc/win32/nsis/erlang20.nsi
index 941e8e6f5d..fb0eff3867 100644
--- a/erts/etc/win32/nsis/erlang20.nsi
+++ b/erts/etc/win32/nsis/erlang20.nsi
@@ -31,17 +31,24 @@ Var STARTMENU_FOLDER
!define MY_STARTMENUPAGE_REGISTRY_VALUENAME "Start Menu Folder"
;General
- OutFile "${OUTFILEDIR}\otp_win32_${OTP_VERSION}.exe"
+ OutFile "${OUTFILEDIR}\otp_${WINTYPE}_${OTP_VERSION}.exe"
;Folder selection page
+!if ${WINTYPE} == "win64"
+ InstallDir "$PROGRAMFILES64\erl${ERTS_VERSION}"
+!else
InstallDir "$PROGRAMFILES\erl${ERTS_VERSION}"
-
+!endif
;Remember install folder
InstallDirRegKey HKLM "SOFTWARE\Ericsson\Erlang\${ERTS_VERSION}" ""
; Set the default start menu folder
+!if ${WINTYPE} == "win64"
+ !define MUI_STARTMENUPAGE_DEFAULTFOLDER "${OTP_PRODUCT} ${OTP_VERSION} (x64)"
+!else
!define MUI_STARTMENUPAGE_DEFAULTFOLDER "${OTP_PRODUCT} ${OTP_VERSION}"
+!endif
;--------------------------------
;Modern UI Configuration
@@ -98,12 +105,12 @@ Var STARTMENU_FOLDER
Section "Microsoft redistributable libraries." SecMSRedist
SetOutPath "$INSTDIR"
- File "${TESTROOT}\vcredist_x86.exe"
+ File "${TESTROOT}\${REDIST_EXECUTABLE}"
; Set back verbosity...
!verbose 1
; Run the setup program
- ExecWait '"$INSTDIR\vcredist_x86.exe"'
+ ExecWait '"$INSTDIR\${REDIST_EXECUTABLE}"'
!verbose 1
SectionEnd ; MSRedist
diff --git a/erts/etc/win32/nsis/erlang_uninst.ico b/erts/etc/win32/nsis/erlang_uninst.ico
index edbd8a6f2c..edbd8a6f2c 100755..100644
--- a/erts/etc/win32/nsis/erlang_uninst.ico
+++ b/erts/etc/win32/nsis/erlang_uninst.ico
Binary files differ
diff --git a/erts/etc/win32/nsis/find_redist.sh b/erts/etc/win32/nsis/find_redist.sh
index bc4260ecba..7c449c9e4e 100755
--- a/erts/etc/win32/nsis/find_redist.sh
+++ b/erts/etc/win32/nsis/find_redist.sh
@@ -65,7 +65,6 @@ remove_path_element()
else
echo "${ACC}/$1"
fi
-
#echo "ACC=$ACC" >&2
#echo "1=$1" >&2
}
@@ -89,20 +88,39 @@ add_path_element()
echo "$PA"
}
+
CLPATH=`lookup_prog_in_path cl`
if [ -z "$CLPATH" ]; then
- echo "Can not locate cl.exe and vcredist_x86.exe - OK if using mingw" >&2
+ echo "Can not locate cl.exe and vcredist_x86/x64.exe - OK if using mingw" >&2
exit 1
fi
-#echo $CLPATH
+# Look to see if it's 64bit
+XX=`remove_path_element cl "$CLPATH"`
+YY=`remove_path_element amd64 "$XX"`
+if [ "$YY" != "$XX" ]; then
+ AMD64DIR=true
+ VCREDIST=vcredist_x64
+ COMPONENTS="cl amd64 bin vc"
+else
+ AMD64DIR=false
+ VCREDIST=vcredist_x86
+ COMPONENTS="cl bin vc"
+fi
+
+if [ X"$1" = X"-n" ]; then
+ echo $VCREDIST.exe
+ exit 0
+fi
+
+# echo $CLPATH
BPATH=$CLPATH
-for x in cl bin vc; do
- #echo $x
+for x in $COMPONENTS; do
+ # echo $x
NBPATH=`remove_path_element $x "$BPATH"`
if [ "$NBPATH" = "$BPATH" ]; then
- echo "Failed to locate vcredist_x86.exe because cl.exe was in an unexpected location" >&2
+ echo "Failed to locate $VCREDIST.exe because cl.exe was in an unexpected location" >&2
exit 2
fi
BPATH="$NBPATH"
@@ -115,7 +133,12 @@ fail=false
if [ '!' -z "$RCPATH" ]; then
BPATH=$RCPATH
allow_fail=false
- for x in rc bin @ANY v6.0A v7.0A v7.1; do
+ if [ $AMD64DIR = true ]; then
+ COMPONENTS="rc x64 bin @ANY v6.0A v7.0A v7.1"
+ else
+ COMPONENTS="rc bin @ANY v6.0A v7.0A v7.1"
+ fi
+ for x in $COMPONENTS; do
if [ $x = @ANY ]; then
allow_fail=true
else
@@ -131,6 +154,7 @@ if [ '!' -z "$RCPATH" ]; then
BPATH_LIST="$BPATH_LIST $BPATH"
fi
fi
+# echo "BPATH_LIST=$BPATH_LIST"
# Frantic search through two roots with different
# version directories. We want to be very specific about the
@@ -143,7 +167,7 @@ for BP in $BPATH_LIST; do
BPATH=$BP
fail=false
allow_fail=false
- for x in $verdir @ANY bootstrapper packages vcredist_x86 Redist VC @ALL vcredist_x86.exe; do
+ for x in $verdir @ANY bootstrapper packages $VCREDIST Redist VC @ALL $VCREDIST.exe; do
#echo "x=$x"
#echo "BPATH=$BPATH"
#echo "allow_fail=$allow_fail"
@@ -170,18 +194,18 @@ for BP in $BPATH_LIST; do
fi
done
-# shortcut for locating vcredist_x86.exe is to put it into $ERL_TOP
-if [ -f $ERL_TOP/vcredist_x86.exe ]; then
- echo $ERL_TOP/vcredist_x86.exe
+# shortcut for locating $VCREDIST.exe is to put it into $ERL_TOP
+if [ -f $ERL_TOP/$VCREDIST.exe ]; then
+ echo $ERL_TOP/$VCREDIST.exe
exit 0
fi
# or $ERL_TOP/.. to share across multiple builds
-if [ -f $ERL_TOP/../vcredist_x86.exe ]; then
- echo $ERL_TOP/../vcredist_x86.exe
+if [ -f $ERL_TOP/../$VCREDIST.exe ]; then
+ echo $ERL_TOP/../$VCREDIST.exe
exit 0
fi
-echo "Failed to locate vcredist_x86.exe because directory structure was unexpected" >&2
+echo "Failed to locate $VCREDIST.exe because directory structure was unexpected" >&2
exit 3
diff --git a/erts/etc/win32/start_erl.c b/erts/etc/win32/start_erl.c
index dcf8c8b281..28c8e55bd3 100644
--- a/erts/etc/win32/start_erl.c
+++ b/erts/etc/win32/start_erl.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -44,6 +44,8 @@ char *progname;
#endif
#define RELEASE_SUBDIR "\\releases"
+#define ERTS_SUBDIR_PREFIX "\\erts-"
+#define BIN_SUBDIR "\\bin"
#define REGISTRY_BASE "Software\\Ericsson\\Erlang\\"
#define DEFAULT_DATAFILE "start_erl.data"
@@ -101,7 +103,8 @@ void exit_help(char *err)
printf("Usage:\n%s\n"
" [<erlang options>] ++\n"
" [-data <datafile>]\n"
- " [-reldir <releasedir>]\n"
+ " {-rootdir <erlang root directory> | \n"
+ " -reldir <releasedir>}\n"
" [-bootflags <bootflagsfile>]\n"
" [-noconfig]\n", progname);
@@ -177,8 +180,9 @@ void split_commandline(void)
*/
char * unquote_optionarg(char *str, char **strp)
{
- char *newstr = (char *)malloc(strlen(str)+1); /* This one is realloc:ed later */
- int i=0, inquote=0;
+ char *newstr = (char *)malloc(strlen(str)+1); /* This one is
+ realloc:ed later */
+ int i = 0, inquote = 0;
assert(newstr);
assert(str);
@@ -223,8 +227,8 @@ char * unquote_optionarg(char *str, char **strp)
/*
- * Parses MyCommandLine and tries to fill in all the required option variables
- * (one way or another).
+ * Parses MyCommandLine and tries to fill in all the required option
+ * variables (in one way or another).
*/
void parse_commandline(void)
{
@@ -237,6 +241,11 @@ void parse_commandline(void)
*cmdline++;
if( strnicmp(cmdline, "data", 4) == 0) {
DataFileName = unquote_optionarg(cmdline+4, &cmdline);
+ } else if( strnicmp(cmdline, "rootdir", 7) == 0) {
+ RootDir = unquote_optionarg(cmdline+7, &cmdline);
+#ifdef _DEBUG
+ fprintf(stderr, "RootDir: '%s'\n", RootDir);
+#endif
} else if( strnicmp(cmdline, "reldir", 6) == 0) {
RelDir = unquote_optionarg(cmdline+6, &cmdline);
#ifdef _DEBUG
@@ -266,8 +275,8 @@ void parse_commandline(void)
* Read the data file specified and get the version and release number
* from it.
*
- * This function also construct the correct RegistryKey from the version information
- * retrieved.
+ * This function also constructs the correct RegistryKey from the version
+ * information retrieved.
*/
void read_datafile(void)
{
@@ -325,88 +334,6 @@ void read_datafile(void)
/*
- * Read the registry keys we need
- */
-void read_registry_keys(void)
-{
- HKEY hReg;
- ULONG lLen;
-
- /* Create the RegistryKey name */
- RegistryKey = (char *) malloc(strlen(REGISTRY_BASE) +
- strlen(Version) + 1);
- assert(RegistryKey);
- sprintf(RegistryKey, REGISTRY_BASE "%s", Version);
-
- /* We always need to find BinDir */
- if( (RegOpenKeyEx(HKEY_LOCAL_MACHINE,
- RegistryKey,
- 0,
- KEY_READ,
- &hReg)) != ERROR_SUCCESS ) {
- exit_help("Could not open registry key.");
- }
-
- /* First query size of data */
- if( (RegQueryValueEx(hReg,
- "Bindir",
- NULL,
- NULL,
- NULL,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query BinDir of release.\n");
- }
-
- /* Allocate enough space */
- BinDir = (char *)malloc(lLen+1);
- assert(BinDir);
- /* Retrieve the value */
- if( (RegQueryValueEx(hReg,
- "Bindir",
- NULL,
- NULL,
- (unsigned char *) BinDir,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query BinDir of release (2).\n");
- }
-
-#ifdef _DEBUG
- fprintf(stderr, "Bindir: '%s'\n", BinDir);
-#endif
-
- /* We also need the rootdir, in case we need to build RelDir later */
-
- /* First query size of data */
- if( (RegQueryValueEx(hReg,
- "Rootdir",
- NULL,
- NULL,
- NULL,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query RootDir of release.\n");
- }
-
- /* Allocate enough space */
- RootDir = (char *) malloc(lLen+1);
- assert(RootDir);
- /* Retrieve the value */
- if( (RegQueryValueEx(hReg,
- "Rootdir",
- NULL,
- NULL,
- (unsigned char *) RootDir,
- &lLen)) != ERROR_SUCCESS) {
- exit_help("Failed to query RootDir of release (2).\n");
- }
-
-#ifdef _DEBUG
- fprintf(stderr, "Rootdir: '%s'\n", RootDir);
-#endif
-
- RegCloseKey(hReg);
-}
-
-/*
* Read the bootflags. This file contains extra command line options to erl.exe
*/
void read_bootflags(void)
@@ -424,7 +351,8 @@ void read_bootflags(void)
exit_help("Need -reldir when -bootflags "
"filename has relative path.");
} else {
- newname = (char *)malloc(strlen(BootFlagsFile)+strlen(RelDir)+strlen(Release)+3);
+ newname = (char *)malloc(strlen(BootFlagsFile)+
+ strlen(RelDir)+strlen(Release)+3);
assert(newname);
sprintf(newname, "%s\\%s\\%s", RelDir, Release, BootFlagsFile);
free(BootFlagsFile);
@@ -436,8 +364,6 @@ void read_bootflags(void)
fprintf(stderr, "BootFlagsFile: '%s'\n", BootFlagsFile);
#endif
-
-
if( (fp=fopen(BootFlagsFile, "rb")) == NULL) {
exit_help("Could not open BootFlags file.");
}
@@ -605,32 +531,49 @@ void complete_options(void)
sz = nsz;
}
if (RelDir == NULL) {
- if(DataFileName){
- /* Needs to be absolute for this to work, but we
- can try... */
- read_datafile();
- read_registry_keys();
- } else {
- /* Impossible to find all data... */
- exit_help("Need either Release directory or an absolute "
- "datafile name.");
- }
- /* Ok, construct our own RelDir from RootDir */
- RelDir = (char *) malloc(strlen(RootDir)+strlen(RELEASE_SUBDIR)+1);
- assert(RelDir);
- sprintf(RelDir, "%s" RELEASE_SUBDIR, RootDir);
+ if (!RootDir) {
+ /* Impossible to find all data... */
+ exit_help("Need either Root directory nor Release directory.");
+ }
+ /* Ok, construct our own RelDir from RootDir */
+ RelDir = (char *) malloc(strlen(RootDir)+strlen(RELEASE_SUBDIR)+1);
+ assert(RelDir);
+ sprintf(RelDir, "%s" RELEASE_SUBDIR, RootDir);
+ read_datafile();
} else {
read_datafile();
- read_registry_keys();
}
} else {
read_datafile();
- read_registry_keys();
}
+ if( !RootDir ) {
+ /* Try to construct RootDir from RelDir */
+ char *p;
+ RootDir = malloc(strlen(RelDir)+1);
+ strcpy(RootDir,RelDir);
+ p = RootDir+strlen(RootDir)-1;
+ if (p >= RootDir && (*p == '/' || *p == '\\'))
+ --p;
+ while (p >= RootDir && *p != '/' && *p != '\\')
+ --p;
+ if (p <= RootDir) { /* Empty RootDir is also an error */
+ exit_help("Cannot determine Root directory from "
+ "Release directory.");
+ }
+ *p = '\0';
+ }
+
+
+ BinDir = (char *) malloc(strlen(RootDir)+strlen(ERTS_SUBDIR_PREFIX)+
+ strlen(Version)+strlen(BIN_SUBDIR)+1);
+ assert(BinDir);
+ sprintf(BinDir, "%s" ERTS_SUBDIR_PREFIX "%s" BIN_SUBDIR, RootDir, Version);
+
read_bootflags();
#ifdef _DEBUG
fprintf(stderr, "RelDir: '%s'\n", RelDir);
+ fprintf(stderr, "BinDir: '%s'\n", BinDir);
#endif
}
diff --git a/erts/etc/win32/win_erlexec.c b/erts/etc/win32/win_erlexec.c
index 0eed8e28b9..11cc6a30f7 100644
--- a/erts/etc/win32/win_erlexec.c
+++ b/erts/etc/win32/win_erlexec.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -30,6 +30,9 @@
#include <winuser.h>
#include <wincon.h>
#include <process.h>
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
#include "sys.h"
#include "erl_driver.h"
diff --git a/erts/example/matrix_nif.c b/erts/example/matrix_nif.c
index c5e01dade5..404329e36c 100644
--- a/erts/example/matrix_nif.c
+++ b/erts/example/matrix_nif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,7 +31,19 @@ typedef struct
unsigned nrows;
unsigned ncols;
double* data;
-}Matrix;
+} Matrix;
+
+/*
+ * Use a union for pointer type conversion to avoid compiler warnings
+ * about strict-aliasing violations with gcc-4.1. gcc >= 4.2 does not
+ * emit the warning.
+ * TODO: Reconsider use of union once gcc-4.1 is obsolete?
+ */
+typedef union
+{
+ void* vp;
+ Matrix* p;
+} mx_t;
#define POS(MX, ROW, COL) ((MX)->data[(ROW)* (MX)->ncols + (COL)])
@@ -44,8 +56,9 @@ static ErlNifResourceType* resource_type = NULL;
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
- ErlNifResourceType* rt = enif_open_resource_type(env, "matrix_nif_example",
- matrix_dtor,
+ ErlNifResourceType* rt = enif_open_resource_type(env, NULL,
+ "matrix_nif_example",
+ matrix_dtor,
ERL_NIF_RT_CREATE, NULL);
if (rt == NULL) {
return -1;
@@ -90,12 +103,12 @@ static ERL_NIF_TERM create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
}
ret = enif_make_resource(env, mx);
- enif_release_resource(env, mx);
+ enif_release_resource(mx);
return ret;
badarg:
if (mx != NULL) {
- enif_release_resource(env,mx);
+ enif_release_resource(mx);
}
return enif_make_badarg(env);
}
@@ -104,14 +117,14 @@ badarg:
static ERL_NIF_TERM pos(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
/* pos(Matrix, Row, Column) -> float() */
- Matrix* mx;
+ mx_t mx;
unsigned i, j;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mx) ||
- !enif_get_uint(env, argv[1], &i) || (--i >= mx->nrows) ||
- !enif_get_uint(env, argv[2], &j) || (--j >= mx->ncols)) {
+ if (!enif_get_resource(env, argv[0], resource_type, &mx.vp) ||
+ !enif_get_uint(env, argv[1], &i) || (--i >= mx.p->nrows) ||
+ !enif_get_uint(env, argv[2], &j) || (--j >= mx.p->ncols)) {
return enif_make_badarg(env);
}
- return enif_make_double(env, POS(mx, i,j));
+ return enif_make_double(env, POS(mx.p, i,j));
}
static ERL_NIF_TERM add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -119,37 +132,38 @@ static ERL_NIF_TERM add(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
/* add(Matrix_A, Matrix_B) -> Matrix_Sum */
unsigned i, j;
ERL_NIF_TERM ret;
- Matrix* mxA = NULL;
- Matrix* mxB = NULL;
- Matrix* mxS = NULL;
+ mx_t mxA, mxB, mxS;
+ mxA.p = NULL;
+ mxB.p = NULL;
+ mxS.p = NULL;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mxA) ||
- !enif_get_resource(env, argv[1], resource_type, (void**)&mxB) ||
- mxA->nrows != mxB->nrows ||
- mxB->ncols != mxB->ncols) {
+ if (!enif_get_resource(env, argv[0], resource_type, &mxA.vp) ||
+ !enif_get_resource(env, argv[1], resource_type, &mxB.vp) ||
+ mxA.p->nrows != mxB.p->nrows ||
+ mxB.p->ncols != mxB.p->ncols) {
return enif_make_badarg(env);
}
- mxS = alloc_matrix(env, mxA->nrows, mxA->ncols);
- for (i = 0; i < mxA->nrows; i++) {
- for (j = 0; j < mxA->ncols; j++) {
- POS(mxS, i, j) = POS(mxA, i, j) + POS(mxB, i, j);
+ mxS.p = alloc_matrix(env, mxA.p->nrows, mxA.p->ncols);
+ for (i = 0; i < mxA.p->nrows; i++) {
+ for (j = 0; j < mxA.p->ncols; j++) {
+ POS(mxS.p, i, j) = POS(mxA.p, i, j) + POS(mxB.p, i, j);
}
}
- ret = enif_make_resource(env, mxS);
- enif_release_resource(env, mxS);
+ ret = enif_make_resource(env, mxS.p);
+ enif_release_resource(mxS.p);
return ret;
}
static ERL_NIF_TERM size_of(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
/* size(Matrix) -> {Nrows, Ncols} */
- Matrix* mx;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mx)) {
+ mx_t mx;
+ if (!enif_get_resource(env, argv[0], resource_type, &mx.vp)) {
return enif_make_badarg(env);
}
- return enif_make_tuple2(env, enif_make_uint(env, mx->nrows),
- enif_make_uint(env, mx->ncols));
+ return enif_make_tuple2(env, enif_make_uint(env, mx.p->nrows),
+ enif_make_uint(env, mx.p->ncols));
}
static ERL_NIF_TERM to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -157,16 +171,17 @@ static ERL_NIF_TERM to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
/* to_term(Matrix) -> [[first row], [second row], ...,[last row]] */
unsigned i, j;
ERL_NIF_TERM res;
- Matrix* mx = NULL;
+ mx_t mx;
+ mx.p = NULL;
- if (!enif_get_resource(env, argv[0], resource_type, (void**)&mx)) {
+ if (!enif_get_resource(env, argv[0], resource_type, &mx.vp)) {
return enif_make_badarg(env);
}
res = enif_make_list(env, 0);
- for (i = mx->nrows; i-- > 0; ) {
+ for (i = mx.p->nrows; i-- > 0; ) {
ERL_NIF_TERM row = enif_make_list(env, 0);
- for (j = mx->ncols; j-- > 0; ) {
- row = enif_make_list_cell(env, enif_make_double(env, POS(mx,i,j)),
+ for (j = mx.p->ncols; j-- > 0; ) {
+ row = enif_make_list_cell(env, enif_make_double(env, POS(mx.p,i,j)),
row);
}
res = enif_make_list_cell(env, row, res);
@@ -183,17 +198,17 @@ static int get_number(ErlNifEnv* env, ERL_NIF_TERM term, double* dp)
static Matrix* alloc_matrix(ErlNifEnv* env, unsigned nrows, unsigned ncols)
{
- Matrix* mx = enif_alloc_resource(env, resource_type, sizeof(Matrix));
+ Matrix* mx = enif_alloc_resource(resource_type, sizeof(Matrix));
mx->nrows = nrows;
mx->ncols = ncols;
- mx->data = enif_alloc(env, nrows*ncols*sizeof(double));
+ mx->data = enif_alloc(nrows*ncols*sizeof(double));
return mx;
}
static void matrix_dtor(ErlNifEnv* env, void* obj)
{
Matrix* mx = (Matrix*) obj;
- enif_free(env, mx->data);
+ enif_free(mx->data);
mx->data = NULL;
}
diff --git a/erts/include/erl_int_sizes_config.h.in b/erts/include/erl_int_sizes_config.h.in
index 4b8a8e1b98..14d7c6a702 100644
--- a/erts/include/erl_int_sizes_config.h.in
+++ b/erts/include/erl_int_sizes_config.h.in
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,5 +32,8 @@
/* The number of bytes in a long long. */
#undef SIZEOF_LONG_LONG
+/* The size of a pointer. */
+#undef SIZEOF_VOID_P
+
/* Define if building a halfword-heap 64bit emulator (needed for NIF's) */
#undef HALFWORD_HEAP_EMULATOR
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
index 1caf4d0567..612894b8c1 100644
--- a/erts/include/internal/ethr_atomics.h
+++ b/erts/include/internal/ethr_atomics.h
@@ -1,7 +1,16 @@
/*
+ * --------------- DO NOT EDIT THIS FILE! ---------------
+ * This file was automatically generated by the
+ * $ERL_TOP/erts/lib_src/utils/make_atomics_api script.
+ * If you need to make changes, edit the script and
+ * regenerate this file.
+ * --------------- DO NOT EDIT THIS FILE! ---------------
+ */
+
+/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -18,709 +27,9141 @@
*/
/*
- * Description: The ethread atomic API
+ * Description: The ethread atomics API
* Author: Rickard Green
*/
-#ifndef ETHR_ATOMIC_H__
-#define ETHR_ATOMIC_H__
+/*
+ * This file maps native atomic implementations to ethread
+ * API atomics. If no native atomic implementation
+ * is available, a less efficient fallback is used instead.
+ * The API consists of 32-bit size, word size (pointer size),
+ * and double word size atomics.
+ *
+ * The following atomic operations are implemented for
+ * 32-bit size, and word size atomics:
+ * - cmpxchg
+ * - xchg
+ * - set
+ * - init
+ * - add_read
+ * - read
+ * - inc_read
+ * - dec_read
+ * - add
+ * - inc
+ * - dec
+ * - read_band
+ * - read_bor
+ *
+ * The following atomic operations are implemented for
+ * double word size atomics:
+ * - cmpxchg
+ * - set
+ * - read
+ * - init
+ *
+ * Apart from a function implementing the atomic operation
+ * with unspecified memory barrier semantics, there are
+ * functions implementing each operation with the following
+ * implied memory barrier semantics:
+ * - mb - Full memory barrier. Orders both loads, and
+ * stores before, and after the atomic operation.
+ * No load or store is allowed to be reordered
+ * over the atomic operation.
+ * - relb - Release barrier. Orders both loads, and
+ * stores appearing *before* the atomic
+ * operation. These are not allowed to be
+ * reordered over the atomic operation.
+ * - acqb - Acquire barrier. Orders both loads, and stores
+ * appearing *after* the atomic operation. These
+ * are not allowed to be reordered over the
+ * atomic operation.
+ * - wb - Write barrier. Orders *only* stores. These are
+ * not allowed to be reordered over the barrier.
+ * Store in atomic operation is ordered *after*
+ * the barrier.
+ * - rb - Read barrier. Orders *only* loads. These are
+ * not allowed to be reordered over the barrier.
+ * Load in atomic operation is ordered *before*
+ * the barrier.
+ * - ddrb - Data dependency read barrier. Orders *only*
+ * loads according to data dependency across the
+ * barrier. Load in atomic operation is ordered
+ * before the barrier.
+ *
+ * We implement all of these operation/barrier
+ * combinations, regardless of whether they are useful
+ * or not (some of them are useless).
+ *
+ * Double word size atomic functions are on the following
+ * form:
+ * ethr_dw_atomic_<OP>[_<BARRIER>]
+ *
+ * Word size atomic functions are on the following
+ * form:
+ * ethr_atomic_<OP>[_<BARRIER>]
+ *
+ * 32-bit size atomic functions are on the following
+ * form:
+ * ethr_atomic32_<OP>[_<BARRIER>]
+ *
+ * Apart from the operation/barrier functions
+ * described above also 'addr' functions are implemented
+ * which return the actual memory address used of the
+ * atomic variable. The 'addr' functions have no barrier
+ * versions.
+ *
+ * The native atomic implementation does not need to
+ * implement all operation/barrier combinations.
+ * Functions that have no native implementation will be
+ * constructed from existing native functionality. These
+ * functions will perform the wanted operation and will
+ * produce sufficient memory barriers, but may
+ * in some cases be less efficient than pure native
+ * versions.
+ *
+ * When we create ethread API operation/barrier functions by
+ * adding barriers before and after native operations it is
+ * assumed that:
+ * - A native read operation begins, and ends with a load.
+ * - A native set operation begins, and ends with a store.
+ * - An init operation begins with either a load, or a store,
+ * and ends with either a load, or a store.
+ * - All other operations begins with a load, and ends with
+ * either a load, or a store.
+ *
+ * This is the minimum functionality that a native
+ * implementation needs to provide:
+ *
+ * - Functions that need to be implemented:
+ *
+ * - ethr_native_[dw_|su_dw_]atomic[BITS]_addr
+ * - ethr_native_[dw_|su_dw_]atomic[BITS]_cmpxchg[_<BARRIER>]
+ * (at least one cmpxchg with optional barrier)
+ *
+ * - Macros that need to be defined:
+ *
+ * A macro informing about the presence of the native
+ * implementation:
+ *
+ * - ETHR_HAVE_NATIVE_[DW_|SU_DW_]ATOMIC[BITS]
+ *
+ * A macro naming (a string constant) the implementation:
+ *
+ * - ETHR_NATIVE_[DW_]ATOMIC[BITS]_IMPL
+ *
+ * Each implemented native atomic function has to
+ * be accompanied by a defined macro on the following
+ * form informing about its presence:
+ *
+ * - ETHR_HAVE_ETHR_NATIVE_[DW_|SU_DW_]ATOMIC[BITS]_<OP>[_<BARRIER>]
+ *
+ * A (sparc-v9 style) membar macro:
+ *
+ * - ETHR_MEMBAR(B)
+ *
+ * Which takes a combination of the following macros
+ * or:ed (using |) together:
+ *
+ * - ETHR_LoadLoad
+ * - ETHR_LoadStore
+ * - ETHR_StoreLoad
+ * - ETHR_StoreStore
+ *
+ */
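/*
 * Editor's illustrative sketch (not part of the generated header): how the
 * operation/barrier naming convention described above maps onto concrete
 * word-size calls. The reference-count scenario is hypothetical; the
 * function names (ethr_atomic_<OP>[_<BARRIER>]) are the ones this API
 * declares.
 */
#if 0 /* illustration only */
static void ethr_atomics_usage_sketch(ethr_atomic_t *refc)
{
    ethr_sint_t current;

    ethr_atomic_init(refc, 1);              /* plain init, no barrier implied */
    ethr_atomic_inc(refc);                  /* plain increment, no barrier    */

    current = ethr_atomic_read_acqb(refc);  /* read with acquire barrier      */
    (void) current;

    if (ethr_atomic_dec_read_relb(refc) == 0) {
        /* 'relb' orders the loads and stores issued before the decrement,
         * which is the usual requirement when dropping a reference. */
    }
}
#endif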
-#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-# define ETHR_NEED_ATOMIC_PROTOTYPES__
-#endif
+#ifndef ETHR_ATOMICS_H__
+#define ETHR_ATOMICS_H__
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
+#undef ETHR_AMC_FALLBACK__
+#undef ETHR_AMC_NO_ATMCS__
+#undef ETHR_AMC_ATMC_T__
+#undef ETHR_AMC_ATMC_FUNC__
+
+/* -- 32-bit atomics -- */
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+#undef ETHR_NATMC32_ADDR_FUNC__
+#undef ETHR_NATMC32_BITS__
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+# define ETHR_NEED_NATMC32_ADDR
+# define ETHR_NATMC32_ADDR_FUNC__ ethr_native_atomic32_addr
+typedef ethr_native_atomic32_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint32_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
+# define ETHR_NATMC32_BITS__ 32
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NEED_NATMC64_ADDR
+#ifdef ETHR_BIGENDIAN
+# define ETHR_NATMC32_ADDR_FUNC__(VAR) \
+ (((ethr_sint32_t *) ethr_native_atomic64_addr((VAR))) + 1)
+#else
+# define ETHR_NATMC32_ADDR_FUNC__(VAR) \
+ ((ethr_sint32_t *) ethr_native_atomic64_addr((VAR)))
+#endif
+typedef ethr_native_atomic64_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint64_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC32_BITS__ 64
+#else
/*
- * No native atomic implementation available. :(
+ * No native atomics usable for 32-bits atomics :(
* Use fallback...
*/
typedef ethr_sint32_t ethr_atomic32_t;
-typedef ethr_sint_t ethr_atomic_t;
-#else
-/*
- * Map ethread native atomics to ethread API atomics.
- *
- * We do at least have a native atomic implementation that
- * can handle integers of a size larger than or equal to
- * the size of pointers.
- */
+#endif
+
+#undef ETHR_ATMC32_INLINE__
+#ifdef ETHR_NATMC32_BITS__
+# ifdef ETHR_TRY_INLINE_FUNCS
+# define ETHR_ATMC32_INLINE__
+# endif
+# define ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS
+#endif
+
+#if !defined(ETHR_ATMC32_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATMC32_PROTOTYPES__
+#endif
+
+#ifndef ETHR_INLINE_ATMC32_FUNC_NAME_
+# define ETHR_INLINE_ATMC32_FUNC_NAME_(X) X
+#endif
+
+#undef ETHR_ATMC32_FUNC__
+#define ETHR_ATMC32_FUNC__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
-/* -- Pointer size atomics -- */
+
+/* -- Word size atomics -- */
+
+#undef ETHR_NEED_NATMC32_ADDR
+#undef ETHR_NEED_NATMC64_ADDR
#undef ETHR_NAINT_T__
#undef ETHR_NATMC_FUNC__
#undef ETHR_NATMC_ADDR_FUNC__
-#if ETHR_SIZEOF_PTR == 8
-# if defined(ETHR_HAVE_NATIVE_ATOMIC64)
-# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
+#undef ETHR_NATMC_BITS__
+#if ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC64_ADDR
+# endif
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
typedef ethr_native_atomic64_t ethr_atomic_t;
-# define ETHR_NAINT_T__ ethr_sint64_t
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
-# else
-# error "Missing native atomic implementation"
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC_BITS__ 64
+#elif ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC32)
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC32_ADDR
# endif
-#elif ETHR_SIZEOF_PTR == 4
# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic32_addr
-# ifdef ETHR_HAVE_NATIVE_ATOMIC32
typedef ethr_native_atomic32_t ethr_atomic_t;
-# define ETHR_NAINT_T__ ethr_sint32_t
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
-# elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# define ETHR_NATMC_BITS__ 32
+#elif ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC64_ADDR
+# endif
+#ifdef ETHR_BIGENDIAN
+# define ETHR_NATMC_ADDR_FUNC__(VAR) \
+ (((ethr_sint32_t *) ethr_native_atomic64_addr((VAR))) + 1)
+#else
+# define ETHR_NATMC_ADDR_FUNC__(VAR) \
+ ((ethr_sint32_t *) ethr_native_atomic64_addr((VAR)))
+#endif
typedef ethr_native_atomic64_t ethr_atomic_t;
-# define ETHR_NATMC_T__ ethr_native_atomic64_t
-# define ETHR_NAINT_T__ ethr_sint64_t
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC_T__ ethr_native_atomic64_t
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC_BITS__ 64
+#else
+/*
+ * No native atomics usable for pointer size atomics :(
+ * Use fallback...
+ */
+
+# if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+# define ETHR_AMC_FALLBACK__
+# define ETHR_AMC_NO_ATMCS__ 2
+# define ETHR_AMC_SINT_T__ ethr_sint32_t
+# define ETHR_AMC_ATMC_T__ ethr_atomic32_t
+# define ETHR_AMC_ATMC_FUNC__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+typedef struct {
+ ETHR_AMC_ATMC_T__ atomic[ETHR_AMC_NO_ATMCS__];
+} ethr_amc_t;
+typedef struct {
+ ethr_amc_t amc;
+ ethr_sint_t sint;
+} ethr_atomic_t;
+# else /* locked fallback */
+typedef ethr_sint_t ethr_atomic_t;
+# endif
+#endif
+
+#undef ETHR_ATMC_INLINE__
+#ifdef ETHR_NATMC_BITS__
+# ifdef ETHR_TRY_INLINE_FUNCS
+# define ETHR_ATMC_INLINE__
+# endif
+# define ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS
+#endif
+
+#if !defined(ETHR_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATMC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_INLINE_ATMC_FUNC_NAME_
+# define ETHR_INLINE_ATMC_FUNC_NAME_(X) X
+#endif
+
+#undef ETHR_ATMC_FUNC__
+#define ETHR_ATMC_FUNC__(X) ETHR_INLINE_ATMC_FUNC_NAME_(ethr_atomic_ ## X)
+
+/* -- Double word atomics -- */
+
+#undef ETHR_SU_DW_NAINT_T__
+#undef ETHR_SU_DW_NATMC_FUNC__
+#undef ETHR_SU_DW_NATMC_ADDR_FUNC__
+#undef ETHR_DW_NATMC_FUNC__
+#undef ETHR_DW_NATMC_ADDR_FUNC__
+#undef ETHR_DW_NATMC_BITS__
+#if defined(ETHR_HAVE_NATIVE_DW_ATOMIC) || defined(ETHR_HAVE_NATIVE_SU_DW_ATOMIC)
+# define ETHR_NEED_DW_NATMC_ADDR
+# define ETHR_DW_NATMC_ADDR_FUNC__ ethr_native_dw_atomic_addr
+# define ETHR_NATIVE_DW_ATOMIC_T__ ethr_native_dw_atomic_t
+# define ETHR_DW_NATMC_FUNC__(X) ethr_native_dw_atomic_ ## X
+# define ETHR_SU_DW_NATMC_FUNC__(X) ethr_native_su_dw_atomic_ ## X
+# if ETHR_SIZEOF_PTR == 8
+# define ETHR_DW_NATMC_BITS__ 128
+# elif ETHR_SIZEOF_PTR == 4
+# define ETHR_DW_NATMC_BITS__ 64
# else
-# error "Missing native atomic implementation"
+# error "Word size not supported"
+# endif
+# ifdef ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_SU_DW_NAINT_T__ ETHR_NATIVE_SU_DW_SINT_T
# endif
+#elif ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_HAVE_NATIVE_SU_DW_ATOMIC
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC64_ADDR
+# endif
+# define ETHR_DW_NATMC_ADDR_FUNC__(VAR) \
+ ((ethr_dw_sint_t *) ethr_native_atomic64_addr((VAR)))
+# define ETHR_NATIVE_DW_ATOMIC_T__ ethr_native_atomic64_t
+# define ETHR_SU_DW_NAINT_T__ ethr_sint64_t
+# define ETHR_SU_DW_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_DW_NATMC_BITS__ 64
#endif
-/* -- 32-bit atomics -- */
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+#define ETHR_DW_ATOMIC_FUNC__(X) ethr_dw_atomic_ ## X ## _fallback__
+#else
+#define ETHR_DW_ATOMIC_FUNC__(X) ethr_dw_atomic_ ## X
+#endif
-#undef ETHR_NAINT32_T__
-#undef ETHR_NATMC32_FUNC__
-#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
-typedef ethr_native_atomic32_t ethr_atomic32_t;
-# define ETHR_NAINT32_T__ ethr_sint32_t
-# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
-#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
-typedef ethr_native_atomic64_t ethr_atomic32_t;
-# define ETHR_NAINT32_T__ ethr_sint64_t
-# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+#if !defined(ETHR_DW_NATMC_BITS__) || defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+# define ETHR_NEED_DW_FALLBACK__
+#endif
+
+#if defined(ETHR_NEED_DW_FALLBACK__)
+/*
+ * No native atomics usable for double word atomics :(
+ * Use fallback...
+ */
+
+# ifndef ETHR_AMC_FALLBACK__
+# if ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+# define ETHR_AMC_FALLBACK__
+# define ETHR_AMC_NO_ATMCS__ 1
+# define ETHR_AMC_SINT_T__ ethr_sint_t
+# define ETHR_AMC_ATMC_T__ ethr_atomic_t
+# define ETHR_AMC_ATMC_FUNC__(X) ETHR_INLINE_ATMC_FUNC_NAME_(ethr_atomic_ ## X)
+# elif defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+# define ETHR_AMC_FALLBACK__
+# define ETHR_AMC_NO_ATMCS__ 2
+# define ETHR_AMC_SINT_T__ ethr_sint32_t
+# define ETHR_AMC_ATMC_T__ ethr_atomic32_t
+# define ETHR_AMC_ATMC_FUNC__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+# endif
+# ifdef ETHR_AMC_FALLBACK__
+typedef struct {
+ ETHR_AMC_ATMC_T__ atomic[ETHR_AMC_NO_ATMCS__];
+} ethr_amc_t;
+# endif
+# endif
+
+typedef struct {
+#ifdef ETHR_AMC_FALLBACK__
+ ethr_amc_t amc;
+#endif
+ ethr_sint_t sint[2];
+} ethr_dw_atomic_fallback_t;
+
+#endif
+
+typedef union {
+#ifdef ETHR_NATIVE_DW_ATOMIC_T__
+ ETHR_NATIVE_DW_ATOMIC_T__ native;
+#endif
+#ifdef ETHR_NEED_DW_FALLBACK__
+ ethr_dw_atomic_fallback_t fallback;
+#endif
+ ethr_sint_t sint[2];
+} ethr_dw_atomic_t;
+
+typedef union {
+#ifdef ETHR_SU_DW_NAINT_T__
+ ETHR_SU_DW_NAINT_T__ dw_sint;
+#endif
+ ethr_sint_t sint[2];
+} ethr_dw_sint_t;
+
+#ifdef ETHR_BIGENDIAN
+# define ETHR_DW_SINT_LOW_WORD 1
+# define ETHR_DW_SINT_HIGH_WORD 0
#else
-# error "Missing native atomic implementation"
+# define ETHR_DW_SINT_LOW_WORD 0
+# define ETHR_DW_SINT_HIGH_WORD 1
#endif
+#undef ETHR_DW_ATMC_INLINE__
+#ifdef ETHR_DW_NATMC_BITS__
+# ifdef ETHR_TRY_INLINE_FUNCS
+# define ETHR_DW_ATMC_INLINE__
+# endif
+# define ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS
#endif
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *);
-void ethr_atomic_init(ethr_atomic_t *, ethr_sint_t);
-void ethr_atomic_set(ethr_atomic_t *, ethr_sint_t);
-ethr_sint_t ethr_atomic_read(ethr_atomic_t *);
-ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *);
-ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *);
-void ethr_atomic_inc(ethr_atomic_t *);
-void ethr_atomic_dec(ethr_atomic_t *);
-ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *, ethr_sint_t);
-void ethr_atomic_add(ethr_atomic_t *, ethr_sint_t);
-ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *, ethr_sint_t);
-ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *, ethr_sint_t);
-ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *, ethr_sint_t);
-ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
-ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *);
-ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *);
-void ethr_atomic_set_relb(ethr_atomic_t *, ethr_sint_t);
-void ethr_atomic_dec_relb(ethr_atomic_t *);
-ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *);
-ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
-ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
-
-ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *);
-void ethr_atomic32_init(ethr_atomic32_t *, ethr_sint32_t);
-void ethr_atomic32_set(ethr_atomic32_t *, ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *);
-ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *);
-ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *);
-void ethr_atomic32_inc(ethr_atomic32_t *);
-void ethr_atomic32_dec(ethr_atomic32_t *);
-ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *, ethr_sint32_t);
-void ethr_atomic32_add(ethr_atomic32_t *, ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *, ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *, ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *, ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *,
- ethr_sint32_t,
- ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *);
-ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *);
-void ethr_atomic32_set_relb(ethr_atomic32_t *, ethr_sint32_t);
-void ethr_atomic32_dec_relb(ethr_atomic32_t *);
-ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *);
-ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *,
- ethr_sint32_t,
- ethr_sint32_t);
-ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *,
- ethr_sint32_t,
- ethr_sint32_t);
+#if !defined(ETHR_DW_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_DW_ATMC_PROTOTYPES__
#endif
-int ethr_init_atomics(void);
+#ifndef ETHR_INLINE_DW_ATMC_FUNC_NAME_
+# define ETHR_INLINE_DW_ATMC_FUNC_NAME_(X) X
+#endif
+
+#undef ETHR_DW_ATMC_FUNC__
+#define ETHR_DW_ATMC_FUNC__(X) ETHR_INLINE_DW_ATMC_FUNC_NAME_(ethr_dw_atomic_ ## X)
+
+#if defined(ETHR_NEED_DW_ATMC_PROTOTYPES__)
+int ethr_have_native_dw_atomic(void);
+#endif
+#if defined(ETHR_DW_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+static ETHR_INLINE int
+ETHR_INLINE_DW_ATMC_FUNC_NAME_(ethr_have_native_dw_atomic)(void)
+{
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ return ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__;
+#elif defined(ETHR_DW_NATMC_BITS__)
+ return 1;
+#else
+ return 0;
+#endif
+}
+#endif
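
A brief usage sketch (not part of the patch) of the runtime check above; callers that need to know which implementation will actually be used can branch on ethr_have_native_dw_atomic(). The surrounding function name is hypothetical.

/* Illustrative sketch; report_dw_impl_sketch() is hypothetical. */
static const char *
report_dw_impl_sketch(void)
{
    return ethr_have_native_dw_atomic()
        ? "native double word atomics"
        : "lock/word based fallback";
}
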
+
+/* -- Misc -- */
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+/*
+ * Unusual values are used by read() fallbacks implemented via cmpxchg().
+ * We want to use an unusual value in the hope that it is more efficient
+ * not to match the value in memory.
+ *
+ * - Negative integer values are probably more unusual.
+ * - Very large absolute integer values are probably more unusual.
+ * - Odd pointers are probably more unusual (only char pointers can be odd).
+ */
+# define ETHR_UNUSUAL_SINT32_VAL__ ((ethr_sint32_t) 0x81818181)
+# if ETHR_SIZEOF_PTR == 4
+# define ETHR_UNUSUAL_SINT_VAL__ ((ethr_sint_t) ETHR_UNUSUAL_SINT32_VAL__)
+# elif ETHR_SIZEOF_PTR == 8
+# define ETHR_UNUSUAL_SINT_VAL__ ((ethr_sint_t) 0x8181818181818181L)
+# else
+# error "Word size not supported"
+# endif
+# if defined(ETHR_NEED_DW_NATMC_ADDR) && !defined(ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR)
+# error "No ethr_native_dw_atomic_addr() available"
+# endif
+# if defined(ETHR_NEED_NATMC32_ADDR) && !defined(ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR)
+# error "No ethr_native_atomic32_addr() available"
+# endif
+# if defined(ETHR_NEED_NATMC64_ADDR) && !defined(ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR)
+# error "No ethr_native_atomic64_addr() available"
+# endif
+#endif
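
To illustrate the comment above (a sketch, not part of the patch): a read() that has to be emulated with cmpxchg() typically passes ETHR_UNUSUAL_SINT_VAL__ both as the expected and as the new value, so the compare normally fails, no store is performed, and the value returned by the cmpxchg is the read result; if the compare happens to succeed, the same value is written back, which is harmless. The native_cmpxchg() call and its argument order are hypothetical.

/* Illustrative sketch; native_cmpxchg(var, new, expected) is hypothetical
 * and is assumed to return the value found in memory. */
static ethr_sint_t
read_via_cmpxchg_sketch(ethr_sint_t *var)
{
    return native_cmpxchg(var,
                          ETHR_UNUSUAL_SINT_VAL__,  /* new */
                          ETHR_UNUSUAL_SINT_VAL__); /* expected */
}
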
+
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#endif
+
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
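
ETHR_COMPILER_BARRIER only restrains the compiler (no fence instruction is emitted): it prevents memory accesses from being reordered, merged, or cached in registers across the barrier. A minimal sketch (not part of the patch; wait_for_flag_sketch() is hypothetical) of the kind of spin loop where this matters:

/* Illustrative sketch; forces the compiler to reload *flag each iteration.
 * Hardware ordering, if needed, still requires a real memory barrier. */
static void
wait_for_flag_sketch(int *flag)
{
    while (!*flag)
        ETHR_COMPILER_BARRIER;
}
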
+
+int ethr_init_atomics(void);
+
+/* info */
+char **ethr_native_atomic32_ops(void);
+char **ethr_native_atomic64_ops(void);
+char **ethr_native_dw_atomic_ops(void);
+char **ethr_native_su_dw_atomic_ops(void);
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
+#if !defined(ETHR_DW_NATMC_BITS__) && !defined(ETHR_NATMC_BITS__) && !defined(ETHR_NATMC32_BITS__)
/*
- * Fallbacks for atomics used in absence of a native implementation.
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e. when no native atomic implementation exists and only our lock
+ * based atomic fallback is used, a noop is sufficient.
*/
+# undef ETHR_MEMORY_BARRIER
+# undef ETHR_WRITE_MEMORY_BARRIER
+# undef ETHR_READ_MEMORY_BARRIER
+# undef ETHR_READ_DEPEND_MEMORY_BARRIER
+# undef ETHR_MEMBAR
+# define ETHR_MEMBAR(B) do { } while (0)
+#endif
-#define ETHR_ATOMIC_ADDR_BITS 10
-#define ETHR_ATOMIC_ADDR_SHIFT 6
+#ifndef ETHR_MEMBAR
+# error "No ETHR_MEMBAR defined"
+#endif
-typedef struct {
- union {
- ethr_spinlock_t lck;
- char buf[ETHR_CACHE_LINE_SIZE];
- } u;
-} ethr_atomic_protection_t;
+#define ETHR_MEMORY_BARRIER ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore)
+#define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMBAR(ETHR_StoreStore)
+#define ETHR_READ_MEMORY_BARRIER ETHR_MEMBAR(ETHR_LoadLoad)
+#ifdef ETHR_READ_DEPEND_MEMORY_BARRIER
+# undef ETHR_ORDERED_READ_DEPEND
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_ORDERED_READ_DEPEND
+#endif
+
+
+/* ---------- Double word size atomic implementation ---------- */
+
+
+#ifdef ETHR_NEED_DW_ATMC_PROTOTYPES__
+ethr_sint_t *ethr_dw_atomic_addr(ethr_dw_atomic_t *var);
+int ethr_dw_atomic_cmpxchg(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ethr_dw_atomic_cmpxchg_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ethr_dw_atomic_cmpxchg_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ethr_dw_atomic_cmpxchg_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ethr_dw_atomic_cmpxchg_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ethr_dw_atomic_cmpxchg_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ethr_dw_atomic_cmpxchg_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+void ethr_dw_atomic_set(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_set_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_set_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_set_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_set_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_set_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_set_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_read_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ethr_dw_atomic_init_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ethr_sint_t *ETHR_DW_ATOMIC_FUNC__(addr)(ethr_dw_atomic_t *var);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val);
+void ETHR_DW_ATOMIC_FUNC__(set)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(set_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(set_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(set_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(set_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(set_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(set_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(read_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+void ETHR_DW_ATOMIC_FUNC__(init_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val);
+#endif
+#endif /* ETHR_NEED_DW_ATMC_PROTOTYPES__ */
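
A minimal usage sketch of the double word API declared above (not part of the patch; it assumes the ethread library has been initialized elsewhere, and the function names are hypothetical). Both halves of the variable are updated atomically with the usual cmpxchg() retry loop; on failure the cmpxchg updates old_val with the value actually found, so no extra read is needed per retry.

/* Illustrative sketch; dw_counter_init_sketch()/dw_counter_bump_sketch()
 * are hypothetical. */
static void
dw_counter_init_sketch(ethr_dw_atomic_t *var)
{
    ethr_dw_sint_t val;
    val.sint[0] = 0;
    val.sint[1] = 0;
    ethr_dw_atomic_init(var, &val);
}

static void
dw_counter_bump_sketch(ethr_dw_atomic_t *var)
{
    ethr_dw_sint_t new_val, old_val;
    ethr_dw_atomic_read(var, &old_val);
    do {
        new_val.sint[0] = old_val.sint[0] + 1; /* e.g. a counter... */
        new_val.sint[1] = old_val.sint[1];     /* ...and an unchanged tag */
        /* On failure old_val is overwritten with the actual value. */
    } while (!ethr_dw_atomic_cmpxchg(var, &new_val, &old_val));
}
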
+
+#if (defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) \
+ && (defined(ETHR_DW_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)))
+
+#if !defined(ETHR_DW_NATMC_BITS__)
+# error "Missing native atomic implementation"
+#elif defined(ETHR_HAVE_NATIVE_DW_ATOMIC) || defined(ETHR_HAVE_NATIVE_SU_DW_ATOMIC)
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG
+# define ETHR_HAVE_DW_NATMC_CMPXCHG 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RB
+# define ETHR_HAVE_DW_NATMC_CMPXCHG_RB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_WB
+# define ETHR_HAVE_DW_NATMC_CMPXCHG_WB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_WB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_ACQB
+# define ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RELB
+# define ETHR_HAVE_DW_NATMC_CMPXCHG_RELB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RELB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_MB
+# define ETHR_HAVE_DW_NATMC_CMPXCHG_MB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_MB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_SET
+# undef ETHR_HAVE_DW_NATMC_SET
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET
+# define ETHR_HAVE_DW_NATMC_SET 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET
+# define ETHR_HAVE_SU_DW_NATMC_SET 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_SET_RB
+# undef ETHR_HAVE_DW_NATMC_SET_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_RB
+# define ETHR_HAVE_DW_NATMC_SET_RB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_RB
+# define ETHR_HAVE_SU_DW_NATMC_SET_RB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_SET_WB
+# undef ETHR_HAVE_DW_NATMC_SET_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_WB
+# define ETHR_HAVE_DW_NATMC_SET_WB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_WB
+# define ETHR_HAVE_SU_DW_NATMC_SET_WB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_SET_ACQB
+# undef ETHR_HAVE_DW_NATMC_SET_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_ACQB
+# define ETHR_HAVE_DW_NATMC_SET_ACQB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_SET_ACQB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_SET_RELB
+# undef ETHR_HAVE_DW_NATMC_SET_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_RELB
+# define ETHR_HAVE_DW_NATMC_SET_RELB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_RELB
+# define ETHR_HAVE_SU_DW_NATMC_SET_RELB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_SET_MB
+# undef ETHR_HAVE_DW_NATMC_SET_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_MB
+# define ETHR_HAVE_DW_NATMC_SET_MB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_MB
+# define ETHR_HAVE_SU_DW_NATMC_SET_MB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_READ
+# undef ETHR_HAVE_DW_NATMC_READ
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ
+# define ETHR_HAVE_DW_NATMC_READ 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ
+# define ETHR_HAVE_SU_DW_NATMC_READ 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_READ_RB
+# undef ETHR_HAVE_DW_NATMC_READ_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_RB
+# define ETHR_HAVE_DW_NATMC_READ_RB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_RB
+# define ETHR_HAVE_SU_DW_NATMC_READ_RB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_READ_WB
+# undef ETHR_HAVE_DW_NATMC_READ_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_WB
+# define ETHR_HAVE_DW_NATMC_READ_WB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_WB
+# define ETHR_HAVE_SU_DW_NATMC_READ_WB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_READ_ACQB
+# undef ETHR_HAVE_DW_NATMC_READ_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_ACQB
+# define ETHR_HAVE_DW_NATMC_READ_ACQB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_READ_ACQB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_READ_RELB
+# undef ETHR_HAVE_DW_NATMC_READ_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_RELB
+# define ETHR_HAVE_DW_NATMC_READ_RELB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_RELB
+# define ETHR_HAVE_SU_DW_NATMC_READ_RELB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_READ_MB
+# undef ETHR_HAVE_DW_NATMC_READ_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_MB
+# define ETHR_HAVE_DW_NATMC_READ_MB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_MB
+# define ETHR_HAVE_SU_DW_NATMC_READ_MB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_INIT
+# undef ETHR_HAVE_DW_NATMC_INIT
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT
+# define ETHR_HAVE_DW_NATMC_INIT 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT
+# define ETHR_HAVE_SU_DW_NATMC_INIT 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_RB
+# undef ETHR_HAVE_DW_NATMC_INIT_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_RB
+# define ETHR_HAVE_DW_NATMC_INIT_RB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_RB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_RB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_WB
+# undef ETHR_HAVE_DW_NATMC_INIT_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_WB
+# define ETHR_HAVE_DW_NATMC_INIT_WB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_WB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_WB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_ACQB
+# undef ETHR_HAVE_DW_NATMC_INIT_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_ACQB
+# define ETHR_HAVE_DW_NATMC_INIT_ACQB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_ACQB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_RELB
+# undef ETHR_HAVE_DW_NATMC_INIT_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_RELB
+# define ETHR_HAVE_DW_NATMC_INIT_RELB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_RELB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_RELB 1
+# endif
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_MB
+# undef ETHR_HAVE_DW_NATMC_INIT_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_MB
+# define ETHR_HAVE_DW_NATMC_INIT_MB 1
+# endif
+# ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_MB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_MB 1
+# endif
+#elif ETHR_DW_NATMC_BITS__ == 64
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_RB
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_WB
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_WB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_RELB
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RELB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_CMPXCHG_MB
+# undef ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB
+# define ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_SET
+# undef ETHR_HAVE_SU_DW_NATMC_SET
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET
+# define ETHR_HAVE_SU_DW_NATMC_SET 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_SET_RB
+# undef ETHR_HAVE_SU_DW_NATMC_SET_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RB
+# define ETHR_HAVE_SU_DW_NATMC_SET_RB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_SET_WB
+# undef ETHR_HAVE_SU_DW_NATMC_SET_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_WB
+# define ETHR_HAVE_SU_DW_NATMC_SET_WB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_SET_ACQB
+# undef ETHR_HAVE_SU_DW_NATMC_SET_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_SET_ACQB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_SET_RELB
+# undef ETHR_HAVE_SU_DW_NATMC_SET_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB
+# define ETHR_HAVE_SU_DW_NATMC_SET_RELB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_SET_MB
+# undef ETHR_HAVE_SU_DW_NATMC_SET_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB
+# define ETHR_HAVE_SU_DW_NATMC_SET_MB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_READ
+# undef ETHR_HAVE_SU_DW_NATMC_READ
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ
+# define ETHR_HAVE_SU_DW_NATMC_READ 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_READ_RB
+# undef ETHR_HAVE_SU_DW_NATMC_READ_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RB
+# define ETHR_HAVE_SU_DW_NATMC_READ_RB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_READ_WB
+# undef ETHR_HAVE_SU_DW_NATMC_READ_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_WB
+# define ETHR_HAVE_SU_DW_NATMC_READ_WB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_READ_ACQB
+# undef ETHR_HAVE_SU_DW_NATMC_READ_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_READ_ACQB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_READ_RELB
+# undef ETHR_HAVE_SU_DW_NATMC_READ_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RELB
+# define ETHR_HAVE_SU_DW_NATMC_READ_RELB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_READ_MB
+# undef ETHR_HAVE_SU_DW_NATMC_READ_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_MB
+# define ETHR_HAVE_SU_DW_NATMC_READ_MB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_INIT
+# undef ETHR_HAVE_SU_DW_NATMC_INIT
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT
+# define ETHR_HAVE_SU_DW_NATMC_INIT 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_INIT_RB
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_RB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_INIT_WB
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_WB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_WB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_INIT_ACQB
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_ACQB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_ACQB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_INIT_RELB
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RELB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_RELB 1
+# endif
+# undef ETHR_HAVE_DW_NATMC_INIT_MB
+# undef ETHR_HAVE_SU_DW_NATMC_INIT_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_MB
+# define ETHR_HAVE_SU_DW_NATMC_INIT_MB 1
+# endif
+#else
+# error "Invalid native atomic size"
+#endif
-extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
+#if defined(ETHR_HAVE_NATIVE_DW_ATOMIC)
+#if (!defined(ETHR_HAVE_DW_NATMC_CMPXCHG) \
+ && !defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB) \
+ && !defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB) \
+ && !defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB) \
+ && !defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB) \
+ && !defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB))
+# error "No native cmpxchg() op available"
+#endif
+
+
+/*
+ * Read op used together with the cmpxchg() fallback when no native op is present.
+ */
+#if defined(ETHR_HAVE_DW_NATMC_READ)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ ETHR_DW_NATMC_FUNC__(read)(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ VAL.dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(VAR)
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ ETHR_DW_NATMC_FUNC__(read_rb)(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ VAL.dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(VAR)
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ ETHR_DW_NATMC_FUNC__(read_wb)(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ VAL.dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(VAR)
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ ETHR_DW_NATMC_FUNC__(read_acqb)(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ VAL.dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(VAR)
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ ETHR_DW_NATMC_FUNC__(read_relb)(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ VAL.dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(VAR)
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ ETHR_DW_NATMC_FUNC__(read_mb)(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+ VAL.dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(VAR)
+#else
+/*
+ * We have no native read() op; guess zero and then use the
+ * atomic's actual value returned from cmpxchg().
+ */
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, VAL) \
+do { \
+ VAL.sint[0] = (ethr_sint_t) 0; \
+ VAL.sint[1] = (ethr_sint_t) 0; \
+} while (0)
+#endif
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- ethr_spin_lock(slp__); \
- { EXPS; } \
- ethr_spin_unlock(slp__); \
+/*
+ * Fallback based on the native cmpxchg(); used when no native op is present.
+ */
+#define ETHR_DW_NATMC_CMPXCHG_FALLBACK__(CMPXCHG, VAR, AVAL, OPS) \
+do { \
+ int res__; \
+ ethr_dw_sint_t AVAL, exp_act__; \
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR, exp_act__); \
+ do { \
+ AVAL.sint[0] = exp_act__.sint[0]; \
+ AVAL.sint[1] = exp_act__.sint[1]; \
+ { OPS; } \
+ res__ = CMPXCHG(VAR, AVAL.sint, exp_act__.sint); \
+ } while (__builtin_expect(res__ == 0, 0)); \
} while (0)
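
For clarity (a sketch, not part of the patch), the fallback above effectively hand-expands as follows for a plain set() built on a native double word cmpxchg; native_dw_cmpxchg() is a hypothetical native op that, like the real ones, writes the value actually found into its third argument and returns non-zero on success.

/* Illustrative hand expansion; dw_set_via_cmpxchg_sketch() and
 * native_dw_cmpxchg() are hypothetical. */
static void
dw_set_via_cmpxchg_sketch(ETHR_NATIVE_DW_ATOMIC_T__ *var, ethr_dw_sint_t *val)
{
    int res;
    ethr_dw_sint_t aval, exp_act;
    /* Read, or guess, the current value (see the read fallback above). */
    ETHR_DW_NATMC_CMPXCHG_FALLBACK_READ__(var, exp_act);
    do {
        aval.sint[0] = val->sint[0];  /* the OPS argument builds the new value */
        aval.sint[1] = val->sint[1];
        res = native_dw_cmpxchg(var, aval.sint, exp_act.sint);
    } while (res == 0); /* exp_act now holds the value found; retry */
}
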
+
+#elif defined(ETHR_HAVE_NATIVE_SU_DW_ATOMIC)
+
+#if (!defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG) \
+ && !defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB) \
+ && !defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB) \
+ && !defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB) \
+ && !defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB) \
+ && !defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB))
+# error "No native cmpxchg() op available"
#endif
+
+/*
+ * Read op used together with the cmpxchg() fallback when no native op is present.
+ */
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ)
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_SU_DW_NATMC_FUNC__(read)(VAR)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_SU_DW_NATMC_FUNC__(read_rb)(VAR)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_SU_DW_NATMC_FUNC__(read_wb)(VAR)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_SU_DW_NATMC_FUNC__(read_acqb)(VAR)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_SU_DW_NATMC_FUNC__(read_relb)(VAR)
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_SU_DW_NATMC_FUNC__(read_mb)(VAR)
+#else
/*
- * --- Pointer size atomics ---------------------------------------------------
+ * We have no native read() op; guess zero and then use the
+ * atomic's actual value returned from cmpxchg().
*/
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ((ETHR_SU_DW_NAINT_T__) 0)
+#endif
+
+/*
+ * Fallback based on the native cmpxchg(); used when no native op is present.
+ */
+#define ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(CMPXCHG, VAR, AVAL, OPS) \
+do { \
+ ETHR_SU_DW_NAINT_T__ AVAL; \
+ ETHR_SU_DW_NAINT_T__ new__, act__, exp__; \
+ act__ = ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK_READ__(VAR); \
+ do { \
+ exp__ = act__; \
+ AVAL = (ETHR_SU_DW_NAINT_T__) act__; \
+ { OPS; } \
+ new__ = (ETHR_SU_DW_NAINT_T__) AVAL; \
+ act__ = CMPXCHG(VAR, new__, exp__); \
+ } while (__builtin_expect(act__ != exp__, 0)); \
+} while (0)
+
+
+#else
+# error "?!?"
+#endif
+
+
+
+/* --- addr() --- */
+
+static ETHR_INLINE ethr_sint_t *ETHR_DW_ATMC_FUNC__(addr)(ethr_dw_atomic_t *var)
+{
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+ return (ethr_sint_t *) ETHR_DW_NATMC_ADDR_FUNC__((&var->native));
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { return ETHR_DW_ATOMIC_FUNC__(addr)(var); }
+#endif
+
+}
+
+
+/* --- cmpxchg() --- */
+
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->sint, old_val->sint);
+#else
+#error "Missing implementation of ethr_dw_atomic_cmpxchg()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { res = ETHR_DW_ATOMIC_FUNC__(cmpxchg)(var, val, old_val); }
+#endif
+
+ return res;
+}
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_dw_atomic_cmpxchg_rb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { res = ETHR_DW_ATOMIC_FUNC__(cmpxchg_rb)(var, val, old_val); }
+#endif
+
+ return res;
+}
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_StoreStore);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_StoreStore);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_StoreStore);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_StoreStore);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->sint, old_val->sint);
+#else
+#error "Missing implementation of ethr_dw_atomic_cmpxchg_wb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { res = ETHR_DW_ATOMIC_FUNC__(cmpxchg_wb)(var, val, old_val); }
+#endif
+
+ return res;
+}
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_dw_atomic_cmpxchg_acqb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { res = ETHR_DW_ATOMIC_FUNC__(cmpxchg_acqb)(var, val, old_val); }
+#endif
+
+ return res;
+}
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->sint, old_val->sint);
+#else
+#error "Missing implementation of ethr_dw_atomic_cmpxchg_relb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { res = ETHR_DW_ATOMIC_FUNC__(cmpxchg_relb)(var, val, old_val); }
+#endif
+
+ return res;
+}
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NAINT_T__ act;
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->dw_sint, old_val->dw_sint);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NAINT_T__ act;
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ act = ETHR_SU_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->dw_sint, old_val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ res = (act == old_val->dw_sint);
+ old_val->dw_sint = act;
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_mb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_relb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_acqb)(&var->native, val->sint, old_val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_wb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg_rb)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = ETHR_DW_NATMC_FUNC__(cmpxchg)(&var->native, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_dw_atomic_cmpxchg_mb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { res = ETHR_DW_ATOMIC_FUNC__(cmpxchg_mb)(var, val, old_val); }
+#endif
+
+ return res;
+}
+
+static ETHR_INLINE int ETHR_DW_ATMC_FUNC__(cmpxchg_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_DW_ATMC_FUNC__(cmpxchg)(var, val, old_val);
+#else
+ return ETHR_DW_ATMC_FUNC__(cmpxchg_rb)(var, val, old_val);
+#endif
+}
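
The _ddrb ("data dependent read barrier") variant above only needs a barrier on architectures that do not order dependent loads; on all others ETHR_ORDERED_READ_DEPEND is defined and the plain operation suffices. A typical consumer (a sketch, not part of the patch; the struct and function names are hypothetical) reads a double word and then uses it only through a data dependency, e.g. a pointer dereference, which is exactly the case the ddrb variants are meant for.

/* Illustrative sketch; my_blk and read_blk_data_sketch() are hypothetical. */
struct my_blk {
    long data;
};

static long
read_blk_data_sketch(ethr_dw_atomic_t *var)
{
    ethr_dw_sint_t val;
    ethr_dw_atomic_read_ddrb(var, &val);
    /* The dereference depends on the value just read, so no barrier
     * stronger than the data dependency itself is required here. */
    return ((struct my_blk *) val.sint[0])->data;
}
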
+
+
+/* --- set() --- */
+
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_SET)
+ ETHR_SU_DW_NATMC_FUNC__(set)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RB)
+ ETHR_SU_DW_NATMC_FUNC__(set_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_WB)
+ ETHR_SU_DW_NATMC_FUNC__(set_wb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_ACQB)
+ ETHR_SU_DW_NATMC_FUNC__(set_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(set_relb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_MB)
+ ETHR_SU_DW_NATMC_FUNC__(set_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET)
+ ETHR_DW_NATMC_FUNC__(set)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RB)
+ ETHR_DW_NATMC_FUNC__(set_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_WB)
+ ETHR_DW_NATMC_FUNC__(set_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_ACQB)
+ ETHR_DW_NATMC_FUNC__(set_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RELB)
+ ETHR_DW_NATMC_FUNC__(set_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_MB)
+ ETHR_DW_NATMC_FUNC__(set_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#else
+#error "Missing implementation of ethr_dw_atomic_set()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(set)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_SET_RB)
+ ETHR_SU_DW_NATMC_FUNC__(set_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET)
+ ETHR_SU_DW_NATMC_FUNC__(set)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_MB)
+ ETHR_SU_DW_NATMC_FUNC__(set_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_WB)
+ ETHR_SU_DW_NATMC_FUNC__(set_wb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_ACQB)
+ ETHR_SU_DW_NATMC_FUNC__(set_acqb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(set_relb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RB)
+ ETHR_DW_NATMC_FUNC__(set_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET)
+ ETHR_DW_NATMC_FUNC__(set)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_MB)
+ ETHR_DW_NATMC_FUNC__(set_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_WB)
+ ETHR_DW_NATMC_FUNC__(set_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_ACQB)
+ ETHR_DW_NATMC_FUNC__(set_acqb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RELB)
+ ETHR_DW_NATMC_FUNC__(set_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_dw_atomic_set_rb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(set_rb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_SET_WB)
+ ETHR_SU_DW_NATMC_FUNC__(set_wb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_MB)
+ ETHR_SU_DW_NATMC_FUNC__(set_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_relb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_WB)
+ ETHR_DW_NATMC_FUNC__(set_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_MB)
+ ETHR_DW_NATMC_FUNC__(set_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#else
+#error "Missing implementation of ethr_dw_atomic_set_wb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(set_wb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_SET_ACQB)
+ ETHR_SU_DW_NATMC_FUNC__(set_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RB)
+ ETHR_SU_DW_NATMC_FUNC__(set_rb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET)
+ ETHR_SU_DW_NATMC_FUNC__(set)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_MB)
+ ETHR_SU_DW_NATMC_FUNC__(set_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_WB)
+ ETHR_SU_DW_NATMC_FUNC__(set_wb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(set_relb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_ACQB)
+ ETHR_DW_NATMC_FUNC__(set_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RB)
+ ETHR_DW_NATMC_FUNC__(set_rb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET)
+ ETHR_DW_NATMC_FUNC__(set)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_MB)
+ ETHR_DW_NATMC_FUNC__(set_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_WB)
+ ETHR_DW_NATMC_FUNC__(set_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RELB)
+ ETHR_DW_NATMC_FUNC__(set_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_dw_atomic_set_acqb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(set_acqb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_SET_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(set_relb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_wb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_MB)
+ ETHR_SU_DW_NATMC_FUNC__(set_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RELB)
+ ETHR_DW_NATMC_FUNC__(set_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_DW_NATMC_FUNC__(set_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_MB)
+ ETHR_DW_NATMC_FUNC__(set_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#else
+#error "Missing implementation of ethr_dw_atomic_set_relb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(set_relb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_SET_MB)
+ ETHR_SU_DW_NATMC_FUNC__(set_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(set_relb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_wb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set_rb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(set)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_MB)
+ ETHR_DW_NATMC_FUNC__(set_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RELB)
+ ETHR_DW_NATMC_FUNC__(set_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_DW_NATMC_FUNC__(set_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set_rb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(set)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_MB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RELB)
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval = val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_SU_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval = val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_MB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_mb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RELB)
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_relb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_acqb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_wb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg_rb), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_CMPXCHG_FALLBACK__(ETHR_DW_NATMC_FUNC__(cmpxchg), &var->native, aval, aval.sint[0] = val->sint[0]; aval.sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_dw_atomic_set_mb()!"
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(set_mb)(var, val); }
+#endif
+
+}
-static ETHR_INLINE ethr_sint_t *
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addr)(ethr_atomic_t *var)
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(set_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_DW_ATMC_FUNC__(set)(var, val);
+#else
+ ETHR_DW_ATMC_FUNC__(set_rb)(var, val);
+#endif
+}
+
+
+/* --- read() --- */
+
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(&var->native);
+#elif defined(ETHR_HAVE_DW_NATMC_READ)
+ ETHR_DW_NATMC_FUNC__(read)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+ ETHR_DW_NATMC_FUNC__(read_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+ ETHR_DW_NATMC_FUNC__(read_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+ ETHR_DW_NATMC_FUNC__(read_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+ ETHR_DW_NATMC_FUNC__(read_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+ ETHR_DW_NATMC_FUNC__(read_mb)(&var->native, val->sint);
+#else
+ ethr_dw_sint_t tmp;
+ tmp.sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ tmp.sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ (void) ETHR_DW_ATMC_FUNC__(cmpxchg)(var, &tmp, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(read)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+ ETHR_DW_NATMC_FUNC__(read_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ)
+ ETHR_DW_NATMC_FUNC__(read)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+ ETHR_DW_NATMC_FUNC__(read_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+ ETHR_DW_NATMC_FUNC__(read_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+ ETHR_DW_NATMC_FUNC__(read_acqb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+ ETHR_DW_NATMC_FUNC__(read_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ethr_dw_sint_t tmp;
+ tmp.sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ tmp.sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ (void) ETHR_DW_ATMC_FUNC__(cmpxchg_rb)(var, &tmp, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(read_rb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(&var->native);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+ ETHR_DW_NATMC_FUNC__(read_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(read)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+ ETHR_DW_NATMC_FUNC__(read_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(read_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(read_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(read_relb)(&var->native, val->sint);
+#else
+ ethr_dw_sint_t tmp;
+ tmp.sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ tmp.sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ (void) ETHR_DW_ATMC_FUNC__(cmpxchg_wb)(var, &tmp, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(read_wb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+ ETHR_DW_NATMC_FUNC__(read_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+ ETHR_DW_NATMC_FUNC__(read_rb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ)
+ ETHR_DW_NATMC_FUNC__(read)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+ ETHR_DW_NATMC_FUNC__(read_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+ ETHR_DW_NATMC_FUNC__(read_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+ ETHR_DW_NATMC_FUNC__(read_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#else
+ ethr_dw_sint_t tmp;
+ tmp.sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ tmp.sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ (void) ETHR_DW_ATMC_FUNC__(cmpxchg_acqb)(var, &tmp, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(read_acqb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(&var->native);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+ ETHR_DW_NATMC_FUNC__(read_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+ ETHR_DW_NATMC_FUNC__(read_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read_acqb)(&var->native, val->sint);
+#else
+ ethr_dw_sint_t tmp;
+ tmp.sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ tmp.sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ (void) ETHR_DW_ATMC_FUNC__(cmpxchg_relb)(var, &tmp, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(read_relb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_READ_MB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_mb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RELB)
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_relb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_acqb)(&var->native);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_wb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read_rb)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ val->dw_sint = ETHR_SU_DW_NATMC_FUNC__(read)(&var->native);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_MB)
+ ETHR_DW_NATMC_FUNC__(read_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RELB)
+ ETHR_DW_NATMC_FUNC__(read_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read_rb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_DW_NATMC_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(read)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#else
+ ethr_dw_sint_t tmp;
+ tmp.sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ tmp.sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[0] = ETHR_UNUSUAL_SINT_VAL__;
+ val->sint[1] = ETHR_UNUSUAL_SINT_VAL__;
+ (void) ETHR_DW_ATMC_FUNC__(cmpxchg_mb)(var, &tmp, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(read_mb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(read_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_DW_ATMC_FUNC__(read)(var, val);
+#else
+ ETHR_DW_ATMC_FUNC__(read_rb)(var, val);
+#endif
+}
+
+
+/* --- init() --- */
+
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_INIT)
+ ETHR_SU_DW_NATMC_FUNC__(init)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RB)
+ ETHR_SU_DW_NATMC_FUNC__(init_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_WB)
+ ETHR_SU_DW_NATMC_FUNC__(init_wb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_ACQB)
+ ETHR_SU_DW_NATMC_FUNC__(init_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(init_relb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_MB)
+ ETHR_SU_DW_NATMC_FUNC__(init_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT)
+ ETHR_DW_NATMC_FUNC__(init)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RB)
+ ETHR_DW_NATMC_FUNC__(init_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_WB)
+ ETHR_DW_NATMC_FUNC__(init_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_ACQB)
+ ETHR_DW_NATMC_FUNC__(init_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RELB)
+ ETHR_DW_NATMC_FUNC__(init_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_MB)
+ ETHR_DW_NATMC_FUNC__(init_mb)(&var->native, val->sint);
+#else
+ ETHR_DW_ATMC_FUNC__(set)(var, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(init)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_INIT_RB)
+ ETHR_SU_DW_NATMC_FUNC__(init_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT)
+ ETHR_SU_DW_NATMC_FUNC__(init)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_MB)
+ ETHR_SU_DW_NATMC_FUNC__(init_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_WB)
+ ETHR_SU_DW_NATMC_FUNC__(init_wb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_ACQB)
+ ETHR_SU_DW_NATMC_FUNC__(init_acqb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(init_relb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RB)
+ ETHR_DW_NATMC_FUNC__(init_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT)
+ ETHR_DW_NATMC_FUNC__(init)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_MB)
+ ETHR_DW_NATMC_FUNC__(init_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_WB)
+ ETHR_DW_NATMC_FUNC__(init_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_ACQB)
+ ETHR_DW_NATMC_FUNC__(init_acqb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RELB)
+ ETHR_DW_NATMC_FUNC__(init_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_DW_ATMC_FUNC__(set_rb)(var, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(init_rb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_INIT_WB)
+ ETHR_SU_DW_NATMC_FUNC__(init_wb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_MB)
+ ETHR_SU_DW_NATMC_FUNC__(init_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_relb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_WB)
+ ETHR_DW_NATMC_FUNC__(init_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_MB)
+ ETHR_DW_NATMC_FUNC__(init_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_relb)(&var->native, val->sint);
+#else
+ ETHR_DW_ATMC_FUNC__(set_wb)(var, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(init_wb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_INIT_ACQB)
+ ETHR_SU_DW_NATMC_FUNC__(init_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RB)
+ ETHR_SU_DW_NATMC_FUNC__(init_rb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT)
+ ETHR_SU_DW_NATMC_FUNC__(init)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_MB)
+ ETHR_SU_DW_NATMC_FUNC__(init_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_WB)
+ ETHR_SU_DW_NATMC_FUNC__(init_wb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(init_relb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_ACQB)
+ ETHR_DW_NATMC_FUNC__(init_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RB)
+ ETHR_DW_NATMC_FUNC__(init_rb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT)
+ ETHR_DW_NATMC_FUNC__(init)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_MB)
+ ETHR_DW_NATMC_FUNC__(init_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_WB)
+ ETHR_DW_NATMC_FUNC__(init_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RELB)
+ ETHR_DW_NATMC_FUNC__(init_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_DW_ATMC_FUNC__(set_acqb)(var, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(init_acqb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_INIT_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(init_relb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_FUNC__(init_wb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_MB)
+ ETHR_SU_DW_NATMC_FUNC__(init_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_rb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RELB)
+ ETHR_DW_NATMC_FUNC__(init_relb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(init_wb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_MB)
+ ETHR_DW_NATMC_FUNC__(init_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_rb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_acqb)(&var->native, val->sint);
+#else
+ ETHR_DW_ATMC_FUNC__(set_relb)(var, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(init_relb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__) {
+#endif
+
+#if defined(ETHR_HAVE_SU_DW_NATMC_INIT_MB)
+ ETHR_SU_DW_NATMC_FUNC__(init_mb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RELB)
+ ETHR_SU_DW_NATMC_FUNC__(init_relb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_acqb)(&var->native, val->dw_sint);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_SU_DW_NATMC_FUNC__(init_wb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init_rb)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_SU_DW_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_SU_DW_NATMC_FUNC__(init)(&var->native, val->dw_sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_MB)
+ ETHR_DW_NATMC_FUNC__(init_mb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RELB)
+ ETHR_DW_NATMC_FUNC__(init_relb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_acqb)(&var->native, val->sint);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_DW_NATMC_FUNC__(init_wb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init_rb)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_DW_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_DW_NATMC_FUNC__(init)(&var->native, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_DW_ATMC_FUNC__(set_mb)(var, val);
+#endif
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ } else { ETHR_DW_ATOMIC_FUNC__(init_mb)(var, val); }
+#endif
+
+}
+
+static ETHR_INLINE void ETHR_DW_ATMC_FUNC__(init_ddrb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_DW_ATMC_FUNC__(init)(var, val);
+#else
+ ETHR_DW_ATMC_FUNC__(init_rb)(var, val);
+#endif
+}
+
+#endif /* ETHR_DW_ATMC_INLINE__ */
+
+
+/* ---------- Word size atomic implementation ---------- */
+
+
+#ifdef ETHR_NEED_ATMC_PROTOTYPES__
+ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_cmpxchg_ddrb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_cmpxchg_rb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_cmpxchg_wb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_cmpxchg_mb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val);
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_xchg_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_xchg_rb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_xchg_wb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_xchg_acqb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_xchg_relb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_xchg_mb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set_rb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set_wb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set_acqb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set_relb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_set_mb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init_rb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init_wb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init_acqb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init_relb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_init_mb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read_rb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read_wb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read_acqb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read_relb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_add_read_mb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_ddrb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_rb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_wb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_relb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_mb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read_ddrb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read_rb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read_wb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read_relb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_inc_read_mb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read_ddrb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read_rb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read_wb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read_acqb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_dec_read_mb(ethr_atomic_t *var);
+void ethr_atomic_add(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_add_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_add_rb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_add_wb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_add_acqb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_add_relb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_add_mb(ethr_atomic_t *var, ethr_sint_t val);
+void ethr_atomic_inc(ethr_atomic_t *var);
+void ethr_atomic_inc_ddrb(ethr_atomic_t *var);
+void ethr_atomic_inc_rb(ethr_atomic_t *var);
+void ethr_atomic_inc_wb(ethr_atomic_t *var);
+void ethr_atomic_inc_acqb(ethr_atomic_t *var);
+void ethr_atomic_inc_relb(ethr_atomic_t *var);
+void ethr_atomic_inc_mb(ethr_atomic_t *var);
+void ethr_atomic_dec(ethr_atomic_t *var);
+void ethr_atomic_dec_ddrb(ethr_atomic_t *var);
+void ethr_atomic_dec_rb(ethr_atomic_t *var);
+void ethr_atomic_dec_wb(ethr_atomic_t *var);
+void ethr_atomic_dec_acqb(ethr_atomic_t *var);
+void ethr_atomic_dec_relb(ethr_atomic_t *var);
+void ethr_atomic_dec_mb(ethr_atomic_t *var);
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_band_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_band_rb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_band_wb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_band_acqb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_band_relb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_band_mb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor_ddrb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor_rb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor_wb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor_acqb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor_relb(ethr_atomic_t *var, ethr_sint_t val);
+ethr_sint_t ethr_atomic_read_bor_mb(ethr_atomic_t *var, ethr_sint_t val);
+#endif /* ETHR_NEED_ATMC_PROTOTYPES__ */
+
+#if (defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS) \
+ && (defined(ETHR_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)))
+
+#if !defined(ETHR_NATMC_BITS__)
+# error "Missing native atomic implementation"
+#elif ETHR_NATMC_BITS__ == 64
+# undef ETHR_HAVE_NATMC_CMPXCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG
+# define ETHR_HAVE_NATMC_CMPXCHG 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RB
+# define ETHR_HAVE_NATMC_CMPXCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_WB
+# define ETHR_HAVE_NATMC_CMPXCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB
+# define ETHR_HAVE_NATMC_CMPXCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RELB
+# define ETHR_HAVE_NATMC_CMPXCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB
+# define ETHR_HAVE_NATMC_CMPXCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG
+# define ETHR_HAVE_NATMC_XCHG 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_RB
+# define ETHR_HAVE_NATMC_XCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_WB
+# define ETHR_HAVE_NATMC_XCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_ACQB
+# define ETHR_HAVE_NATMC_XCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_RELB
+# define ETHR_HAVE_NATMC_XCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_MB
+# define ETHR_HAVE_NATMC_XCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET
+# define ETHR_HAVE_NATMC_SET 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RB
+# define ETHR_HAVE_NATMC_SET_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_WB
+# define ETHR_HAVE_NATMC_SET_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_ACQB
+# define ETHR_HAVE_NATMC_SET_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB
+# define ETHR_HAVE_NATMC_SET_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB
+# define ETHR_HAVE_NATMC_SET_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT
+# define ETHR_HAVE_NATMC_INIT 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RB
+# define ETHR_HAVE_NATMC_INIT_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_WB
+# define ETHR_HAVE_NATMC_INIT_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_ACQB
+# define ETHR_HAVE_NATMC_INIT_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RELB
+# define ETHR_HAVE_NATMC_INIT_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_MB
+# define ETHR_HAVE_NATMC_INIT_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN
+# define ETHR_HAVE_NATMC_ADD_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RB
+# define ETHR_HAVE_NATMC_ADD_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_WB
+# define ETHR_HAVE_NATMC_ADD_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_ACQB
+# define ETHR_HAVE_NATMC_ADD_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RELB
+# define ETHR_HAVE_NATMC_ADD_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB
+# define ETHR_HAVE_NATMC_ADD_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ
+# define ETHR_HAVE_NATMC_READ 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RB
+# define ETHR_HAVE_NATMC_READ_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_WB
+# define ETHR_HAVE_NATMC_READ_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_ACQB
+# define ETHR_HAVE_NATMC_READ_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RELB
+# define ETHR_HAVE_NATMC_READ_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_MB
+# define ETHR_HAVE_NATMC_READ_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN
+# define ETHR_HAVE_NATMC_INC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RB
+# define ETHR_HAVE_NATMC_INC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_WB
+# define ETHR_HAVE_NATMC_INC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_ACQB
+# define ETHR_HAVE_NATMC_INC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RELB
+# define ETHR_HAVE_NATMC_INC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_MB
+# define ETHR_HAVE_NATMC_INC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN
+# define ETHR_HAVE_NATMC_DEC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RB
+# define ETHR_HAVE_NATMC_DEC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_WB
+# define ETHR_HAVE_NATMC_DEC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_ACQB
+# define ETHR_HAVE_NATMC_DEC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RELB
+# define ETHR_HAVE_NATMC_DEC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_MB
+# define ETHR_HAVE_NATMC_DEC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD
+# define ETHR_HAVE_NATMC_ADD 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RB
+# define ETHR_HAVE_NATMC_ADD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_WB
+# define ETHR_HAVE_NATMC_ADD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_ACQB
+# define ETHR_HAVE_NATMC_ADD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RELB
+# define ETHR_HAVE_NATMC_ADD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_MB
+# define ETHR_HAVE_NATMC_ADD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC
+# define ETHR_HAVE_NATMC_INC 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RB
+# define ETHR_HAVE_NATMC_INC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_WB
+# define ETHR_HAVE_NATMC_INC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_ACQB
+# define ETHR_HAVE_NATMC_INC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RELB
+# define ETHR_HAVE_NATMC_INC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_MB
+# define ETHR_HAVE_NATMC_INC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC
+# define ETHR_HAVE_NATMC_DEC 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RB
+# define ETHR_HAVE_NATMC_DEC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_WB
+# define ETHR_HAVE_NATMC_DEC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_ACQB
+# define ETHR_HAVE_NATMC_DEC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RELB
+# define ETHR_HAVE_NATMC_DEC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_MB
+# define ETHR_HAVE_NATMC_DEC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD
+# define ETHR_HAVE_NATMC_AND_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_RB
+# define ETHR_HAVE_NATMC_AND_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_WB
+# define ETHR_HAVE_NATMC_AND_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_ACQB
+# define ETHR_HAVE_NATMC_AND_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_RELB
+# define ETHR_HAVE_NATMC_AND_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_MB
+# define ETHR_HAVE_NATMC_AND_RETOLD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD
+# define ETHR_HAVE_NATMC_OR_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_RB
+# define ETHR_HAVE_NATMC_OR_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_WB
+# define ETHR_HAVE_NATMC_OR_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_ACQB
+# define ETHR_HAVE_NATMC_OR_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_RELB
+# define ETHR_HAVE_NATMC_OR_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_MB
+# define ETHR_HAVE_NATMC_OR_RETOLD_MB 1
+# endif
+#elif ETHR_NATMC_BITS__ == 32
+# undef ETHR_HAVE_NATMC_CMPXCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG
+# define ETHR_HAVE_NATMC_CMPXCHG 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RB
+# define ETHR_HAVE_NATMC_CMPXCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_WB
+# define ETHR_HAVE_NATMC_CMPXCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB
+# define ETHR_HAVE_NATMC_CMPXCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB
+# define ETHR_HAVE_NATMC_CMPXCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_CMPXCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB
+# define ETHR_HAVE_NATMC_CMPXCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG
+# define ETHR_HAVE_NATMC_XCHG 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RB
+# define ETHR_HAVE_NATMC_XCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_WB
+# define ETHR_HAVE_NATMC_XCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_ACQB
+# define ETHR_HAVE_NATMC_XCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RELB
+# define ETHR_HAVE_NATMC_XCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_XCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_MB
+# define ETHR_HAVE_NATMC_XCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET
+# define ETHR_HAVE_NATMC_SET 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RB
+# define ETHR_HAVE_NATMC_SET_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_WB
+# define ETHR_HAVE_NATMC_SET_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_ACQB
+# define ETHR_HAVE_NATMC_SET_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RELB
+# define ETHR_HAVE_NATMC_SET_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_SET_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB
+# define ETHR_HAVE_NATMC_SET_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT
+# define ETHR_HAVE_NATMC_INIT 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_RB
+# define ETHR_HAVE_NATMC_INIT_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_WB
+# define ETHR_HAVE_NATMC_INIT_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_ACQB
+# define ETHR_HAVE_NATMC_INIT_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_RELB
+# define ETHR_HAVE_NATMC_INIT_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_INIT_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_MB
+# define ETHR_HAVE_NATMC_INIT_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN
+# define ETHR_HAVE_NATMC_ADD_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RB
+# define ETHR_HAVE_NATMC_ADD_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_WB
+# define ETHR_HAVE_NATMC_ADD_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_ACQB
+# define ETHR_HAVE_NATMC_ADD_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RELB
+# define ETHR_HAVE_NATMC_ADD_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB
+# define ETHR_HAVE_NATMC_ADD_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ
+# define ETHR_HAVE_NATMC_READ 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RB
+# define ETHR_HAVE_NATMC_READ_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_WB
+# define ETHR_HAVE_NATMC_READ_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_ACQB
+# define ETHR_HAVE_NATMC_READ_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RELB
+# define ETHR_HAVE_NATMC_READ_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_READ_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_MB
+# define ETHR_HAVE_NATMC_READ_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN
+# define ETHR_HAVE_NATMC_INC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RB
+# define ETHR_HAVE_NATMC_INC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_WB
+# define ETHR_HAVE_NATMC_INC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_ACQB
+# define ETHR_HAVE_NATMC_INC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RELB
+# define ETHR_HAVE_NATMC_INC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB
+# define ETHR_HAVE_NATMC_INC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN
+# define ETHR_HAVE_NATMC_DEC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RB
+# define ETHR_HAVE_NATMC_DEC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_WB
+# define ETHR_HAVE_NATMC_DEC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_ACQB
+# define ETHR_HAVE_NATMC_DEC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RELB
+# define ETHR_HAVE_NATMC_DEC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_MB
+# define ETHR_HAVE_NATMC_DEC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD
+# define ETHR_HAVE_NATMC_ADD 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RB
+# define ETHR_HAVE_NATMC_ADD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_WB
+# define ETHR_HAVE_NATMC_ADD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_ACQB
+# define ETHR_HAVE_NATMC_ADD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RELB
+# define ETHR_HAVE_NATMC_ADD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_ADD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_MB
+# define ETHR_HAVE_NATMC_ADD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC
+# define ETHR_HAVE_NATMC_INC 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RB
+# define ETHR_HAVE_NATMC_INC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_WB
+# define ETHR_HAVE_NATMC_INC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_ACQB
+# define ETHR_HAVE_NATMC_INC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RELB
+# define ETHR_HAVE_NATMC_INC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_INC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_MB
+# define ETHR_HAVE_NATMC_INC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC
+# define ETHR_HAVE_NATMC_DEC 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RB
+# define ETHR_HAVE_NATMC_DEC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_WB
+# define ETHR_HAVE_NATMC_DEC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_ACQB
+# define ETHR_HAVE_NATMC_DEC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RELB
+# define ETHR_HAVE_NATMC_DEC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_DEC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_MB
+# define ETHR_HAVE_NATMC_DEC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD
+# define ETHR_HAVE_NATMC_AND_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RB
+# define ETHR_HAVE_NATMC_AND_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_WB
+# define ETHR_HAVE_NATMC_AND_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_ACQB
+# define ETHR_HAVE_NATMC_AND_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RELB
+# define ETHR_HAVE_NATMC_AND_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_AND_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_MB
+# define ETHR_HAVE_NATMC_AND_RETOLD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD
+# define ETHR_HAVE_NATMC_OR_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RB
+# define ETHR_HAVE_NATMC_OR_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_WB
+# define ETHR_HAVE_NATMC_OR_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_ACQB
+# define ETHR_HAVE_NATMC_OR_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RELB
+# define ETHR_HAVE_NATMC_OR_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC_OR_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_MB
+# define ETHR_HAVE_NATMC_OR_RETOLD_MB 1
+# endif
+#else
+# error "Invalid native atomic size"
+#endif
+
+#if (!defined(ETHR_HAVE_NATMC_CMPXCHG) \
+ && !defined(ETHR_HAVE_NATMC_CMPXCHG_RB) \
+ && !defined(ETHR_HAVE_NATMC_CMPXCHG_WB) \
+ && !defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB) \
+ && !defined(ETHR_HAVE_NATMC_CMPXCHG_RELB) \
+ && !defined(ETHR_HAVE_NATMC_CMPXCHG_MB))
+# error "No native cmpxchg() op available"
+#endif
+
+
+/*
+ * Read op used together with the cmpxchg() fallback when no native read op is present.
+ */
+#if defined(ETHR_HAVE_NATMC_READ)
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC_FUNC__(read)(VAR)
+#elif defined(ETHR_HAVE_NATMC_READ_RB)
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC_FUNC__(read_rb)(VAR)
+#elif defined(ETHR_HAVE_NATMC_READ_WB)
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC_FUNC__(read_wb)(VAR)
+#elif defined(ETHR_HAVE_NATMC_READ_ACQB)
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC_FUNC__(read_acqb)(VAR)
+#elif defined(ETHR_HAVE_NATMC_READ_RELB)
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC_FUNC__(read_relb)(VAR)
+#elif defined(ETHR_HAVE_NATMC_READ_MB)
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC_FUNC__(read_mb)(VAR)
+#else
+/*
+ * We have no native read() op; guess zero and then use the
+ * atomic's actual value returned from cmpxchg(). If the guess is
+ * wrong, the first cmpxchg() in the fallback below fails and hands
+ * back the actual value, which seeds the retry.
+ */
+#define ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR) \
+ ((ETHR_NAINT_T__) 0)
+#endif
+
+/*
+ * Fallback based on a native cmpxchg() op, used when the needed native op is not present.
+ */
+#define ETHR_NATMC_CMPXCHG_FALLBACK__(CMPXCHG, VAR, AVAL, OPS) \
+do { \
+ ethr_sint_t AVAL; \
+ ETHR_NAINT_T__ new__, act__, exp__; \
+ act__ = ETHR_NATMC_CMPXCHG_FALLBACK_READ__(VAR); \
+ do { \
+ exp__ = act__; \
+ AVAL = (ethr_sint_t) act__; \
+ { OPS; } \
+ new__ = (ETHR_NAINT_T__) AVAL; \
+ act__ = CMPXCHG(VAR, new__, exp__); \
+ } while (__builtin_expect(act__ != exp__, 0)); \
+} while (0)
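+
+/*
+ * Illustrative sketch: with no native xchg op, ethr_atomic_xchg()
+ * further down invokes this fallback as
+ *
+ *   ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval,
+ *                                 res = aval; aval = val);
+ *
+ * which expands roughly to (trailing underscores dropped for readability):
+ *
+ *   act = ETHR_NATMC_CMPXCHG_FALLBACK_READ__(var);
+ *   do {
+ *       exp = act;
+ *       aval = (ethr_sint_t) act;
+ *       res = aval; aval = val;
+ *       act = cmpxchg(var, (ETHR_NAINT_T__) aval, exp);
+ *   } while (act != exp);
+ *
+ * i.e. a compare-and-swap retry loop: OPS computes the new value from
+ * the old one, and a failed cmpxchg() returns the observed value used
+ * for the next attempt.
+ */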
+
+
+
+/* --- addr() --- */
+
+static ETHR_INLINE ethr_sint_t *ETHR_ATMC_FUNC__(addr)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
return (ethr_sint_t *) ETHR_NATMC_ADDR_FUNC__(var);
+
+}
+
+
+/* --- cmpxchg() --- */
+
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_CMPXCHG)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#else
+#error "Missing implementation of ethr_atomic_cmpxchg()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg_rb)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic_cmpxchg_rb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg_wb)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#else
+#error "Missing implementation of ethr_atomic_cmpxchg_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg_acqb)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_cmpxchg_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg_relb)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
#else
- return (ethr_sint_t *) var;
+#error "Missing implementation of ethr_atomic_cmpxchg_relb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, ethr_sint_t i)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg_mb)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) i);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var, (ETHR_NAINT_T__) val, (ETHR_NAINT_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#error "Missing implementation of ethr_atomic_cmpxchg_mb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, ethr_sint_t i)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(cmpxchg_ddrb)(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) i);
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(cmpxchg)(var, val, old_val);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+ return ETHR_ATMC_FUNC__(cmpxchg_rb)(var, val, old_val);
#endif
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
+
+/* --- xchg() --- */
+
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_XCHG)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
#else
+#error "Missing implementation of ethr_atomic_xchg()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg_rb)(ethr_atomic_t *var, ethr_sint_t val)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#if defined(ETHR_HAVE_NATMC_XCHG_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_XCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_XCHG_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic_xchg_rb()!"
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg_wb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_XCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+#else
+#error "Missing implementation of ethr_atomic_xchg_wb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, ethr_sint_t incr)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg_acqb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) incr);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_XCHG_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_XCHG)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_XCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#error "Missing implementation of ethr_atomic_xchg_acqb()!"
#endif
-}
-
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, ethr_sint_t i)
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg_relb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) i);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_XCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
#else
+#error "Missing implementation of ethr_atomic_xchg_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+#if defined(ETHR_HAVE_NATMC_XCHG_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_XCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_XCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_XCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_XCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_xchg_mb()!"
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(xchg_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(xchg)(var, val);
+#else
+ return ETHR_ATMC_FUNC__(xchg_rb)(var, val);
#endif
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+
+/* --- set() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC_FUNC__(inc)(var);
+#if defined(ETHR_HAVE_NATMC_SET)
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RB)
+ ETHR_NATMC_FUNC__(set_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_WB)
+ ETHR_NATMC_FUNC__(set_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_ACQB)
+ ETHR_NATMC_FUNC__(set_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RELB)
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_MB)
+ ETHR_NATMC_FUNC__(set_mb)(var, (ETHR_NAINT_T__) val);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ (void) ETHR_ATMC_FUNC__(xchg)(var, val);
#endif
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set_rb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC_FUNC__(dec)(var);
+#if defined(ETHR_HAVE_NATMC_SET_RB)
+ ETHR_NATMC_FUNC__(set_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET)
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_SET_MB)
+ ETHR_NATMC_FUNC__(set_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_WB)
+ ETHR_NATMC_FUNC__(set_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_SET_ACQB)
+ ETHR_NATMC_FUNC__(set_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_SET_RELB)
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ (void) ETHR_ATMC_FUNC__(xchg_rb)(var, val);
#endif
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set_wb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#if defined(ETHR_HAVE_NATMC_SET_WB)
+ ETHR_NATMC_FUNC__(set_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_MB)
+ ETHR_NATMC_FUNC__(set_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
#else
+ (void) ETHR_ATMC_FUNC__(xchg_wb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set_acqb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_SET_ACQB)
+ ETHR_NATMC_FUNC__(set_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RB)
+ ETHR_NATMC_FUNC__(set_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_SET)
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_SET_MB)
+ ETHR_NATMC_FUNC__(set_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_WB)
+ ETHR_NATMC_FUNC__(set_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_SET_RELB)
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(xchg_acqb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set_relb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_SET_RELB)
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_NATMC_FUNC__(set_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_MB)
+ ETHR_NATMC_FUNC__(set_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_acqb)(var, (ETHR_NAINT_T__) val);
+#else
+ (void) ETHR_ATMC_FUNC__(xchg_relb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_SET_MB)
+ ETHR_NATMC_FUNC__(set_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_RELB)
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_NATMC_FUNC__(set_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(xchg_mb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(set_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC_FUNC__(set)(var, val);
+#else
+ ETHR_ATMC_FUNC__(set_rb)(var, val);
+#endif
+}
+
+
+/* --- init() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_INIT)
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RB)
+ ETHR_NATMC_FUNC__(init_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_WB)
+ ETHR_NATMC_FUNC__(init_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_ACQB)
+ ETHR_NATMC_FUNC__(init_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RELB)
+ ETHR_NATMC_FUNC__(init_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_MB)
+ ETHR_NATMC_FUNC__(init_mb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_ATMC_FUNC__(set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init_rb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_INIT_RB)
+ ETHR_NATMC_FUNC__(init_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT)
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INIT_MB)
+ ETHR_NATMC_FUNC__(init_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_WB)
+ ETHR_NATMC_FUNC__(init_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INIT_ACQB)
+ ETHR_NATMC_FUNC__(init_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INIT_RELB)
+ ETHR_NATMC_FUNC__(init_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATMC_FUNC__(set_rb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init_wb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_INIT_WB)
+ ETHR_NATMC_FUNC__(init_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_MB)
+ ETHR_NATMC_FUNC__(init_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_relb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_ATMC_FUNC__(set_wb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init_acqb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_INIT_ACQB)
+ ETHR_NATMC_FUNC__(init_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RB)
+ ETHR_NATMC_FUNC__(init_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INIT)
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INIT_MB)
+ ETHR_NATMC_FUNC__(init_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_WB)
+ ETHR_NATMC_FUNC__(init_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INIT_RELB)
+ ETHR_NATMC_FUNC__(init_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATMC_FUNC__(set_acqb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init_relb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_INIT_RELB)
+ ETHR_NATMC_FUNC__(init_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(init_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_MB)
+ ETHR_NATMC_FUNC__(init_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_acqb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_ATMC_FUNC__(set_relb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_INIT_MB)
+ ETHR_NATMC_FUNC__(init_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_RELB)
+ ETHR_NATMC_FUNC__(init_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(init_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATMC_FUNC__(set_mb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(init_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC_FUNC__(init)(var, val);
+#else
+ ETHR_ATMC_FUNC__(init_rb)(var, val);
+#endif
+}
+
+
+/* --- add_read() --- */
+
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_ADD_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#else
+#error "Missing implementation of ethr_atomic_add_read()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read_rb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_ADD_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic_add_read_rb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read_wb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_ADD_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+#else
+#error "Missing implementation of ethr_atomic_add_read_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read_acqb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_ADD_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_add_read_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read_relb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_ADD_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#else
+#error "Missing implementation of ethr_atomic_add_read_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_ADD_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_add_read_mb()!"
+#endif
+ return res;
+}
+
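+/*
+ * _ddrb ("data dependency read barrier") variant: on architectures that
+ * order data dependent loads (ETHR_ORDERED_READ_DEPEND) the plain
+ * operation suffices; otherwise the _rb variant is used.
+ */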
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(add_read_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(add_read)(var, val);
+#else
+ return ETHR_ATMC_FUNC__(add_read_rb)(var, val);
+#endif
+}
+
+
+/* --- read() --- */
+
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_READ)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_mb)(var);
+#else
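+ /* No native read operation; a cmpxchg that compares against (and
+ * writes back) an unlikely value returns the current value without
+ * modifying the variable. */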
+ res = ETHR_ATMC_FUNC__(cmpxchg)(var, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_rb)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_READ_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_READ_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_READ_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_READ_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ res = ETHR_ATMC_FUNC__(cmpxchg_rb)(var, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_wb)(ethr_atomic_t *var)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#if defined(ETHR_HAVE_NATMC_READ_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_relb)(var);
+#else
+ res = ETHR_ATMC_FUNC__(cmpxchg_wb)(var, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_acqb)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_READ_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC_READ)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC_READ_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC_READ_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#else
+ res = ETHR_ATMC_FUNC__(cmpxchg_acqb)(var, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_relb)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_READ_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
#else
+ res = ETHR_ATMC_FUNC__(cmpxchg_relb)(var, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_mb)(ethr_atomic_t *var)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#if defined(ETHR_HAVE_NATMC_READ_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#else
+ res = ETHR_ATMC_FUNC__(cmpxchg_mb)(var, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__, (ethr_sint_t) ETHR_UNUSUAL_SINT_VAL__);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_ddrb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(read)(var);
+#else
+ return ETHR_ATMC_FUNC__(read_rb)(var);
#endif
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
- ethr_sint_t mask)
+
+/* --- inc_read() --- */
+
+
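+/*
+ * inc_read() here and dec_read() further down prefer a native
+ * inc_return/dec_return variant (with explicit barriers where needed);
+ * when none exists they fall back on the corresponding add_read()
+ * flavour with an argument of 1 or -1.
+ */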
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var,
- (ETHR_NAINT_T__) mask);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_INC_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_mb)(var);
#else
+ res = ETHR_ATMC_FUNC__(add_read)(var, (ethr_sint_t) 1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read_rb)(ethr_atomic_t *var)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+#if defined(ETHR_HAVE_NATMC_INC_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_rb)(var, (ethr_sint_t) 1);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read_wb)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_INC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_relb)(var);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_wb)(var, (ethr_sint_t) 1);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
- ethr_sint_t mask)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read_acqb)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var,
- (ETHR_NAINT_T__) mask);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_INC_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
+ res = ETHR_ATMC_FUNC__(add_read_acqb)(var, (ethr_sint_t) 1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read_relb)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_INC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_relb)(var, (ethr_sint_t) 1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read_mb)(ethr_atomic_t *var)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+#if defined(ETHR_HAVE_NATMC_INC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_mb)(var, (ethr_sint_t) 1);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(inc_read_ddrb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(inc_read)(var);
+#else
+ return ETHR_ATMC_FUNC__(inc_read_rb)(var);
+#endif
+}
+
+
+/* --- dec_read() --- */
+
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_DEC_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_mb)(var);
+#else
+ res = ETHR_ATMC_FUNC__(add_read)(var, (ethr_sint_t) -1);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var, ethr_sint_t new)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read_rb)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var,
- (ETHR_NAINT_T__) new);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_DEC_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
+ res = ETHR_ATMC_FUNC__(add_read_rb)(var, (ethr_sint_t) -1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read_wb)(ethr_atomic_t *var)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+#if defined(ETHR_HAVE_NATMC_DEC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_wb)(var, (ethr_sint_t) -1);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read_acqb)(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_DEC_RETURN_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_acqb)(var, (ethr_sint_t) -1);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- ethr_sint_t new,
- ethr_sint_t exp)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read_relb)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var,
- (ETHR_NAINT_T__) new,
- (ETHR_NAINT_T__) exp);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_DEC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_acqb)(var);
#else
+ res = ETHR_ATMC_FUNC__(add_read_relb)(var, (ethr_sint_t) -1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read_mb)(ethr_atomic_t *var)
+{
ethr_sint_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
- {
- res = *var;
- if (__builtin_expect(res == exp, 1))
- *var = new;
- });
+#if defined(ETHR_HAVE_NATMC_DEC_RETURN_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC_FUNC__(add_read_mb)(var, (ethr_sint_t) -1);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(dec_read_ddrb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(dec_read)(var);
+#else
+ return ETHR_ATMC_FUNC__(dec_read_rb)(var);
#endif
}
-/*
- * Important memory barrier requirements.
- *
- * The following atomic operations *must* supply a memory barrier of
- * at least the type specified by its suffix:
- * _acqb = acquire barrier
- * _relb = release barrier
- */
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+/* --- add() --- */
+
+
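+/*
+ * The void add(), inc() and dec() operations use a native non-returning
+ * variant when one is available (with explicit barriers where needed);
+ * otherwise the result of the corresponding *_read() operation is
+ * discarded.
+ */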
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#if defined(ETHR_HAVE_NATMC_ADD)
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RB)
+ ETHR_NATMC_FUNC__(add_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_WB)
+ ETHR_NATMC_FUNC__(add_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_ACQB)
+ ETHR_NATMC_FUNC__(add_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RELB)
+ ETHR_NATMC_FUNC__(add_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_MB)
+ ETHR_NATMC_FUNC__(add_mb)(var, (ETHR_NAINT_T__) val);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+ (void) ETHR_ATMC_FUNC__(add_read)(var, val);
#endif
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add_rb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#if defined(ETHR_HAVE_NATMC_ADD_RB)
+ ETHR_NATMC_FUNC__(add_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD)
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_ADD_MB)
+ ETHR_NATMC_FUNC__(add_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_WB)
+ ETHR_NATMC_FUNC__(add_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_ADD_ACQB)
+ ETHR_NATMC_FUNC__(add_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_ADD_RELB)
+ ETHR_NATMC_FUNC__(add_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
+ (void) ETHR_ATMC_FUNC__(add_read_rb)(var, val);
#endif
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var,
- ethr_sint_t val)
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add_wb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#if defined(ETHR_HAVE_NATMC_ADD_WB)
+ ETHR_NATMC_FUNC__(add_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_MB)
+ ETHR_NATMC_FUNC__(add_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(add_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(add_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(add_relb)(var, (ETHR_NAINT_T__) val);
+#else
+ (void) ETHR_ATMC_FUNC__(add_read_wb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add_acqb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_ADD_ACQB)
+ ETHR_NATMC_FUNC__(add_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RB)
+ ETHR_NATMC_FUNC__(add_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD)
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_MB)
+ ETHR_NATMC_FUNC__(add_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_WB)
+ ETHR_NATMC_FUNC__(add_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RELB)
+ ETHR_NATMC_FUNC__(add_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(add_read_acqb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add_relb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_ADD_RELB)
+ ETHR_NATMC_FUNC__(add_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_MB)
+ ETHR_NATMC_FUNC__(add_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add_acqb)(var, (ETHR_NAINT_T__) val);
+#else
+ (void) ETHR_ATMC_FUNC__(add_read_relb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_NATMC_ADD_MB)
+ ETHR_NATMC_FUNC__(add_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_RELB)
+ ETHR_NATMC_FUNC__(add_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_ADD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_ADD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(add_read_mb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(add_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC_FUNC__(add)(var, val);
+#else
+ ETHR_ATMC_FUNC__(add_rb)(var, val);
+#endif
+}
+
+
+/* --- inc() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_INC)
+ ETHR_NATMC_FUNC__(inc)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RB)
+ ETHR_NATMC_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_WB)
+ ETHR_NATMC_FUNC__(inc_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_ACQB)
+ ETHR_NATMC_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RELB)
+ ETHR_NATMC_FUNC__(inc_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_MB)
+ ETHR_NATMC_FUNC__(inc_mb)(var);
+#else
+ (void) ETHR_ATMC_FUNC__(inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc_rb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_INC_RB)
+ ETHR_NATMC_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC)
+ ETHR_NATMC_FUNC__(inc)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INC_MB)
+ ETHR_NATMC_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_WB)
+ ETHR_NATMC_FUNC__(inc_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INC_ACQB)
+ ETHR_NATMC_FUNC__(inc_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_INC_RELB)
+ ETHR_NATMC_FUNC__(inc_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ (void) ETHR_ATMC_FUNC__(inc_read_rb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc_wb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_INC_WB)
+ ETHR_NATMC_FUNC__(inc_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(inc)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_MB)
+ ETHR_NATMC_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(inc_relb)(var);
+#else
+ (void) ETHR_ATMC_FUNC__(inc_read_wb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc_acqb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_INC_ACQB)
+ ETHR_NATMC_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RB)
+ ETHR_NATMC_FUNC__(inc_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC)
+ ETHR_NATMC_FUNC__(inc)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_MB)
+ ETHR_NATMC_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_WB)
+ ETHR_NATMC_FUNC__(inc_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RELB)
+ ETHR_NATMC_FUNC__(inc_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(inc_read_acqb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc_relb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_INC_RELB)
+ ETHR_NATMC_FUNC__(inc_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_MB)
+ ETHR_NATMC_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc_acqb)(var);
+#else
+ (void) ETHR_ATMC_FUNC__(inc_read_relb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc_mb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_INC_MB)
+ ETHR_NATMC_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_RELB)
+ ETHR_NATMC_FUNC__(inc_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_INC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_INC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(inc)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(inc_read_mb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(inc_ddrb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC_FUNC__(inc)(var);
+#else
+ ETHR_ATMC_FUNC__(inc_rb)(var);
+#endif
+}
+
+
+/* --- dec() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_DEC)
+ ETHR_NATMC_FUNC__(dec)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RB)
+ ETHR_NATMC_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_WB)
+ ETHR_NATMC_FUNC__(dec_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_ACQB)
+ ETHR_NATMC_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RELB)
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_MB)
+ ETHR_NATMC_FUNC__(dec_mb)(var);
+#else
+ (void) ETHR_ATMC_FUNC__(dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec_rb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_DEC_RB)
+ ETHR_NATMC_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC)
+ ETHR_NATMC_FUNC__(dec)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_DEC_MB)
+ ETHR_NATMC_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_WB)
+ ETHR_NATMC_FUNC__(dec_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_DEC_ACQB)
+ ETHR_NATMC_FUNC__(dec_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_DEC_RELB)
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ (void) ETHR_ATMC_FUNC__(dec_read_rb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec_wb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_DEC_WB)
+ ETHR_NATMC_FUNC__(dec_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(dec)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_MB)
+ ETHR_NATMC_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+#else
+ (void) ETHR_ATMC_FUNC__(dec_read_wb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec_acqb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_DEC_ACQB)
+ ETHR_NATMC_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RB)
+ ETHR_NATMC_FUNC__(dec_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC)
+ ETHR_NATMC_FUNC__(dec)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_MB)
+ ETHR_NATMC_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_WB)
+ ETHR_NATMC_FUNC__(dec_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RELB)
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+ (void) ETHR_ATMC_FUNC__(dec_read_acqb)(var);
#endif
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec_relb)(ethr_atomic_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#if defined(ETHR_HAVE_NATMC_DEC_RELB)
ETHR_NATMC_FUNC__(dec_relb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec_wb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_MB)
+ ETHR_NATMC_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec_acqb)(var);
+#else
+ (void) ETHR_ATMC_FUNC__(dec_read_relb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec_mb)(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_NATMC_DEC_MB)
+ ETHR_NATMC_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_RELB)
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC_DEC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_DEC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_FUNC__(dec)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC_FUNC__(dec_read_mb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC_FUNC__(dec_ddrb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC_FUNC__(dec)(var);
+#else
+ ETHR_ATMC_FUNC__(dec_rb)(var);
+#endif
+}
+
+
+/* --- read_band() --- */
+
+
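+/*
+ * read_band() and read_bor() return the previous value. Without a
+ * native and_retold/or_retold variant they are emulated through a
+ * native cmpxchg; if no native cmpxchg exists either, compilation
+ * fails with #error since there is nothing left to fall back on.
+ */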
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_AND_RETOLD)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#else
+#error "Missing implementation of ethr_atomic_read_band()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band_rb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_AND_RETOLD_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic_read_band_rb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band_wb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_AND_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+#else
+#error "Missing implementation of ethr_atomic_read_band_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band_acqb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_AND_RETOLD_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_read_band_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band_relb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_AND_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#else
+#error "Missing implementation of ethr_atomic_read_band_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_AND_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_AND_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
+#error "Missing implementation of ethr_atomic_read_band_mb()!"
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_band_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(read_band)(var, val);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+ return ETHR_ATMC_FUNC__(read_band_rb)(var, val);
#endif
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
- ethr_sint_t new,
- ethr_sint_t exp)
+
+/* --- read_bor() --- */
+
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var,
- (ETHR_NAINT_T__) new,
- (ETHR_NAINT_T__) exp);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_OR_RETOLD)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#error "Missing implementation of ethr_atomic_read_bor()!"
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
- ethr_sint_t new,
- ethr_sint_t exp)
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor_rb)(ethr_atomic_t *var, ethr_sint_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var,
- (ETHR_NAINT_T__) new,
- (ETHR_NAINT_T__) exp);
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_OR_RETOLD_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_acqb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#error "Missing implementation of ethr_atomic_read_bor_rb()!"
#endif
+ return res;
}
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor_wb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_OR_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+#else
+#error "Missing implementation of ethr_atomic_read_bor_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor_acqb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_OR_RETOLD_ACQB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_WB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_read_bor_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor_relb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_OR_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_relb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_wb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_rb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#else
+#error "Missing implementation of ethr_atomic_read_bor_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor_mb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+#if defined(ETHR_HAVE_NATMC_OR_RETOLD_MB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_mb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RELB)
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_relb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_acqb)(var, (ETHR_NAINT_T__) val);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_wb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold_rb)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_OR_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var, (ETHR_NAINT_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_MB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RELB)
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC_CMPXCHG_FALLBACK__(ETHR_NATMC_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic_read_bor_mb()!"
+#endif
+ return res;
+}
+
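+/*
+ * The _ddrb variants (presumably "data dependency read barrier") only need
+ * to order data-dependent reads: when ETHR_ORDERED_READ_DEPEND is defined
+ * the platform already orders such loads and the plain op suffices;
+ * otherwise the _rb (read barrier) variant is used, as the #ifdef in the
+ * function below shows.
+ */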
+static ETHR_INLINE ethr_sint_t ETHR_ATMC_FUNC__(read_bor_ddrb)(ethr_atomic_t *var, ethr_sint_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC_FUNC__(read_bor)(var, val);
+#else
+ return ETHR_ATMC_FUNC__(read_bor_rb)(var, val);
+#endif
+}
+
+#endif /* ETHR_ATMC_INLINE__ */
+
+
+/* ---------- 32-bit atomic implementation ---------- */
+
+
+#ifdef ETHR_NEED_ATMC32_PROTOTYPES__
+ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_cmpxchg_ddrb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_cmpxchg_rb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_cmpxchg_wb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_cmpxchg_mb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val);
+ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_xchg_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_xchg_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_xchg_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_xchg_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_xchg_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_xchg_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_set_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_init_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_add_read_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_ddrb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_rb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_wb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_relb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_mb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read_ddrb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read_rb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read_wb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read_relb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_inc_read_mb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read_ddrb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read_rb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read_wb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read_acqb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_dec_read_mb(ethr_atomic32_t *var);
+void ethr_atomic32_add(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_add_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_add_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_add_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_add_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_add_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_add_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+void ethr_atomic32_inc(ethr_atomic32_t *var);
+void ethr_atomic32_inc_ddrb(ethr_atomic32_t *var);
+void ethr_atomic32_inc_rb(ethr_atomic32_t *var);
+void ethr_atomic32_inc_wb(ethr_atomic32_t *var);
+void ethr_atomic32_inc_acqb(ethr_atomic32_t *var);
+void ethr_atomic32_inc_relb(ethr_atomic32_t *var);
+void ethr_atomic32_inc_mb(ethr_atomic32_t *var);
+void ethr_atomic32_dec(ethr_atomic32_t *var);
+void ethr_atomic32_dec_ddrb(ethr_atomic32_t *var);
+void ethr_atomic32_dec_rb(ethr_atomic32_t *var);
+void ethr_atomic32_dec_wb(ethr_atomic32_t *var);
+void ethr_atomic32_dec_acqb(ethr_atomic32_t *var);
+void ethr_atomic32_dec_relb(ethr_atomic32_t *var);
+void ethr_atomic32_dec_mb(ethr_atomic32_t *var);
+ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_band_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_band_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_band_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_band_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_band_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_band_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor_ddrb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor_rb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor_wb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor_acqb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor_relb(ethr_atomic32_t *var, ethr_sint32_t val);
+ethr_sint32_t ethr_atomic32_read_bor_mb(ethr_atomic32_t *var, ethr_sint32_t val);
+#endif /* ETHR_NEED_ATMC32_PROTOTYPES__ */
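+/*
+ * Illustrative sketch of how the 32-bit API above might be used (a
+ * hypothetical reference count; destroy() stands in for caller code):
+ *
+ *   ethr_atomic32_t refc;
+ *   ethr_atomic32_init(&refc, 1);
+ *   ethr_atomic32_inc(&refc);
+ *   if (ethr_atomic32_dec_read_mb(&refc) == 0)
+ *       destroy();
+ *
+ * The _rb/_wb/_acqb/_relb/_mb suffixes select the memory barrier implied
+ * by the operation; the unsuffixed ops imply no barrier.
+ */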
+
+#if (defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS) \
+ && (defined(ETHR_ATMC32_INLINE__) || defined(ETHR_ATOMIC_IMPL__)))
+
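+/*
+ * Map the generic ETHR_HAVE_NATMC32_* capability macros onto whichever
+ * native implementation is present: the native 64-bit ops when
+ * ETHR_NATMC32_BITS__ is 64, or the native 32-bit ops when it is 32.
+ */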
+#if !defined(ETHR_NATMC32_BITS__)
+# error "Missing native atomic implementation"
+#elif ETHR_NATMC32_BITS__ == 64
+# undef ETHR_HAVE_NATMC32_CMPXCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG
+# define ETHR_HAVE_NATMC32_CMPXCHG 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RB
+# define ETHR_HAVE_NATMC32_CMPXCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_WB
+# define ETHR_HAVE_NATMC32_CMPXCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB
+# define ETHR_HAVE_NATMC32_CMPXCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RELB
+# define ETHR_HAVE_NATMC32_CMPXCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB
+# define ETHR_HAVE_NATMC32_CMPXCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG
+# define ETHR_HAVE_NATMC32_XCHG 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_RB
+# define ETHR_HAVE_NATMC32_XCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_WB
+# define ETHR_HAVE_NATMC32_XCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_ACQB
+# define ETHR_HAVE_NATMC32_XCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_RELB
+# define ETHR_HAVE_NATMC32_XCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_MB
+# define ETHR_HAVE_NATMC32_XCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET
+# define ETHR_HAVE_NATMC32_SET 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RB
+# define ETHR_HAVE_NATMC32_SET_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_WB
+# define ETHR_HAVE_NATMC32_SET_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_ACQB
+# define ETHR_HAVE_NATMC32_SET_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB
+# define ETHR_HAVE_NATMC32_SET_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB
+# define ETHR_HAVE_NATMC32_SET_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT
+# define ETHR_HAVE_NATMC32_INIT 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RB
+# define ETHR_HAVE_NATMC32_INIT_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_WB
+# define ETHR_HAVE_NATMC32_INIT_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_ACQB
+# define ETHR_HAVE_NATMC32_INIT_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RELB
+# define ETHR_HAVE_NATMC32_INIT_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_MB
+# define ETHR_HAVE_NATMC32_INIT_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN
+# define ETHR_HAVE_NATMC32_ADD_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_WB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_ACQB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RELB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ
+# define ETHR_HAVE_NATMC32_READ 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RB
+# define ETHR_HAVE_NATMC32_READ_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_WB
+# define ETHR_HAVE_NATMC32_READ_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_ACQB
+# define ETHR_HAVE_NATMC32_READ_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RELB
+# define ETHR_HAVE_NATMC32_READ_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_MB
+# define ETHR_HAVE_NATMC32_READ_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN
+# define ETHR_HAVE_NATMC32_INC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RB
+# define ETHR_HAVE_NATMC32_INC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_WB
+# define ETHR_HAVE_NATMC32_INC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_ACQB
+# define ETHR_HAVE_NATMC32_INC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RELB
+# define ETHR_HAVE_NATMC32_INC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_MB
+# define ETHR_HAVE_NATMC32_INC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN
+# define ETHR_HAVE_NATMC32_DEC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_WB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_ACQB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RELB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_MB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD
+# define ETHR_HAVE_NATMC32_ADD 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RB
+# define ETHR_HAVE_NATMC32_ADD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_WB
+# define ETHR_HAVE_NATMC32_ADD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_ACQB
+# define ETHR_HAVE_NATMC32_ADD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RELB
+# define ETHR_HAVE_NATMC32_ADD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_MB
+# define ETHR_HAVE_NATMC32_ADD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC
+# define ETHR_HAVE_NATMC32_INC 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RB
+# define ETHR_HAVE_NATMC32_INC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_WB
+# define ETHR_HAVE_NATMC32_INC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_ACQB
+# define ETHR_HAVE_NATMC32_INC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RELB
+# define ETHR_HAVE_NATMC32_INC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_MB
+# define ETHR_HAVE_NATMC32_INC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC
+# define ETHR_HAVE_NATMC32_DEC 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RB
+# define ETHR_HAVE_NATMC32_DEC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_WB
+# define ETHR_HAVE_NATMC32_DEC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_ACQB
+# define ETHR_HAVE_NATMC32_DEC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RELB
+# define ETHR_HAVE_NATMC32_DEC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_MB
+# define ETHR_HAVE_NATMC32_DEC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD
+# define ETHR_HAVE_NATMC32_AND_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_RB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_WB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_ACQB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_RELB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_MB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD
+# define ETHR_HAVE_NATMC32_OR_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_RB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_WB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_ACQB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_RELB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_MB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_MB 1
+# endif
+#elif ETHR_NATMC32_BITS__ == 32
+# undef ETHR_HAVE_NATMC32_CMPXCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG
+# define ETHR_HAVE_NATMC32_CMPXCHG 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RB
+# define ETHR_HAVE_NATMC32_CMPXCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_WB
+# define ETHR_HAVE_NATMC32_CMPXCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB
+# define ETHR_HAVE_NATMC32_CMPXCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB
+# define ETHR_HAVE_NATMC32_CMPXCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_CMPXCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB
+# define ETHR_HAVE_NATMC32_CMPXCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG
+# define ETHR_HAVE_NATMC32_XCHG 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RB
+# define ETHR_HAVE_NATMC32_XCHG_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_WB
+# define ETHR_HAVE_NATMC32_XCHG_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_ACQB
+# define ETHR_HAVE_NATMC32_XCHG_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RELB
+# define ETHR_HAVE_NATMC32_XCHG_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_XCHG_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_MB
+# define ETHR_HAVE_NATMC32_XCHG_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET
+# define ETHR_HAVE_NATMC32_SET 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RB
+# define ETHR_HAVE_NATMC32_SET_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_WB
+# define ETHR_HAVE_NATMC32_SET_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_ACQB
+# define ETHR_HAVE_NATMC32_SET_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RELB
+# define ETHR_HAVE_NATMC32_SET_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_SET_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB
+# define ETHR_HAVE_NATMC32_SET_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT
+# define ETHR_HAVE_NATMC32_INIT 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_RB
+# define ETHR_HAVE_NATMC32_INIT_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_WB
+# define ETHR_HAVE_NATMC32_INIT_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_ACQB
+# define ETHR_HAVE_NATMC32_INIT_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_RELB
+# define ETHR_HAVE_NATMC32_INIT_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INIT_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_MB
+# define ETHR_HAVE_NATMC32_INIT_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN
+# define ETHR_HAVE_NATMC32_ADD_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_WB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_ACQB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RELB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB
+# define ETHR_HAVE_NATMC32_ADD_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ
+# define ETHR_HAVE_NATMC32_READ 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RB
+# define ETHR_HAVE_NATMC32_READ_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_WB
+# define ETHR_HAVE_NATMC32_READ_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_ACQB
+# define ETHR_HAVE_NATMC32_READ_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RELB
+# define ETHR_HAVE_NATMC32_READ_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_READ_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_MB
+# define ETHR_HAVE_NATMC32_READ_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN
+# define ETHR_HAVE_NATMC32_INC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RB
+# define ETHR_HAVE_NATMC32_INC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_WB
+# define ETHR_HAVE_NATMC32_INC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_ACQB
+# define ETHR_HAVE_NATMC32_INC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RELB
+# define ETHR_HAVE_NATMC32_INC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB
+# define ETHR_HAVE_NATMC32_INC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN
+# define ETHR_HAVE_NATMC32_DEC_RETURN 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_WB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_ACQB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RELB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RETURN_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_MB
+# define ETHR_HAVE_NATMC32_DEC_RETURN_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD
+# define ETHR_HAVE_NATMC32_ADD 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RB
+# define ETHR_HAVE_NATMC32_ADD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_WB
+# define ETHR_HAVE_NATMC32_ADD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_ACQB
+# define ETHR_HAVE_NATMC32_ADD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RELB
+# define ETHR_HAVE_NATMC32_ADD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_ADD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_MB
+# define ETHR_HAVE_NATMC32_ADD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC
+# define ETHR_HAVE_NATMC32_INC 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RB
+# define ETHR_HAVE_NATMC32_INC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_WB
+# define ETHR_HAVE_NATMC32_INC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_ACQB
+# define ETHR_HAVE_NATMC32_INC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RELB
+# define ETHR_HAVE_NATMC32_INC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_INC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_MB
+# define ETHR_HAVE_NATMC32_INC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC
+# define ETHR_HAVE_NATMC32_DEC 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RB
+# define ETHR_HAVE_NATMC32_DEC_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_WB
+# define ETHR_HAVE_NATMC32_DEC_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_ACQB
+# define ETHR_HAVE_NATMC32_DEC_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RELB
+# define ETHR_HAVE_NATMC32_DEC_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_DEC_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_MB
+# define ETHR_HAVE_NATMC32_DEC_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD
+# define ETHR_HAVE_NATMC32_AND_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_WB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_ACQB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RELB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_AND_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_MB
+# define ETHR_HAVE_NATMC32_AND_RETOLD_MB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD
+# define ETHR_HAVE_NATMC32_OR_RETOLD 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_RB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_RB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_WB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_WB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_WB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_ACQB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_ACQB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_ACQB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_RELB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RELB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_RELB 1
+# endif
+# undef ETHR_HAVE_NATMC32_OR_RETOLD_MB
+# ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_MB
+# define ETHR_HAVE_NATMC32_OR_RETOLD_MB 1
+# endif
+#else
+# error "Invalid native atomic size"
+#endif
+
+#if (!defined(ETHR_HAVE_NATMC32_CMPXCHG) \
+ && !defined(ETHR_HAVE_NATMC32_CMPXCHG_RB) \
+ && !defined(ETHR_HAVE_NATMC32_CMPXCHG_WB) \
+ && !defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB) \
+ && !defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB) \
+ && !defined(ETHR_HAVE_NATMC32_CMPXCHG_MB))
+# error "No native cmpxchg() op available"
+#endif
+
+
+/*
+ * Read op used together with cmpxchg() fallback when no native op present.
+ */
+#if defined(ETHR_HAVE_NATMC32_READ)
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC32_FUNC__(read)(VAR)
+#elif defined(ETHR_HAVE_NATMC32_READ_RB)
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC32_FUNC__(read_rb)(VAR)
+#elif defined(ETHR_HAVE_NATMC32_READ_WB)
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC32_FUNC__(read_wb)(VAR)
+#elif defined(ETHR_HAVE_NATMC32_READ_ACQB)
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC32_FUNC__(read_acqb)(VAR)
+#elif defined(ETHR_HAVE_NATMC32_READ_RELB)
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC32_FUNC__(read_relb)(VAR)
+#elif defined(ETHR_HAVE_NATMC32_READ_MB)
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ETHR_NATMC32_FUNC__(read_mb)(VAR)
+#else
/*
- * --- 32-bit atomics ---------------------------------------------------------
+ * We have no native read() op; guess zero and then use the atomic's
+ * actual value returned from cmpxchg() as the next guess, so the retry
+ * loop still converges on the real value after the first failed cmpxchg.
*/
+#define ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR) \
+ ((ETHR_NAINT32_T__) 0)
+#endif
+
+/*
+ * Native cmpxchg() fallback used when no native op present.
+ */
+#define ETHR_NATMC32_CMPXCHG_FALLBACK__(CMPXCHG, VAR, AVAL, OPS) \
+do { \
+ ethr_sint32_t AVAL; \
+ ETHR_NAINT32_T__ new__, act__, exp__; \
+ act__ = ETHR_NATMC32_CMPXCHG_FALLBACK_READ__(VAR); \
+ do { \
+ exp__ = act__; \
+ AVAL = (ethr_sint32_t) act__; \
+ { OPS; } \
+ new__ = (ETHR_NAINT32_T__) AVAL; \
+ act__ = CMPXCHG(VAR, new__, exp__); \
+ } while (__builtin_expect(act__ != exp__, 0)); \
+} while (0)
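+/*
+ * Illustrative expansion (a sketch, not generated code): the ops below
+ * invoke the fallback as, for example,
+ *
+ *   ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg),
+ *                                   var, aval, res = aval; aval = val);
+ *
+ * where OPS first saves the currently observed value (res = aval) and then
+ * computes the new value in aval; the loop retries the cmpxchg until no
+ * other thread changed the variable between the read and the exchange.
+ */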
+
+
+
+/* --- addr() --- */
+
+static ETHR_INLINE ethr_sint32_t *ETHR_ATMC32_FUNC__(addr)(ethr_atomic32_t *var)
+{
+ return (ethr_sint32_t *) ETHR_NATMC32_ADDR_FUNC__(var);
-static ETHR_INLINE ethr_sint32_t *
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_addr)(ethr_atomic32_t *var)
+}
+
+
+/* --- cmpxchg() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic32_addr(var);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
#else
- return (ethr_sint32_t *) var;
+#error "Missing implementation of ethr_atomic32_cmpxchg()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_init)(ethr_atomic32_t *var,
- ethr_sint32_t i)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg_rb)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) i);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#error "Missing implementation of ethr_atomic32_cmpxchg_rb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(ethr_atomic32_t *var, ethr_sint32_t i)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg_wb)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) i);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#error "Missing implementation of ethr_atomic32_cmpxchg_wb()!"
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(ethr_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg_acqb)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
+#error "Missing implementation of ethr_atomic32_cmpxchg_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg_relb)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#if defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#else
+#error "Missing implementation of ethr_atomic32_cmpxchg_relb()!"
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg_mb)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_mb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_relb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_wb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg_rb)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var, (ETHR_NAINT32_T__) val, (ETHR_NAINT32_T__) old_val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_cmpxchg_mb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add)(ethr_atomic32_t *var,
- ethr_sint32_t incr)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(cmpxchg_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) incr);
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(cmpxchg)(var, val, old_val);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+ return ETHR_ATMC32_FUNC__(cmpxchg_rb)(var, val, old_val);
#endif
-}
-
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add_read)(ethr_atomic32_t *var,
- ethr_sint32_t i)
+}
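
The _ddrb ("dependency read barrier") variants, such as cmpxchg_ddrb() above, collapse to the plain operation when the platform already orders data-dependent loads (ETHR_ORDERED_READ_DEPEND defined) and to the _rb variant otherwise. A minimal sketch of the reader pattern this distinction serves, written against C11 atomics rather than the ethread API; the names and the use of <stdatomic.h> are assumptions for illustration and are not part of the patch:

    #include <stdatomic.h>
    #include <stdint.h>

    static int32_t slots[16];        /* data published by a writer thread */
    static _Atomic int32_t latest;   /* index of the most recently published slot */

    /* The reader only dereferences data reached through the value it read.
     * On machines that order data-dependent loads this needs no extra
     * barrier (the ETHR_ORDERED_READ_DEPEND case); elsewhere at least a
     * read barrier is needed, which is what the _rb fallback supplies.
     * memory_order_acquire is a conservative stand-in for that ordering. */
    static int32_t read_latest_slot(void)
    {
        int32_t i = atomic_load_explicit(&latest, memory_order_acquire);
        return slots[i & 15];
    }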
+
+
+/* --- xchg() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg)(ethr_atomic32_t *var, ethr_sint32_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t)
- ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) i);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_XCHG)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
#else
+#error "Missing implementation of ethr_atomic32_xchg()!"
+#endif
+ return res;
+}
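
When no native xchg exists, the cmpxchg-based branches above expand ETHR_NATMC32_CMPXCHG_FALLBACK__ into a compare-and-swap retry loop. A hedged sketch of that shape in plain C11; the helper name and <stdatomic.h> usage are assumptions for illustration, not the macro's actual definition:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Emulate an atomic exchange with a CAS retry loop, the shape the
     * cmpxchg fallback branches of xchg() expand to. */
    static int32_t xchg_via_cmpxchg(_Atomic int32_t *var, int32_t val)
    {
        int32_t seen = atomic_load_explicit(var, memory_order_relaxed);
        /* Retry until the swap succeeds; 'seen' is refreshed on failure. */
        while (!atomic_compare_exchange_weak_explicit(var, &seen, val,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
            ;
        return seen; /* the previous value, which xchg() must return */
    }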
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+#if defined(ETHR_HAVE_NATMC32_XCHG_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic32_xchg_rb()!"
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_XCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+#else
+#error "Missing implementation of ethr_atomic32_xchg_wb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc)(ethr_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC32_FUNC__(inc)(var);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_XCHG_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_XCHG)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#error "Missing implementation of ethr_atomic32_xchg_acqb()!"
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(ethr_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC32_FUNC__(dec)(var);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_XCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#else
+#error "Missing implementation of ethr_atomic32_xchg_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_XCHG_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_XCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_XCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval = val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_xchg_mb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(xchg_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(xchg)(var, val);
+#else
+ return ETHR_ATMC32_FUNC__(xchg_rb)(var, val);
+#endif
+}
+
+
+/* --- set() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_SET)
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RB)
+ ETHR_NATMC32_FUNC__(set_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_WB)
+ ETHR_NATMC32_FUNC__(set_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_ACQB)
+ ETHR_NATMC32_FUNC__(set_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RELB)
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_MB)
+ ETHR_NATMC32_FUNC__(set_mb)(var, (ETHR_NAINT32_T__) val);
+#else
+ (void) ETHR_ATMC32_FUNC__(xchg)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_SET_RB)
+ ETHR_NATMC32_FUNC__(set_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET)
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_SET_MB)
+ ETHR_NATMC32_FUNC__(set_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_WB)
+ ETHR_NATMC32_FUNC__(set_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_SET_ACQB)
+ ETHR_NATMC32_FUNC__(set_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_SET_RELB)
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ (void) ETHR_ATMC32_FUNC__(xchg_rb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_SET_WB)
+ ETHR_NATMC32_FUNC__(set_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_MB)
+ ETHR_NATMC32_FUNC__(set_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ (void) ETHR_ATMC32_FUNC__(xchg_wb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_SET_ACQB)
+ ETHR_NATMC32_FUNC__(set_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RB)
+ ETHR_NATMC32_FUNC__(set_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_SET)
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_SET_MB)
+ ETHR_NATMC32_FUNC__(set_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_WB)
+ ETHR_NATMC32_FUNC__(set_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_SET_RELB)
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(xchg_acqb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_SET_RELB)
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_NATMC32_FUNC__(set_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_MB)
+ ETHR_NATMC32_FUNC__(set_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_acqb)(var, (ETHR_NAINT32_T__) val);
+#else
+ (void) ETHR_ATMC32_FUNC__(xchg_relb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_SET_MB)
+ ETHR_NATMC32_FUNC__(set_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_RELB)
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_SET_ACQB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_SET_WB)
+ ETHR_MEMBAR(ETHR_LoadStore);
+ ETHR_NATMC32_FUNC__(set_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_SET_RB)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_SET)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(xchg_mb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(set_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC32_FUNC__(set)(var, val);
+#else
+ ETHR_ATMC32_FUNC__(set_rb)(var, val);
+#endif
+}
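
The set_*() fallbacks above compose the requested ordering from whatever native store is available plus an explicit ETHR_MEMBAR; for example, a leading LoadStore|StoreStore barrier turns a plain set() into a release store, as in set_relb(). The same composition expressed with C11 primitives, as an illustration only (names and <stdatomic.h> usage are assumptions, not part of the patch):

    #include <stdatomic.h>
    #include <stdint.h>

    /* A release-ordered set built from a relaxed store preceded by a
     * fence, mirroring the ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore)
     * followed by plain set() branch of set_relb() above. */
    static void set_relb_composed(_Atomic int32_t *var, int32_t val)
    {
        atomic_thread_fence(memory_order_release);   /* orders earlier accesses */
        atomic_store_explicit(var, val, memory_order_relaxed);
    }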
+
+
+/* --- init() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_INIT)
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RB)
+ ETHR_NATMC32_FUNC__(init_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_WB)
+ ETHR_NATMC32_FUNC__(init_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_ACQB)
+ ETHR_NATMC32_FUNC__(init_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RELB)
+ ETHR_NATMC32_FUNC__(init_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_MB)
+ ETHR_NATMC32_FUNC__(init_mb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_ATMC32_FUNC__(set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_INIT_RB)
+ ETHR_NATMC32_FUNC__(init_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT)
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INIT_MB)
+ ETHR_NATMC32_FUNC__(init_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_WB)
+ ETHR_NATMC32_FUNC__(init_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INIT_ACQB)
+ ETHR_NATMC32_FUNC__(init_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RELB)
+ ETHR_NATMC32_FUNC__(init_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATMC32_FUNC__(set_rb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_INIT_WB)
+ ETHR_NATMC32_FUNC__(init_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_MB)
+ ETHR_NATMC32_FUNC__(init_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_ATMC32_FUNC__(set_wb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_INIT_ACQB)
+ ETHR_NATMC32_FUNC__(init_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RB)
+ ETHR_NATMC32_FUNC__(init_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INIT)
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INIT_MB)
+ ETHR_NATMC32_FUNC__(init_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_WB)
+ ETHR_NATMC32_FUNC__(init_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RELB)
+ ETHR_NATMC32_FUNC__(init_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATMC32_FUNC__(set_acqb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_INIT_RELB)
+ ETHR_NATMC32_FUNC__(init_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(init_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_MB)
+ ETHR_NATMC32_FUNC__(init_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_acqb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_ATMC32_FUNC__(set_relb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_INIT_MB)
+ ETHR_NATMC32_FUNC__(init_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RELB)
+ ETHR_NATMC32_FUNC__(init_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INIT_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_INIT_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(init_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INIT_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INIT)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_ATMC32_FUNC__(set_mb)(var, val);
#endif
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(ethr_atomic32_t *var)
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(init_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC32_FUNC__(init)(var, val);
#else
+ ETHR_ATMC32_FUNC__(init_rb)(var, val);
+#endif
+}
+
+
+/* --- add_read() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#if defined(ETHR_HAVE_NATMC32_ADD_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#else
+#error "Missing implementation of ethr_atomic32_add_read()!"
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_ADD_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic32_add_read_rb()!"
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(ethr_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_ADD_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
#else
+#error "Missing implementation of ethr_atomic32_add_read_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#if defined(ETHR_HAVE_NATMC32_ADD_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_add_read_acqb()!"
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_ADD_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#else
+#error "Missing implementation of ethr_atomic32_add_read_relb()!"
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_band)(ethr_atomic32_t *var,
- ethr_sint32_t mask)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t)
- ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) mask);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_ADD_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, aval += val; res = aval);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, aval += val; res = aval);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_add_read_mb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(add_read_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(add_read)(var, val);
+#else
+ return ETHR_ATMC32_FUNC__(add_read_rb)(var, val);
+#endif
+}
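
The cmpxchg-based branches of the add_read_*() functions above return the updated value: each retry runs "aval += val; res = aval" against the value observed by the failed compare-and-swap. A sketch of that loop in C11, under the same assumptions as the earlier sketches:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Add a value and return the new sum via a CAS retry loop, the shape
     * of the ETHR_NATMC32_CMPXCHG_FALLBACK__ expansions in add_read(). */
    static int32_t add_read_via_cmpxchg(_Atomic int32_t *var, int32_t val)
    {
        int32_t seen = atomic_load_explicit(var, memory_order_relaxed);
        int32_t next;
        do {
            next = seen + val;                       /* aval += val */
        } while (!atomic_compare_exchange_weak_explicit(var, &seen, next,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
        return next;                                 /* res = aval */
    }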
+
+
+/* --- read() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_READ)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_mb)(var);
#else
+ res = ETHR_ATMC32_FUNC__(cmpxchg)(var, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_rb)(ethr_atomic32_t *var)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+#if defined(ETHR_HAVE_NATMC32_READ_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_READ_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_READ_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_READ_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ res = ETHR_ATMC32_FUNC__(cmpxchg_rb)(var, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_wb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_READ_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_relb)(var);
+#else
+ res = ETHR_ATMC32_FUNC__(cmpxchg_wb)(var, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_bor)(ethr_atomic32_t *var,
- ethr_sint32_t mask)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_acqb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return
- (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var,
- (ETHR_NAINT32_T__) mask);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_READ_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC32_READ)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC32_READ_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC32_READ_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
#else
+ res = ETHR_ATMC32_FUNC__(cmpxchg_acqb)(var, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_relb)(ethr_atomic32_t *var)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+#if defined(ETHR_HAVE_NATMC32_READ_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#else
+ res = ETHR_ATMC32_FUNC__(cmpxchg_relb)(var, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_mb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_READ_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC32_READ_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_READ_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC32_READ_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#elif defined(ETHR_HAVE_NATMC32_READ)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#else
+ res = ETHR_ATMC32_FUNC__(cmpxchg_mb)(var, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__, (ethr_sint32_t) ETHR_UNUSUAL_SINT32_VAL__);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_xchg)(ethr_atomic32_t *var,
- ethr_sint32_t new)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_ddrb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var,
- (ETHR_NAINT32_T__) new);
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(read)(var);
#else
+ return ETHR_ATMC32_FUNC__(read_rb)(var);
+#endif
+}
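
When no native read exists, read() above falls back to cmpxchg(var, v, v) with v = ETHR_UNUSUAL_SINT32_VAL__: a compare-and-swap whose swap value equals its expected value returns the current contents and only writes when the variable already happens to hold that deliberately unlikely value. A minimal C11 illustration of the trick; the concrete constant and helper name are made up for the sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Read an atomic using only compare-and-swap, as the cmpxchg fallback
     * of read() does. On a miss nothing is stored and the observed value
     * is handed back through 'seen'. */
    static int32_t read_via_cmpxchg(_Atomic int32_t *var)
    {
        int32_t unusual = (int32_t) 0xdeadbeefU;  /* stand-in for the unusual value */
        int32_t seen = unusual;
        atomic_compare_exchange_strong_explicit(var, &seen, unusual,
                                                memory_order_relaxed,
                                                memory_order_relaxed);
        return seen; /* current value on a miss; equals 'unusual' on a hit */
    }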
+
+
+/* --- inc_read() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read)(ethr_atomic32_t *var)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+#if defined(ETHR_HAVE_NATMC32_INC_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_mb)(var);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read)(var, (ethr_sint32_t) 1);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read_rb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_INC_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_rb)(var, (ethr_sint32_t) 1);
#endif
+ return res;
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(ethr_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t exp)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read_wb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var,
- (ETHR_NAINT32_T__) new,
- (ETHR_NAINT32_T__) exp);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_INC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_relb)(var);
#else
+ res = ETHR_ATMC32_FUNC__(add_read_wb)(var, (ethr_sint32_t) 1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read_acqb)(ethr_atomic32_t *var)
+{
ethr_sint32_t res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
- {
- res = *var;
- if (__builtin_expect(res == exp, 1))
- *var = new;
- });
+#if defined(ETHR_HAVE_NATMC32_INC_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_acqb)(var, (ethr_sint32_t) 1);
+#endif
return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read_relb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_INC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_relb)(var, (ethr_sint32_t) 1);
#endif
+ return res;
}
-/*
- * Important memory barrier requirements.
- *
- * The following atomic operations *must* supply a memory barrier of
- * at least the type specified by its suffix:
- * _acqb = acquire barrier
- * _relb = release barrier
- */
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read_mb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_INC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_mb)(var, (ethr_sint32_t) 1);
+#endif
+ return res;
+}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_acqb)(ethr_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(inc_read_ddrb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(inc_read)(var);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(var);
+ return ETHR_ATMC32_FUNC__(inc_read_rb)(var);
#endif
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read_acqb)(ethr_atomic32_t *var)
+
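
The acqb/relb flavors generated for inc_read() above and dec_read() below are what a reference count needs: increments can stay relaxed, the decrement must release, and the thread that drives the count to zero must acquire before tearing the object down. A hedged C11 sketch of that pattern (the struct and free hook are inventions for the example, not part of the patch):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct obj {
        _Atomic int32_t refc;
        /* ... payload ... */
    };

    static void obj_release(struct obj *o)
    {
        /* Like dec_read_relb(): fetch_sub returns the previous value, so a
         * result of 1 means the count just reached zero. */
        if (atomic_fetch_sub_explicit(&o->refc, 1, memory_order_release) == 1) {
            atomic_thread_fence(memory_order_acquire);  /* pairs with the releases */
            free(o);
        }
    }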
+/* --- dec_read() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_DEC_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_mb)(var);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(var);
+ res = ETHR_ATMC32_FUNC__(add_read)(var, (ethr_sint32_t) -1);
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set_relb)(ethr_atomic32_t *var,
- ethr_sint32_t val)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read_rb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_DEC_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(var, val);
+ res = ETHR_ATMC32_FUNC__(add_read_rb)(var, (ethr_sint32_t) -1);
#endif
+ return res;
}
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_relb)(ethr_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read_wb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_DEC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_wb)(var, (ethr_sint32_t) -1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read_acqb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_DEC_RETURN_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_acqb)(var, (ethr_sint32_t) -1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read_relb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_DEC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_acqb)(var);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_relb)(var, (ethr_sint32_t) -1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read_mb)(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_DEC_RETURN_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RETURN)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ res = ETHR_ATMC32_FUNC__(add_read_mb)(var, (ethr_sint32_t) -1);
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(dec_read_ddrb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(dec_read)(var);
+#else
+ return ETHR_ATMC32_FUNC__(dec_read_rb)(var);
+#endif
+}
+
+
+/* --- add() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_ADD)
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RB)
+ ETHR_NATMC32_FUNC__(add_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_WB)
+ ETHR_NATMC32_FUNC__(add_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_ACQB)
+ ETHR_NATMC32_FUNC__(add_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RELB)
+ ETHR_NATMC32_FUNC__(add_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_MB)
+ ETHR_NATMC32_FUNC__(add_mb)(var, (ETHR_NAINT32_T__) val);
+#else
+ (void) ETHR_ATMC32_FUNC__(add_read)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_ADD_RB)
+ ETHR_NATMC32_FUNC__(add_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD)
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_ADD_MB)
+ ETHR_NATMC32_FUNC__(add_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_WB)
+ ETHR_NATMC32_FUNC__(add_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_ADD_ACQB)
+ ETHR_NATMC32_FUNC__(add_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RELB)
+ ETHR_NATMC32_FUNC__(add_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ (void) ETHR_ATMC32_FUNC__(add_read_rb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_ADD_WB)
+ ETHR_NATMC32_FUNC__(add_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_MB)
+ ETHR_NATMC32_FUNC__(add_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(add_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(add_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(add_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ (void) ETHR_ATMC32_FUNC__(add_read_wb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_ADD_ACQB)
+ ETHR_NATMC32_FUNC__(add_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RB)
+ ETHR_NATMC32_FUNC__(add_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD)
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_MB)
+ ETHR_NATMC32_FUNC__(add_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_WB)
+ ETHR_NATMC32_FUNC__(add_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RELB)
+ ETHR_NATMC32_FUNC__(add_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(add_read_acqb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_ADD_RELB)
+ ETHR_NATMC32_FUNC__(add_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_MB)
+ ETHR_NATMC32_FUNC__(add_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add_acqb)(var, (ETHR_NAINT32_T__) val);
+#else
+ (void) ETHR_ATMC32_FUNC__(add_read_relb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_NATMC32_ADD_MB)
+ ETHR_NATMC32_FUNC__(add_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RELB)
+ ETHR_NATMC32_FUNC__(add_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_ADD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_ADD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(add_read_mb)(var, val);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(add_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC32_FUNC__(add)(var, val);
+#else
+ ETHR_ATMC32_FUNC__(add_rb)(var, val);
+#endif
+}
+
+
+/* --- inc() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_INC)
+ ETHR_NATMC32_FUNC__(inc)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RB)
+ ETHR_NATMC32_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_WB)
+ ETHR_NATMC32_FUNC__(inc_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_ACQB)
+ ETHR_NATMC32_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RELB)
+ ETHR_NATMC32_FUNC__(inc_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_MB)
+ ETHR_NATMC32_FUNC__(inc_mb)(var);
+#else
+ (void) ETHR_ATMC32_FUNC__(inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc_rb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_INC_RB)
+ ETHR_NATMC32_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC)
+ ETHR_NATMC32_FUNC__(inc)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INC_MB)
+ ETHR_NATMC32_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_WB)
+ ETHR_NATMC32_FUNC__(inc_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INC_ACQB)
+ ETHR_NATMC32_FUNC__(inc_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_INC_RELB)
+ ETHR_NATMC32_FUNC__(inc_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ (void) ETHR_ATMC32_FUNC__(inc_read_rb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc_wb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_INC_WB)
+ ETHR_NATMC32_FUNC__(inc_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(inc)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_MB)
+ ETHR_NATMC32_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(inc_relb)(var);
+#else
+ (void) ETHR_ATMC32_FUNC__(inc_read_wb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc_acqb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_INC_ACQB)
+ ETHR_NATMC32_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RB)
+ ETHR_NATMC32_FUNC__(inc_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC)
+ ETHR_NATMC32_FUNC__(inc)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_MB)
+ ETHR_NATMC32_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_WB)
+ ETHR_NATMC32_FUNC__(inc_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RELB)
+ ETHR_NATMC32_FUNC__(inc_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(inc_read_acqb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc_relb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_INC_RELB)
+ ETHR_NATMC32_FUNC__(inc_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_MB)
+ ETHR_NATMC32_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc_acqb)(var);
+#else
+ (void) ETHR_ATMC32_FUNC__(inc_read_relb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc_mb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_INC_MB)
+ ETHR_NATMC32_FUNC__(inc_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_RELB)
+ ETHR_NATMC32_FUNC__(inc_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_INC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_INC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(inc)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(inc_read_mb)(var);
+#endif
+}
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(inc_ddrb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC32_FUNC__(inc)(var);
+#else
+ ETHR_ATMC32_FUNC__(inc_rb)(var);
+#endif
+}
+
+
+/* --- dec() --- */
+
+
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_DEC)
+ ETHR_NATMC32_FUNC__(dec)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RB)
+ ETHR_NATMC32_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_WB)
+ ETHR_NATMC32_FUNC__(dec_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_ACQB)
+ ETHR_NATMC32_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RELB)
ETHR_NATMC32_FUNC__(dec_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_MB)
+ ETHR_NATMC32_FUNC__(dec_mb)(var);
#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(var);
+ (void) ETHR_ATMC32_FUNC__(dec_read)(var);
#endif
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read_relb)(ethr_atomic32_t *var)
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec_rb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#if defined(ETHR_HAVE_NATMC32_DEC_RB)
+ ETHR_NATMC32_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC)
+ ETHR_NATMC32_FUNC__(dec)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_DEC_MB)
+ ETHR_NATMC32_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_WB)
+ ETHR_NATMC32_FUNC__(dec_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_DEC_ACQB)
+ ETHR_NATMC32_FUNC__(dec_acqb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RELB)
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(var);
+ (void) ETHR_ATMC32_FUNC__(dec_read_rb)(var);
#endif
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_acqb)(ethr_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t exp)
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec_wb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t)
- ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var,
- (ETHR_NAINT32_T__) new,
- (ETHR_NAINT32_T__) exp);
+#if defined(ETHR_HAVE_NATMC32_DEC_WB)
+ ETHR_NATMC32_FUNC__(dec_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(dec)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_MB)
+ ETHR_NATMC32_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+ (void) ETHR_ATMC32_FUNC__(dec_read_wb)(var);
#endif
}
-static ETHR_INLINE ethr_sint32_t
-ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_relb)(ethr_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t exp)
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec_acqb)(ethr_atomic32_t *var)
{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return (ethr_sint32_t)
- ETHR_NATMC32_FUNC__(cmpxchg_relb)(var,
- (ETHR_NAINT32_T__) new,
- (ETHR_NAINT32_T__) exp);
+#if defined(ETHR_HAVE_NATMC32_DEC_ACQB)
+ ETHR_NATMC32_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RB)
+ ETHR_NATMC32_FUNC__(dec_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC)
+ ETHR_NATMC32_FUNC__(dec)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_MB)
+ ETHR_NATMC32_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_WB)
+ ETHR_NATMC32_FUNC__(dec_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RELB)
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+ (void) ETHR_ATMC32_FUNC__(dec_read_acqb)(var);
#endif
}
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec_relb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_DEC_RELB)
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec_wb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_MB)
+ ETHR_NATMC32_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec_rb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec_acqb)(var);
+#else
+ (void) ETHR_ATMC32_FUNC__(dec_read_relb)(var);
+#endif
+}
-#endif /* ETHR_TRY_INLINE_FUNCS */
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec_mb)(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_NATMC32_DEC_MB)
+ ETHR_NATMC32_FUNC__(dec_mb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RELB)
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec_acqb)(var);
+#elif defined(ETHR_HAVE_NATMC32_DEC_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec_wb)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec_rb)(var);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_DEC)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_FUNC__(dec)(var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ (void) ETHR_ATMC32_FUNC__(dec_read_mb)(var);
+#endif
+}
-#undef ETHR_NAINT_T__
-#undef ETHR_NATMC_FUNC__
-#undef ETHR_NATMC_ADDR_FUNC__
+static ETHR_INLINE void ETHR_ATMC32_FUNC__(dec_ddrb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ETHR_ATMC32_FUNC__(dec)(var);
+#else
+ ETHR_ATMC32_FUNC__(dec_rb)(var);
+#endif
+}
+
+
+/* --- read_band() --- */
-#undef ETHR_NAINT32_T__
-#undef ETHR_NATMC32_FUNC__
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_AND_RETOLD)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#else
+#error "Missing implementation of ethr_atomic32_read_band()!"
#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_AND_RETOLD_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic32_read_band_rb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_AND_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+#else
+#error "Missing implementation of ethr_atomic32_read_band_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_AND_RETOLD_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_read_band_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_AND_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#else
+#error "Missing implementation of ethr_atomic32_read_band_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_AND_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_AND_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval &= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_read_band_mb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_band_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(read_band)(var, val);
+#else
+ return ETHR_ATMC32_FUNC__(read_band_rb)(var, val);
+#endif
+}
+
+
+/* --- read_bor() --- */
+
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_OR_RETOLD)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#else
+#error "Missing implementation of ethr_atomic32_read_bor()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor_rb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_OR_RETOLD_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+#error "Missing implementation of ethr_atomic32_read_bor_rb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor_wb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_OR_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+#else
+#error "Missing implementation of ethr_atomic32_read_bor_wb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor_acqb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_OR_RETOLD_ACQB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_WB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_read_bor_acqb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor_relb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_OR_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_relb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_wb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_rb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#else
+#error "Missing implementation of ethr_atomic32_read_bor_relb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor_mb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+#if defined(ETHR_HAVE_NATMC32_OR_RETOLD_MB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_mb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RELB)
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_relb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_acqb)(var, (ETHR_NAINT32_T__) val);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_wb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold_rb)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_OR_RETOLD)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var, (ETHR_NAINT32_T__) val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_MB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_mb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RELB)
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_relb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_ACQB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_acqb), var, aval, res = aval; aval |= val);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_WB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_wb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG_RB)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg_rb), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#elif defined(ETHR_HAVE_NATMC32_CMPXCHG)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_NATMC32_CMPXCHG_FALLBACK__(ETHR_NATMC32_FUNC__(cmpxchg), var, aval, res = aval; aval |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+#error "Missing implementation of ethr_atomic32_read_bor_mb()!"
+#endif
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t ETHR_ATMC32_FUNC__(read_bor_ddrb)(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#ifdef ETHR_ORDERED_READ_DEPEND
+ return ETHR_ATMC32_FUNC__(read_bor)(var, val);
+#else
+ return ETHR_ATMC32_FUNC__(read_bor_rb)(var, val);
+#endif
+}
+
+#endif /* ETHR_ATMC32_INLINE__ */
+
+#endif /* ETHR_ATOMICS_H__ */
diff --git a/erts/include/internal/ethr_internal.h b/erts/include/internal/ethr_internal.h
index e9c3daf783..c9b1db5b46 100644
--- a/erts/include/internal/ethr_internal.h
+++ b/erts/include/internal/ethr_internal.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -63,5 +63,9 @@ int ethr_late_init_common__(ethr_late_init_data *lid);
void ethr_run_exit_handlers__(void);
void ethr_ts_event_destructor__(void *vtsep);
+#if defined(ETHR_X86_RUNTIME_CONF__)
+int ethr_x86_have_cpuid__(void);
+void ethr_x86_cpuid__(int *eax, int *ebx, int *ecx, int *edx);
+#endif
#endif
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index fadaf1e2a4..86a1e9fbdf 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -22,6 +22,23 @@
* Author: Rickard Green
*/
+/*
+ * IMPORTANT note about ethr_cond_signal() and ethr_cond_broadcast()
+ *
+ * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
+ * even though the associated mutex/mutexes isn't/aren't locked by the
+ * caller. We do not allow that by default in order to avoid a performance
+ * penalty on some platforms.
+ *
+ * Mutexes and condition variables can, however, be initialized as POSIX
+ * compliant. When initialized as such, ethr_cond_signal() and
+ * ethr_cond_broadcast() are allowed to be called even though the associated
+ * mutexes aren't locked. This will, however, incur a performance penalty on
+ * some platforms.
+ *
+ * POSIX compliant mutexes and condition variables *need* to be used together.
+ */
+
#ifndef ETHR_MUTEX_H__
#define ETHR_MUTEX_H__
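A minimal usage sketch (not part of the patch) of the POSIX-compliant mode described in the note above, assuming the ethr_mutex_opt/ethr_cond_opt structs and ethr_mutex_init_opt() added later in this header, and assuming a corresponding ethr_cond_init_opt() exists by analogy:

    /* Hypothetical illustration only; the posix_compliant field and the
     * *_OPT_DEFAULT_INITER macros are the ones introduced by this patch. */
    ethr_mutex mtx;
    ethr_cond cnd;
    ethr_mutex_opt mopt = ETHR_MUTEX_OPT_DEFAULT_INITER;
    ethr_cond_opt copt = ETHR_COND_OPT_DEFAULT_INITER;

    mopt.posix_compliant = 1;
    copt.posix_compliant = 1;

    if (ethr_mutex_init_opt(&mtx, &mopt) != 0)  /* declared later in this header */
        handle_error();                         /* hypothetical error handler */
    if (ethr_cond_init_opt(&cnd, &copt) != 0)   /* assumed analogue of the mutex call */
        handle_error();

    /* With both objects POSIX compliant, ethr_cond_signal(&cnd) and
     * ethr_cond_broadcast(&cnd) may be called without holding mtx. */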
@@ -40,6 +57,14 @@
#endif
#endif
+/* #define ETHR_DBG_WIN_MTX_WITH_PTHREADS */
+#ifdef ETHR_DBG_WIN_MTX_WITH_PTHREADS
+typedef pthread_mutex_t CRITICAL_SECTION;
+int TryEnterCriticalSection(CRITICAL_SECTION *);
+void EnterCriticalSection(CRITICAL_SECTION *);
+void LeaveCriticalSection(CRITICAL_SECTION *);
+#endif
+
#ifdef ETHR_MTX_HARD_DEBUG
# ifdef __GNUC__
# warning ETHR_MTX_HARD_DEBUG
@@ -54,6 +79,10 @@
# endif
#endif
+#ifndef ETHR_INLINE_MTX_FUNC_NAME_
+# define ETHR_INLINE_MTX_FUNC_NAME_(X) X
+#endif
+
#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
#ifdef ETHR_DEBUG
@@ -136,13 +165,19 @@ struct ethr_mutex_base_ {
typedef struct {
int main_spincount;
int aux_spincount;
+ int posix_compliant;
} ethr_mutex_opt;
+#define ETHR_MUTEX_OPT_DEFAULT_INITER {-1, -1, 0}
+
typedef struct {
int main_spincount;
int aux_spincount;
+ int posix_compliant;
} ethr_cond_opt;
+#define ETHR_COND_OPT_DEFAULT_INITER {-1, -1, 0}
+
#ifdef ETHR_USE_OWN_MTX_IMPL__
typedef struct ethr_mutex_ ethr_mutex;
@@ -175,7 +210,7 @@ struct ethr_cond_ {
#endif
};
-#else /* pthread */
+#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
typedef struct ethr_mutex_ ethr_mutex;
struct ethr_mutex_ {
@@ -193,7 +228,36 @@ struct ethr_cond_ {
#endif
};
-#endif /* pthread */
+#elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+# define ETHR_WIN_MUTEX__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ int posix_compliant;
+ CRITICAL_SECTION cs;
+ ethr_ts_event *wakeups;
+ ethr_atomic32_t have_wakeups; /* only when posix compliant */
+ ethr_atomic32_t locked; /* only when posix compliant */
+ ethr_spinlock_t lock; /* only when posix compliant */
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ int posix_compliant;
+ CRITICAL_SECTION cs;
+ ethr_ts_event *waiters;
+ int spincount;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else
+# error "no mutex implementation"
+#endif
int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
int ethr_mutex_init(ethr_mutex *);
@@ -505,7 +569,7 @@ void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
ethr_sint32_t act;
int res;
@@ -529,7 +593,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
ethr_sint32_t act;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
@@ -549,7 +613,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
ethr_sint32_t act;
ETHR_COMPILER_BARRIER;
@@ -569,12 +633,12 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#else /* pthread_mutex */
+#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
int res;
res = pthread_mutex_trylock(&mtx->pt_mtx);
@@ -584,7 +648,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
int res = pthread_mutex_lock(&mtx->pt_mtx);
if (res != 0)
@@ -592,7 +656,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
int res = pthread_mutex_unlock(&mtx->pt_mtx);
if (res != 0)
@@ -601,7 +665,54 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* pthread_mutex */
+#elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ if (!TryEnterCriticalSection(&mtx->cs))
+ return EBUSY;
+ if (mtx->posix_compliant)
+ ethr_atomic32_set(&mtx->locked, 1);
+ return 0;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ EnterCriticalSection(&mtx->cs);
+ if (mtx->posix_compliant)
+ ethr_atomic32_set(&mtx->locked, 1);
+}
+
+void ethr_mutex_cond_wakeup__(ethr_mutex *mtx);
+
+static ETHR_INLINE void
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ if (mtx->posix_compliant) {
+ ethr_atomic32_set_mb(&mtx->locked, 0);
+ if (ethr_atomic32_read_acqb(&mtx->have_wakeups))
+ goto cond_wakeup;
+ else
+ goto leave_cs;
+ }
+
+ if (mtx->wakeups) {
+ cond_wakeup:
+ ethr_mutex_cond_wakeup__(mtx);
+ }
+ else {
+ leave_cs:
+ LeaveCriticalSection(&mtx->cs);
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif
#ifdef ETHR_USE_OWN_RWMTX_IMPL__
@@ -615,7 +726,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
{
int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
if (res != 0 && res != EBUSY)
@@ -624,7 +735,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
{
int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
if (res != 0)
@@ -632,7 +743,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
{
int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
if (res != 0)
@@ -640,7 +751,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
}
static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
{
int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
if (res != 0 && res != EBUSY)
@@ -649,7 +760,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
{
int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
if (res != 0)
@@ -657,7 +768,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+ETHR_INLINE_MTX_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
{
int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
if (res != 0)
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
index 8e04692856..45399d18e6 100644
--- a/erts/include/internal/ethr_optimized_fallbacks.h
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -18,152 +18,172 @@
*/
/*
- * Description: "Optimized" fallbacks used when native ops are missing
+ * Description: Optimized fallbacks used when native ops are missing
* Author: Rickard Green
*/
#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
#define ETHR_OPTIMIZED_FALLBACKS_H__
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_native_spinlock_destroy(ethr_native_spinlock_t *lock)
+{
+ return 0;
+}
+
#endif
-#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
-#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
-#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+/* --- Native spinlocks using pthread spinlocks -------------------------- */
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+
+#define ETHR_NATIVE_SPINLOCK_IMPL "pthread"
-typedef pthread_spinlock_t ethr_opt_spinlock_t;
+typedef pthread_spinlock_t ethr_native_spinlock_t;
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-static ETHR_INLINE int
-ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
- return pthread_spin_init((pthread_spinlock_t *) lock, 0);
+ int err = pthread_spin_init((pthread_spinlock_t *) lock, 0);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
}
static ETHR_INLINE int
-ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
+ethr_native_spinlock_destroy(ethr_native_spinlock_t *lock)
{
return pthread_spin_destroy((pthread_spinlock_t *) lock);
}
-
-static ETHR_INLINE int
-ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
- return pthread_spin_unlock((pthread_spinlock_t *) lock);
+ int err = pthread_spin_unlock((pthread_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
}
-static ETHR_INLINE int
-ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
- return pthread_spin_lock((pthread_spinlock_t *) lock);
+ int err = pthread_spin_lock((pthread_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
}
#endif
-#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+#elif defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
/* --- Native spinlocks using native atomics -------------------------------- */
#define ETHR_HAVE_NATIVE_SPINLOCKS 1
-#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
-
-#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
-typedef ethr_native_atomic32_t ethr_native_spinlock_t;
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
-#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
-typedef ethr_native_atomic64_t ethr_native_spinlock_t;
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
-#else
-# error "Missing native atomic implementation"
-#endif
+
+#define ETHR_NATIVE_SPINLOCK_IMPL "native-atomics"
+
+typedef ethr_atomic32_t ethr_native_spinlock_t;
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#undef ETHR_NSPN_AOP__
+#define ETHR_NSPN_AOP__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
- ETHR_NATMC_FUNC__(init)(lock, 0);
+ ETHR_NSPN_AOP__(init)(lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_native_spinlock_destroy(ethr_native_spinlock_t *lock)
+{
+ return ETHR_NSPN_AOP__(read)(lock) == 0 ? 0 : EBUSY;
}
static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
- ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == 1);
- ETHR_NATMC_FUNC__(set_relb)(lock, 0);
+ ETHR_ASSERT(ETHR_NSPN_AOP__(read)(lock) == 1);
+ ETHR_NSPN_AOP__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
- while (ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, 1, 0) != 0) {
- while (ETHR_NATMC_FUNC__(read)(lock) != 0)
+ while (ETHR_NSPN_AOP__(cmpxchg_acqb)(lock, 1, 0) != 0) {
+ while (ETHR_NSPN_AOP__(read)(lock) != 0)
ETHR_SPIN_BODY;
}
ETHR_COMPILER_BARRIER;
}
-#endif
+#undef ETHR_NSPN_AOP__
-#undef ETHR_NATMC_FUNC__
+#endif
#endif
-#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
-#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
-#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+#if defined(ETHR_HAVE_NATIVE_RWSPINLOCKS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_native_rwlock_destroy(ethr_native_rwlock_t *lock)
+{
+ return 0;
+}
+
+#endif
+
+#elif defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
/* --- Native rwspinlocks using native atomics ------------------------------ */
#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
-#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+#define ETHR_NATIVE_RWSPINLOCK_IMPL "native-atomics"
-#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
-typedef ethr_native_atomic32_t ethr_native_rwlock_t;
-# define ETHR_NAINT_T__ ethr_sint32_t
+typedef ethr_atomic32_t ethr_native_rwlock_t;
# define ETHR_WLOCK_FLAG__ (((ethr_sint32_t) 1) << 30)
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
-#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
-typedef ethr_native_atomic64_t ethr_native_rwlock_t;
-# define ETHR_NAINT_T__ ethr_sint64_t
-# define ETHR_WLOCK_FLAG__ (((ethr_sint64_t) 1) << 62)
-# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
-#else
-# error "Missing native atomic implementation"
-#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#undef ETHR_NRWSPN_AOP__
+#define ETHR_NRWSPN_AOP__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
{
- ETHR_NATMC_FUNC__(init)(lock, 0);
+ ETHR_NRWSPN_AOP__(init)(lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_native_rwlock_destroy(ethr_native_rwlock_t *lock)
+{
+ return ETHR_NRWSPN_AOP__(read)(lock) == 0 ? 0 : EBUSY;
}
static ETHR_INLINE void
ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
- ETHR_COMPILER_BARRIER;
-#ifdef DEBUG
- ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) >= 0);
-#endif
- ETHR_NATMC_FUNC__(dec_relb)(lock);
+ ETHR_ASSERT(ETHR_NRWSPN_AOP__(read)(lock) >= 0);
+ ETHR_NRWSPN_AOP__(dec_relb)(lock);
}
static ETHR_INLINE void
ethr_native_read_lock(ethr_native_rwlock_t *lock)
{
- ETHR_NAINT_T__ act, exp = 0;
+ ethr_sint32_t act, exp = 0;
while (1) {
- act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp+1, exp);
+ act = ETHR_NRWSPN_AOP__(cmpxchg_acqb)(lock, exp+1, exp);
if (act == exp)
break;
+ /* Wait for writer to leave */
while (act & ETHR_WLOCK_FLAG__) {
ETHR_SPIN_BODY;
- act = ETHR_NATMC_FUNC__(read)(lock);
+ act = ETHR_NRWSPN_AOP__(read)(lock);
}
exp = act;
}
@@ -173,36 +193,37 @@ ethr_native_read_lock(ethr_native_rwlock_t *lock)
static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
- ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == ETHR_WLOCK_FLAG__);
- ETHR_NATMC_FUNC__(set_relb)(lock, 0);
+ ETHR_ASSERT(ETHR_NRWSPN_AOP__(read)(lock) == ETHR_WLOCK_FLAG__);
+ ETHR_NRWSPN_AOP__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_write_lock(ethr_native_rwlock_t *lock)
{
- ETHR_NAINT_T__ act, exp = 0;
+ ethr_sint32_t act, exp = 0;
while (1) {
- act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
+ act = ETHR_NRWSPN_AOP__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
if (act == exp)
break;
- ETHR_SPIN_BODY;
- exp = act & ~ETHR_WLOCK_FLAG__;
+ /* Wait for writer to leave */
+ while (act & ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ETHR_NRWSPN_AOP__(read)(lock);
+ }
+ exp = act;
}
act |= ETHR_WLOCK_FLAG__;
/* Wait for readers to leave */
while (act != ETHR_WLOCK_FLAG__) {
ETHR_SPIN_BODY;
- act = ETHR_NATMC_FUNC__(read_acqb)(lock);
+ act = ETHR_NRWSPN_AOP__(read_acqb)(lock);
}
ETHR_COMPILER_BARRIER;
}
-#endif
+#undef ETHR_NRWSPN_AOP__
-#undef ETHR_NAINT_T__
-#undef ETHR_NATMC_FUNC__
-#undef ETHR_WLOCK_FLAG__
+#endif
#endif
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 4cd95faf6a..142c26c0ca 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -33,10 +33,6 @@
#include <stdlib.h>
#include "erl_errno.h"
-#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
-#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
-
#if defined(DEBUG)
# define ETHR_DEBUG
#endif
@@ -68,7 +64,7 @@
#endif
/* Assume 64-byte cache line size */
-#define ETHR_CACHE_LINE_SIZE ((ethr_uint_t) 64)
+#define ETHR_CACHE_LINE_SIZE 64
#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
@@ -195,7 +191,6 @@ typedef DWORD ethr_tsd_key;
#undef ETHR_HAVE_ETHR_SIG_FUNCS
#define ETHR_USE_OWN_RWMTX_IMPL__
-#define ETHR_USE_OWN_MTX_IMPL__
#define ETHR_YIELD() (Sleep(0), 0)
@@ -251,13 +246,90 @@ typedef ethr_sint64_t ethr_sint_t;
typedef ethr_uint64_t ethr_uint_t;
#endif
-/* __builtin_expect() is needed by both native atomics code
- * and the fallback code */
-#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
+#if defined(ETHR_SIZEOF___INT128_T) && ETHR_SIZEOF___INT128_T == 16
+#define ETHR_HAVE_INT128_T
+typedef __int128_t ethr_sint128_t;
+typedef __uint128_t ethr_uint128_t;
+#endif
+
+#define ETHR_FATAL_ERROR__(ERR) \
+ ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
+
+ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err);
+
+#if !defined(__GNUC__)
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
+#elif !defined(__GNUC_MINOR__)
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#elif !defined(__GNUC_PATCHLEVEL__)
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#else
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#endif
+
+#if !ETHR_AT_LEAST_GCC_VSN__(2, 96, 0)
#define __builtin_expect(X, Y) (X)
#endif
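The version check packs (major, minor, patchlevel) into a single integer, major in the top bits and 12 bits each for minor and patchlevel, so one comparison suffices. A worked illustration (not part of the source):

    /* gcc 4.2.1: (4 << 24) | (2 << 12) | 1 == 0x04002001            */
    /* ETHR_AT_LEAST_GCC_VSN__(3, 1, 1) compares against 0x03001001  */
    /* 0x04002001 >= 0x03001001, so the test is true                 */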
-/* For CPU-optimised atomics, spinlocks, and rwlocks. */
+#if ETHR_AT_LEAST_GCC_VSN__(3, 1, 1)
+# define ETHR_CHOOSE_EXPR __builtin_choose_expr
+#else
+# define ETHR_CHOOSE_EXPR(B, E1, E2) ((B) ? (E1) : (E2))
+#endif
+
+#if ((defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) \
+ || (defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64))))
+# define ETHR_X86_RUNTIME_CONF__
+
+# define ETHR_X86_RUNTIME_CONF_HAVE_DW_CMPXCHG__ \
+ (__builtin_expect(ethr_runtime__.conf.have_dw_cmpxchg != 0, 1))
+# define ETHR_X86_RUNTIME_CONF_HAVE_NO_DW_CMPXCHG__ \
+ (__builtin_expect(ethr_runtime__.conf.have_dw_cmpxchg == 0, 0))
+# define ETHR_X86_RUNTIME_CONF_HAVE_SSE2__ \
+ (__builtin_expect(ethr_runtime__.conf.have_sse2 != 0, 1))
+# define ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__ \
+ (__builtin_expect(ethr_runtime__.conf.have_sse2 == 0, 0))
+#endif
+
+#if (defined(__GNUC__) \
+ && !defined(ETHR_PPC_HAVE_LWSYNC) \
+ && !defined(ETHR_PPC_HAVE_NO_LWSYNC) \
+ && (defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__)))
+# define ETHR_PPC_RUNTIME_CONF__
+
+# define ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ \
+ (__builtin_expect(ethr_runtime__.conf.have_lwsync != 0, 1))
+# define ETHR_PPC_RUNTIME_CONF_HAVE_NO_LWSYNC__ \
+ (__builtin_expect(ethr_runtime__.conf.have_lwsync == 0, 0))
+#endif
+
+typedef struct {
+#if defined(ETHR_X86_RUNTIME_CONF__)
+ int have_dw_cmpxchg;
+ int have_sse2;
+#endif
+#if defined(ETHR_PPC_RUNTIME_CONF__)
+ int have_lwsync;
+#endif
+ int dummy;
+} ethr_runtime_conf_t;
+
+
+typedef union {
+ ethr_runtime_conf_t conf;
+ char pad__[ETHR_CACHE_LINE_ALIGN_SIZE(sizeof(ethr_runtime_conf_t))+ETHR_CACHE_LINE_SIZE];
+} ethr_runtime_t;
+
+
+extern ethr_runtime_t ethr_runtime__;
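The cpuid helpers declared earlier in this diff (ethr_x86_have_cpuid__() and ethr_x86_cpuid__()) are presumably what fills in this structure during initialization. A hedged sketch of such a probe, not taken from the actual ethr_init code; the bit positions are the standard CPUID leaf-1 feature flags:

    #if defined(ETHR_X86_RUNTIME_CONF__)
    static void x86_probe_runtime_conf(void)      /* hypothetical helper */
    {
        int eax, ebx, ecx, edx;
        if (!ethr_x86_have_cpuid__()) {
            ethr_runtime__.conf.have_dw_cmpxchg = 0;
            ethr_runtime__.conf.have_sse2 = 0;
            return;
        }
        eax = 1;                                   /* leaf 1: feature flags */
        ethr_x86_cpuid__(&eax, &ebx, &ecx, &edx);
    #if ETHR_SIZEOF_PTR == 8
        ethr_runtime__.conf.have_dw_cmpxchg = (ecx >> 13) & 1;  /* CMPXCHG16B */
    #else
        ethr_runtime__.conf.have_dw_cmpxchg = (edx >> 8) & 1;   /* CMPXCHG8B (CX8) */
    #endif
        ethr_runtime__.conf.have_sse2 = (edx >> 26) & 1;        /* SSE2 */
    }
    #endif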
+
+/* For native CPU-optimised atomics, spinlocks, and rwlocks. */
#if !defined(ETHR_DISABLE_NATIVE_IMPLS)
# if defined(__GNUC__)
# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
@@ -265,7 +337,7 @@ typedef ethr_uint64_t ethr_uint_t;
# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
# include "libatomic_ops/ethread.h"
# endif
-# ifndef ETHR_HAVE_NATIVE_ATOMICS
+# if !defined(ETHR_HAVE_NATIVE_ATOMIC32) && !defined(ETHR_HAVE_NATIVE_ATOMIC64)
# if ETHR_SIZEOF_PTR == 4
# if defined(__i386__)
# include "i386/ethread.h"
@@ -283,8 +355,10 @@ typedef ethr_uint64_t ethr_uint_t;
# include "sparc64/ethread.h"
# endif
# endif
+#if 0
# include "gcc/ethread.h"
# include "libatomic_ops/ethread.h"
+#endif
# endif
# elif defined(ETHR_HAVE_LIBATOMIC_OPS)
# include "libatomic_ops/ethread.h"
@@ -293,10 +367,9 @@ typedef ethr_uint64_t ethr_uint_t;
# endif
#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
+#include "ethr_atomics.h" /* The atomics API */
+
#if defined(__GNUC__)
-# ifndef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
-# endif
# ifndef ETHR_SPIN_BODY
# if defined(__i386__) || defined(__x86_64__)
# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
@@ -309,11 +382,6 @@ typedef ethr_uint64_t ethr_uint_t;
# endif
# endif
#elif defined(ETHR_WIN32_THREADS)
-# ifndef ETHR_COMPILER_BARRIER
-# include <intrin.h>
-# pragma intrinsic(_ReadWriteBarrier)
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
# ifndef ETHR_SPIN_BODY
# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
# endif
@@ -321,43 +389,6 @@ typedef ethr_uint64_t ethr_uint_t;
#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
- * i.e. when our lock based atomic fallback is used, a noop is sufficient.
- */
-#define ETHR_MEMORY_BARRIER do { } while (0)
-#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
-#define ETHR_READ_MEMORY_BARRIER do { } while (0)
-#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
-#endif
-
-#ifndef ETHR_WRITE_MEMORY_BARRIER
-# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
-# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
-#endif
-#ifndef ETHR_READ_MEMORY_BARRIER
-# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
-# define ETHR_READ_MEMORY_BARRIER_IS_FULL
-#endif
-#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
-# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
-# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
-#endif
-
-#define ETHR_FATAL_ERROR__(ERR) \
- ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
-
-ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
- int line,
- const char *func,
- int err);
-
-void ethr_compiler_barrier_fallback(void);
-#ifndef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
-#endif
-
#ifndef ETHR_SPIN_BODY
# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
#endif
@@ -460,8 +491,6 @@ void ethr_compiler_barrier(void);
#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
typedef ethr_native_spinlock_t ethr_spinlock_t;
-#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
-typedef ethr_opt_spinlock_t ethr_spinlock_t;
#elif defined(__WIN32__)
typedef CRITICAL_SECTION ethr_spinlock_t;
#else
@@ -483,8 +512,6 @@ ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spinlock_init(lock);
return 0;
-#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
- return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
#elif defined(__WIN32__)
if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
return ethr_win_get_errno__();
@@ -498,9 +525,7 @@ static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
- return 0;
-#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
- return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
+ return ethr_native_spinlock_destroy(lock);
#elif defined(__WIN32__)
DeleteCriticalSection((CRITICAL_SECTION *) lock);
return 0;
@@ -514,10 +539,6 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_unlock(lock);
-#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
- int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
- if (err)
- ETHR_FATAL_ERROR__(err);
#elif defined(__WIN32__)
LeaveCriticalSection((CRITICAL_SECTION *) lock);
#else
@@ -532,10 +553,6 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_lock(lock);
-#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
- int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
- if (err)
- ETHR_FATAL_ERROR__(err);
#elif defined(__WIN32__)
EnterCriticalSection((CRITICAL_SECTION *) lock);
#else
@@ -547,8 +564,6 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#include "ethr_atomics.h"
-
typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
#if defined(ETHR_WIN32_THREADS)
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index f394d790d2..dd3599f86d 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,6 +32,9 @@
/* Define to the size of __int64 */
#undef ETHR_SIZEOF___INT64
+/* Define to the size of __int128_t */
+#undef ETHR_SIZEOF___INT128_T
+
/* Define if bigendian */
#undef ETHR_BIGENDIAN
@@ -69,8 +72,44 @@
/* Define if you have a linux futex implementation. */
#undef ETHR_HAVE_LINUX_FUTEX
-/* Define if you have gcc atomic operations */
-#undef ETHR_HAVE_GCC_ATOMIC_OPS
+/* Define if x86/x86_64 out-of-order instructions should be synchronized */
+#undef ETHR_X86_OUT_OF_ORDER
+
+/* Define if only run in Sparc TSO mode */
+#undef ETHR_SPARC_TSO
+
+/* Define if only run in Sparc PSO, or TSO mode */
+#undef ETHR_SPARC_PSO
+
+/* Define if run in Sparc RMO, PSO, or TSO mode */
+#undef ETHR_SPARC_RMO
+
+/* Define if you have __sync_add_and_fetch() for 32-bit integers */
+#undef ETHR_HAVE___SYNC_ADD_AND_FETCH32
+
+/* Define if you have __sync_add_and_fetch() for 64-bit integers */
+#undef ETHR_HAVE___SYNC_ADD_AND_FETCH64
+
+/* Define if you have __sync_fetch_and_and() for 32-bit integers */
+#undef ETHR_HAVE___SYNC_FETCH_AND_AND32
+
+/* Define if you have __sync_fetch_and_and() for 64-bit integers */
+#undef ETHR_HAVE___SYNC_FETCH_AND_AND64
+
+/* Define if you have __sync_fetch_and_or() for 32-bit integers */
+#undef ETHR_HAVE___SYNC_FETCH_AND_OR32
+
+/* Define if you have __sync_fetch_and_or() for 64-bit integers */
+#undef ETHR_HAVE___SYNC_FETCH_AND_OR64
+
+/* Define if you have __sync_val_compare_and_swap() for 32-bit integers */
+#undef ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP32
+
+/* Define if you have __sync_val_compare_and_swap() for 64-bit integers */
+#undef ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP64
+
+/* Define if you have __sync_val_compare_and_swap() for 128-bit integers */
+#undef ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP128
/* Define if you prefer gcc native ethread implementations */
#undef ETHR_PREFER_GCC_NATIVE_IMPLS
@@ -90,8 +129,14 @@
/* Define if sched_yield() returns an int. */
#undef ETHR_SCHED_YIELD_RET_INT
-/* Define if you want compatibilty with x86 processors before pentium4. */
-#undef ETHR_PRE_PENTIUM4_COMPAT
+/* Define if you use a gcc that supports -msse2 and understands SSE2-specific asm statements */
+#undef ETHR_GCC_HAVE_SSE2_ASM_SUPPORT
+
+/* Define if you use a gcc that supports the double word cmpxchg instruction */
+#undef ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT
+
+/* Define if you get a register shortage with cmpxchg8b and position independent code */
+#undef ETHR_CMPXCHG8B_REGISTER_SHORTAGE
/* Define if you have the pthread_rwlockattr_setkind_np() function. */
#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
@@ -115,23 +160,74 @@
/* Define to the size of AO_t if libatomic_ops is used */
#undef ETHR_SIZEOF_AO_T
+/* Define if you have _InterlockedAnd() */
+#undef ETHR_HAVE__INTERLOCKEDAND
+
+/* Define if you have _InterlockedAnd64() */
+#undef ETHR_HAVE__INTERLOCKEDAND64
+
+/* Define if you have _InterlockedCompareExchange() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE
+
/* Define if you have _InterlockedCompareExchange64() */
#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+/* Define if you have _InterlockedCompareExchange64_acq() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_ACQ
+
+/* Define if you have _InterlockedCompareExchange64_rel() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_REL
+
+/* Define if you have _InterlockedCompareExchange_acq() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE_ACQ
+
+/* Define if you have _InterlockedCompareExchange_rel() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE_REL
+
+/* Define if you have _InterlockedDecrement() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT
+
/* Define if you have _InterlockedDecrement64() */
#undef ETHR_HAVE__INTERLOCKEDDECREMENT64
-/* Define if you have _InterlockedIncrement64() */
-#undef ETHR_HAVE__INTERLOCKEDINCREMENT64
+/* Define if you have _InterlockedDecrement64_rel() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT64_REL
-/* Define if you have _InterlockedExchangeAdd64() */
-#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+/* Define if you have _InterlockedDecrement_rel() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT_REL
+
+/* Define if you have _InterlockedExchange() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGE
/* Define if you have _InterlockedExchange64() */
#undef ETHR_HAVE__INTERLOCKEDEXCHANGE64
-/* Define if you have _InterlockedAnd64() */
-#undef ETHR_HAVE__INTERLOCKEDAND64
+/* Define if you have _InterlockedExchangeAdd() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD
+
+/* Define if you have _InterlockedExchangeAdd64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+
+/* Define if you have _InterlockedExchangeAdd64_acq() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64_ACQ
+
+/* Define if you have _InterlockedExchangeAdd_acq() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD_ACQ
+
+/* Define if you have _InterlockedIncrement() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT
+
+/* Define if you have _InterlockedIncrement64() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT64
+
+/* Define if you have _InterlockedIncrement64_acq() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT64_ACQ
+
+/* Define if you have _InterlockedIncrement_acq() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT_ACQ
+
+/* Define if you have _InterlockedOr() */
+#undef ETHR_HAVE__INTERLOCKEDOR
/* Define if you have _InterlockedOr64() */
#undef ETHR_HAVE__INTERLOCKEDOR64
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index 16935084b1..f598f8537b 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,11 +25,15 @@
#undef ETHR_INCLUDE_ATOMIC_IMPL__
#if !defined(ETHR_GCC_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
#define ETHR_GCC_ATOMIC32_H__
-#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#if defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP32)
+# define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#endif
#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
#elif !defined(ETHR_GCC_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
#define ETHR_GCC_ATOMIC64_H__
-#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#if defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP64)
+# define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#endif
#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
#endif
@@ -45,58 +49,38 @@
# define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 1
#endif
-#if defined(__x86_64__) || (defined(__i386__) \
- && !defined(ETHR_PRE_PENTIUM4_COMPAT))
-# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
-#else
-# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
-#endif
-
-/*
- * According to the documentation this is what we want:
- * #define ETHR_MEMORY_BARRIER __sync_synchronize()
- * However, __sync_synchronize() is known to erroneously be
- * a noop on at least some platforms with some gcc versions.
- * This has suposedly been fixed in some gcc version, but we
- * don't know from which version. Therefore, we only use
- * it when it has been verified to work. Otherwise
- * we use a workaround.
- */
-#if defined(__mips__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
-/* __sync_synchronize() has been verified to work here */
-#define ETHR_MEMORY_BARRIER __sync_synchronize()
-#define ETHR_READ_DEPEND_MEMORY_BARRIER __sync_synchronize()
-#elif defined(__x86_64__) || (defined(__i386__) \
- && !defined(ETHR_PRE_PENTIUM4_COMPAT))
-/* Use fence instructions directly instead of workaround */
-#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
-#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
-#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
-#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
-#else
-/* Workaround */
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile ethr_sint32_t x___ = 0; \
- (void) __sync_val_compare_and_swap(&x___, (ethr_sint32_t) 0, (ethr_sint32_t) 1); \
-} while (0)
-#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
-#endif
-
-#define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
-
#endif /* ETHR_GCC_ATOMIC_COMMON__ */
#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATIVE_ATOMIC32_IMPL "gcc"
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic32_t
#define ETHR_AINT_T__ ethr_sint32_t
+#if defined(ETHR_HAVE___SYNC_ADD_AND_FETCH32)
+# define ETHR_HAVE___SYNC_ADD_AND_FETCH
+#endif
+#if defined(ETHR_HAVE___SYNC_FETCH_AND_AND32)
+# define ETHR_HAVE___SYNC_FETCH_AND_AND
+#endif
+#if defined(ETHR_HAVE___SYNC_FETCH_AND_OR32)
+# define ETHR_HAVE___SYNC_FETCH_AND_OR
+#endif
#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATIVE_ATOMIC64_IMPL "gcc"
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic64_t
#define ETHR_AINT_T__ ethr_sint64_t
+#if defined(ETHR_HAVE___SYNC_ADD_AND_FETCH64)
+# define ETHR_HAVE___SYNC_ADD_AND_FETCH
+#endif
+#if defined(ETHR_HAVE___SYNC_FETCH_AND_AND64)
+# define ETHR_HAVE___SYNC_FETCH_AND_AND
+#endif
+#if defined(ETHR_HAVE___SYNC_FETCH_AND_OR64)
+# define ETHR_HAVE___SYNC_FETCH_AND_OR
+#endif
#else
#error "Unsupported integer size"
#endif
@@ -108,183 +92,120 @@ typedef struct {
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__ *
ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
{
return (ETHR_AINT_T__ *) &var->counter;
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
-{
#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
- var->counter = value;
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
#else
- /*
- * Unfortunately no __sync_store() or similar exist in the gcc atomic
- * op interface. We therefore have to simulate it this way...
- */
- ETHR_AINT_T__ act = 0, exp;
- do {
- exp = act;
- act = __sync_val_compare_and_swap(&var->counter, exp, value);
- } while (act != exp);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET 1
#endif
-}
static ETHR_INLINE void
-ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
- ETHR_NATMC_FUNC__(set)(var, value);
+ var->counter = value;
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
-{
+#endif /* ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ */
+
#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
- return var->counter;
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
#else
- /*
- * Unfortunately no __sync_fetch() or similar exist in the gcc atomic
- * op interface. We therefore have to simulate it this way...
- */
- return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ 1
#endif
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
- (void) __sync_add_and_fetch(&var->counter, incr);
-}
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, incr);
+ return var->counter;
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
-{
- (void) __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
-}
+#endif /* ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ */
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
-{
- (void) __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
-}
+#if defined(ETHR_HAVE___SYNC_ADD_AND_FETCH)
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
-{
- return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(add_return_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- return __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
+ return __sync_add_and_fetch(&var->counter, incr);
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- return __sync_fetch_and_and(&var->counter, mask);
-}
+#endif
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- return (ETHR_AINT_T__) __sync_fetch_and_or(&var->counter, mask);
-}
+#if defined(ETHR_HAVE___SYNC_FETCH_AND_AND)
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
-{
- return __sync_val_compare_and_swap(&var->counter, old, new);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
+ETHR_NATMC_FUNC__(and_retold_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- ETHR_AINT_T__ exp, act = 0;
- do {
- exp = act;
- act = __sync_val_compare_and_swap(&var->counter, exp, new);
- } while (act != exp);
- return act;
+ return __sync_fetch_and_and(&var->counter, mask);
}
-/*
- * Atomic ops with at least specified barriers.
- */
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
-{
-#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
- ETHR_AINT_T__ val = var->counter;
- ETHR_COMPILER_BARRIER;
- return val;
-#else
- return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
-#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
- ETHR_COMPILER_BARRIER;
- var->counter = i;
+#if defined(ETHR_HAVE___SYNC_FETCH_AND_OR)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_MB 1
#else
- (void) ETHR_NATMC_FUNC__(xchg)(var, i);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_MB 1
#endif
-}
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(inc_return)(var);
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(or_retold_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- ETHR_NATMC_FUNC__(dec)(var);
+ return (ETHR_AINT_T__) __sync_fetch_and_or(&var->counter, mask);
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(dec_return)(var);
-}
+#endif
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
-{
- return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
+ETHR_NATMC_FUNC__(cmpxchg_mb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
- return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+ return __sync_val_compare_and_swap(&var->counter, old, new);
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
#undef ETHR_NATMC_FUNC__
#undef ETHR_ATMC_T__
#undef ETHR_AINT_T__
#undef ETHR_AINT_SUFFIX__
+#undef ETHR_HAVE___SYNC_ADD_AND_FETCH
+#undef ETHR_HAVE___SYNC_FETCH_AND_AND
+#undef ETHR_HAVE___SYNC_FETCH_AND_OR
#endif
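After this rework the gcc backend only provides full-barrier (_mb) native operations, and each is advertised through an ETHR_HAVE_ETHR_NATIVE_ATOMIC32_/64_* define; the generic layer included via ethr_atomics.h is expected to derive the relaxed, acquire, and release variants. A sketch of how such a capability define is typically consumed (illustrative, not taken from ethr_atomics.h):

    #if defined(ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB)
        new = ethr_native_atomic32_add_return_mb(var, incr);    /* native op */
    #else
        /* emulate via ethr_native_atomic32_cmpxchg_mb() in a retry loop */
    #endif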
diff --git a/erts/include/internal/gcc/ethr_dw_atomic.h b/erts/include/internal/gcc/ethr_dw_atomic.h
new file mode 100644
index 0000000000..6736f9c547
--- /dev/null
+++ b/erts/include/internal/gcc/ethr_dw_atomic.h
@@ -0,0 +1,115 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native double word atomics using gcc's builtins
+ * Author: Rickard Green
+ */
+
+#undef ETHR_INCLUDE_DW_ATOMIC_IMPL__
+#ifndef ETHR_GCC_DW_ATOMIC_H__
+# define ETHR_GCC_DW_ATOMIC_H__
+# if ((ETHR_SIZEOF_PTR == 4 \
+ && defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP64)) \
+ || (ETHR_SIZEOF_PTR == 8 \
+ && defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP128) \
+ && defined(ETHR_HAVE_INT128_T)))
+# define ETHR_INCLUDE_DW_ATOMIC_IMPL__
+# endif
+#endif
+
+#ifdef ETHR_INCLUDE_DW_ATOMIC_IMPL__
+# define ETHR_HAVE_NATIVE_SU_DW_ATOMIC
+# define ETHR_NATIVE_DW_ATOMIC_IMPL "gcc"
+
+# if defined(__i386__) || defined(__x86_64__)
+/*
+ * If ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__ is defined, it will be used
+ * at runtime in order to determine if native or fallback implementation
+ * should be used.
+ */
+# define ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__ \
+ ETHR_X86_RUNTIME_CONF_HAVE_DW_CMPXCHG__
+# endif
+
+# if ETHR_SIZEOF_PTR == 4
+# define ETHR_DW_NATMC_ALIGN_MASK__ 0x7
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint64_t
+# elif ETHR_SIZEOF_PTR == 8
+# define ETHR_DW_NATMC_ALIGN_MASK__ 0xf
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint128_t
+# endif
+
+typedef volatile ETHR_NATIVE_SU_DW_SINT_T * ethr_native_dw_ptr_t;
+
+/*
+ * We need 16-byte-aligned memory in 64-bit mode, and 8-byte-aligned
+ * memory in 32-bit mode. A 16-byte-aligned malloc in 64-bit mode is
+ * not common, and at least some glibc malloc implementations only
+ * 4-byte-align in 32-bit mode.
+ *
+ * This code assumes 8-byte-aligned memory in 64-bit mode, and
+ * 4-byte-aligned memory in 32-bit mode. A malloc implementation that
+ * does not adhere to these alignment requirements is seriously broken,
+ * and we won't bother trying to work around it.
+ *
+ * Since memory alignment may be off by one word, we need to align at
+ * runtime; we therefore allocate one extra word.
+ */
+#define ETHR_DW_NATMC_MEM__(VAR) \
+    (&(VAR)->c[(int) ((ethr_uint_t) &(VAR)->c[0]) & ETHR_DW_NATMC_ALIGN_MASK__])
+typedef union {
+ volatile ETHR_NATIVE_SU_DW_SINT_T dw_sint;
+ volatile ethr_sint_t sint[3];
+ volatile char c[ETHR_SIZEOF_PTR*3];
+} ethr_native_dw_atomic_t;
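The masking trick works because the assumed allocator alignment is exactly half of the required double-word alignment, so the start address of c[] can only be off by zero or by half the alignment; adding those low bits back therefore always lands on a boundary, and the extra third word keeps the shifted value inside the union. Worked 64-bit example (illustrative only):

    /* required alignment 16, assumed malloc alignment 8, mask 0xf        */
    /* &var->c[0] ends in 0x...0 -> offset 0 -> already 16-byte aligned   */
    /* &var->c[0] ends in 0x...8 -> offset 8 -> moved to the next         */
    /*                                          16-byte boundary          */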
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+# ifdef ETHR_DEBUG
+# define ETHR_DW_DBG_ALIGNED__(PTR) \
+ ETHR_ASSERT((((ethr_uint_t) (PTR)) & ETHR_DW_NATMC_ALIGN_MASK__) == 0);
+# else
+# define ETHR_DW_DBG_ALIGNED__(PTR)
+# endif
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR
+static ETHR_INLINE ethr_sint_t *
+ethr_native_dw_atomic_addr(ethr_native_dw_atomic_t *var)
+{
+ return (ethr_sint_t *) ETHR_DW_NATMC_MEM__(var);
+}
+
+
+#define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_MB
+
+static ETHR_INLINE ETHR_NATIVE_SU_DW_SINT_T
+ethr_native_su_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
+ ETHR_NATIVE_SU_DW_SINT_T new,
+ ETHR_NATIVE_SU_DW_SINT_T old)
+{
+ ethr_native_dw_ptr_t p = (ethr_native_dw_ptr_t) ETHR_DW_NATMC_MEM__(var);
+ ETHR_DW_DBG_ALIGNED__(p);
+ return __sync_val_compare_and_swap(p, old, new);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHR_GCC_DW_ATOMIC_H__ */
+
diff --git a/erts/include/internal/gcc/ethr_membar.h b/erts/include/internal/gcc/ethr_membar.h
new file mode 100644
index 0000000000..7d428fc68e
--- /dev/null
+++ b/erts/include/internal/gcc/ethr_membar.h
@@ -0,0 +1,73 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers when using gcc's builtins
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_GCC_MEMBAR_H__
+#define ETHR_GCC_MEMBAR_H__
+
+#define ETHR_LoadLoad (1 << 0)
+#define ETHR_LoadStore (1 << 1)
+#define ETHR_StoreLoad (1 << 2)
+#define ETHR_StoreStore (1 << 3)
+
+/*
+ * According to the documentation __sync_synchronize() will
+ * issue a full memory barrier. However, __sync_synchronize()
+ * is known to erroneously be a noop on at least some
+ * platforms with some gcc versions. This has supposedly been
+ * fixed in some gcc version, but we don't know from which
+ * version. Therefore, we only use it when it has been
+ * verified to work. Otherwise we use the workaround
+ * below.
+ */
+
+#if defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP32)
+# define ETHR_MB_T__ ethr_sint32_t
+#elif defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP64)
+# define ETHR_MB_T__ ethr_sint64_t
+#else
+# error "No __sync_val_compare_and_swap"
+#endif
+#define ETHR_SYNC_SYNCHRONIZE_WORKAROUND__ \
+do { \
+ volatile ETHR_MB_T__ x___ = 0; \
+ (void) __sync_val_compare_and_swap(&x___, (ETHR_MB_T__) 0, (ETHR_MB_T__) 1); \
+} while (0)
+
+#define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+
+#if defined(__mips__) && ETHR_AT_LEAST_GCC_VSN__(4, 2, 0)
+# define ETHR_MEMBAR(B) __sync_synchronize()
+# define ETHR_READ_DEPEND_MEMORY_BARRIER __sync_synchronize()
+#elif ((defined(__powerpc__) || defined(__ppc__)) \
+ && ETHR_AT_LEAST_GCC_VSN__(4, 1, 2))
+# define ETHR_MEMBAR(B) __sync_synchronize()
+#else /* Use workaround */
+# define ETHR_MEMBAR(B) \
+ ETHR_SYNC_SYNCHRONIZE_WORKAROUND__
+# define ETHR_READ_DEPEND_MEMORY_BARRIER \
+ ETHR_SYNC_SYNCHRONIZE_WORKAROUND__
+#endif
+
+
+#endif /* ETHR_GCC_MEMBAR_H__ */
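ETHR_MEMBAR() takes a combination of the ordering bits defined above; this gcc fallback ignores the argument and always issues a full barrier, which is stronger than, and therefore compatible with, anything requested. A usage sketch with hypothetical shared variables and functions (data, flag, compute, use):

    data = compute();                      /* producer side */
    ETHR_MEMBAR(ETHR_StoreStore);          /* order data store before flag store */
    flag = 1;

    while (!flag)                          /* consumer side */
        ETHR_SPIN_BODY;
    ETHR_MEMBAR(ETHR_LoadLoad);            /* order flag load before data load */
    use(data);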
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
index 392a1aa2b2..fcfdc39441 100644
--- a/erts/include/internal/gcc/ethread.h
+++ b/erts/include/internal/gcc/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,16 +25,24 @@
#ifndef ETHREAD_GCC_H__
#define ETHREAD_GCC_H__
-#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#ifndef ETHR_MEMBAR
+# include "ethr_membar.h"
+#endif
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMIC32)
+# define ETHR_ATOMIC_WANT_32BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
-#define ETHR_ATOMIC_WANT_32BIT_IMPL__
-#include "ethr_atomic.h"
-#if ETHR_SIZEOF_PTR == 8
+#if ETHR_SIZEOF_PTR == 8 && !defined(ETHR_HAVE_NATIVE_ATOMIC64)
# define ETHR_ATOMIC_WANT_64BIT_IMPL__
# include "ethr_atomic.h"
#endif
+#if (!defined(ETHR_HAVE_NATIVE_DW_ATOMIC) \
+ && !(ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC64)) \
+ && !(ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_NATIVE_ATOMIC128)))
+# include "ethr_dw_atomic.h"
#endif
#endif
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 4e402f261a..fc1b619935 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,53 +25,42 @@
*/
#undef ETHR_INCLUDE_ATOMIC_IMPL__
-#if !defined(ETHR_X86_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
-#define ETHR_X86_ATOMIC32_H__
-#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
-#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
-#elif !defined(ETHR_X86_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
-#define ETHR_X86_ATOMIC64_H__
-#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
-#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#if !defined(ETHR_X86_ATOMIC32_H__) \
+ && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+# define ETHR_X86_ATOMIC32_H__
+# define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+# undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_X86_ATOMIC64_H__) \
+ && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+# define ETHR_X86_ATOMIC64_H__
+# define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+# undef ETHR_ATOMIC_WANT_64BIT_IMPL__
#endif
#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#ifndef ETHR_X86_ATOMIC_COMMON__
-#define ETHR_X86_ATOMIC_COMMON__
-
-#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
-
-#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
-#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
-#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
-#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
-#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
-#else
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile ethr_sint32_t x___ = 0; \
- __asm__ __volatile__("lock; incl %0" : "=m"(x___) : "m"(x___) : "memory"); \
-} while (0)
-#endif
-
-#endif /* ETHR_X86_ATOMIC_COMMON__ */
-
-#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
-#define ETHR_HAVE_NATIVE_ATOMIC32 1
-#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
-#define ETHR_ATMC_T__ ethr_native_atomic32_t
-#define ETHR_AINT_T__ ethr_sint32_t
-#define ETHR_AINT_SUFFIX__ "l"
-#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
-#define ETHR_HAVE_NATIVE_ATOMIC64 1
-#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
-#define ETHR_ATMC_T__ ethr_native_atomic64_t
-#define ETHR_AINT_T__ ethr_sint64_t
-#define ETHR_AINT_SUFFIX__ "q"
-#else
-#error "Unsupported integer size"
-#endif
+# ifndef ETHR_X86_ATOMIC_COMMON__
+# define ETHR_X86_ATOMIC_COMMON__
+# define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
+# endif /* ETHR_X86_ATOMIC_COMMON__ */
+
+# if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_NATIVE_ATOMIC32 1
+# define ETHR_NATIVE_ATOMIC32_IMPL "ethread"
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# define ETHR_ATMC_T__ ethr_native_atomic32_t
+# define ETHR_AINT_T__ ethr_sint32_t
+# define ETHR_AINT_SUFFIX__ "l"
+# elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+# define ETHR_HAVE_NATIVE_ATOMIC64 1
+# define ETHR_NATIVE_ATOMIC64_IMPL "ethread"
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_ATMC_T__ ethr_native_atomic64_t
+# define ETHR_AINT_T__ ethr_sint64_t
+# define ETHR_AINT_SUFFIX__ "q"
+# else
+# error "Unsupported integer size"
+# endif
/* An atomic is an aligned ETHR_AINT_T__ accessed via locked operations.
*/
@@ -81,87 +70,28 @@ typedef struct {
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__ *
ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
{
return (ETHR_AINT_T__ *) &var->counter;
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
- var->counter = i;
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
- var->counter = i;
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
-{
- return var->counter;
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
- __asm__ __volatile__(
- "lock; add" ETHR_AINT_SUFFIX__ " %1, %0"
- : "=m"(var->counter)
- : "ir"(incr), "m"(var->counter));
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
-{
- __asm__ __volatile__(
- "lock; inc" ETHR_AINT_SUFFIX__ " %0"
- : "=m"(var->counter)
- : "m"(var->counter));
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
-{
- __asm__ __volatile__(
- "lock; dec" ETHR_AINT_SUFFIX__ " %0"
- : "=m"(var->counter)
- : "m"(var->counter));
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
- ETHR_AINT_T__ tmp;
-
- tmp = incr;
- __asm__ __volatile__(
- "lock; xadd" ETHR_AINT_SUFFIX__ " %0, %1" /* xadd didn't exist prior to the 486 */
- : "=r"(tmp)
- : "m"(var->counter), "0"(tmp));
- /* now tmp is the atomic's previous value */
- return tmp + incr;
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) 1);
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) -1);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
+ETHR_NATMC_FUNC__(cmpxchg_mb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
__asm__ __volatile__(
"lock; cmpxchg" ETHR_AINT_SUFFIX__ " %2, %3"
@@ -171,110 +101,148 @@ ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
return old;
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- ETHR_AINT_T__ tmp, old;
-
- tmp = var->counter;
- do {
- old = tmp;
- tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp & mask, tmp);
- } while (__builtin_expect(tmp != old, 0));
- /* now tmp is the atomic's previous value */
- return tmp;
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- ETHR_AINT_T__ tmp, old;
-
- tmp = var->counter;
- do {
- old = tmp;
- tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp | mask, tmp);
- } while (__builtin_expect(tmp != old, 0));
- /* now tmp is the atomic's previous value */
- return tmp;
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
+ETHR_NATMC_FUNC__(xchg_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
ETHR_AINT_T__ tmp = val;
__asm__ __volatile__(
"xchg" ETHR_AINT_SUFFIX__ " %0, %1"
: "=r"(tmp)
- : "m"(var->counter), "0"(tmp));
+ : "m"(var->counter), "0"(tmp)
+ : "memory");
/* now tmp is the atomic's previous value */
return tmp;
}
-/*
- * Atomic ops with at least specified barriers.
- */
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET 1
+#endif
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- ETHR_AINT_T__ val;
-#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
- val = var->counter;
+ var->counter = i;
+}
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RELB 1
#else
- val = ETHR_NATMC_FUNC__(add_return)(var, 0);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB 1
#endif
- __asm__ __volatile__("" : : : "memory");
- return val;
-}
static ETHR_INLINE void
ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- __asm__ __volatile__("" : : : "memory");
-#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
- var->counter = i;
+#if defined(_M_IX86)
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ (void) ETHR_NATMC_FUNC__(xchg_mb)(var, i);
+ else
+#endif /* _M_IX86 */
+ {
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ var->counter = i;
+ }
+}
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB 1
#else
- (void) ETHR_NATMC_FUNC__(xchg)(var, i);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB 1
#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ (void) ETHR_NATMC_FUNC__(xchg_mb)(var, i);
}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
- ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
- __asm__ __volatile__("" : : : "memory");
- return res;
+ return var->counter;
}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_MB 1
+#endif
+
static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(add_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- __asm__ __volatile__("" : : : "memory");
- ETHR_NATMC_FUNC__(dec)(var);
-}
+ __asm__ __volatile__(
+ "lock; add" ETHR_AINT_SUFFIX__ " %1, %0"
+ : "=m"(var->counter)
+ : "ir"(incr), "m"(var->counter)
+ : "memory");
+}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_MB 1
+#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(inc_mb)(ETHR_ATMC_T__ *var)
{
- __asm__ __volatile__("" : : : "memory");
- return ETHR_NATMC_FUNC__(dec_return)(var);
+ __asm__ __volatile__(
+ "lock; inc" ETHR_AINT_SUFFIX__ " %0"
+ : "=m"(var->counter)
+ : "m"(var->counter)
+ : "memory");
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_MB 1
+#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_mb)(ETHR_ATMC_T__ *var)
{
- return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+ __asm__ __volatile__(
+ "lock; dec" ETHR_AINT_SUFFIX__ " %0"
+ : "=m"(var->counter)
+ : "m"(var->counter)
+ : "memory");
}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
+ETHR_NATMC_FUNC__(add_return_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+ ETHR_AINT_T__ tmp;
+
+ tmp = incr;
+ __asm__ __volatile__(
+ "lock; xadd" ETHR_AINT_SUFFIX__ " %0, %1" /* xadd didn't exist prior to the 486 */
+ : "=r"(tmp)
+ : "m"(var->counter), "0"(tmp)
+ : "memory");
+ /* now tmp is the atomic's previous value */
+ return tmp + incr;
}
#endif /* ETHR_TRY_INLINE_FUNCS */
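Note on the hunks above: the open-coded and_retold/or_retold CAS loops are removed and only full-barrier primitives (cmpxchg_mb, xchg_mb, add_return_mb, ...) remain; read-modify-write operations without a dedicated instruction are expected to be rebuilt on top of cmpxchg by a higher layer. A minimal standalone sketch of that rebuild pattern, using hypothetical names and a GCC builtin instead of the ethread macros:

    #include <stdint.h>

    /* Hypothetical stand-in for ethr_native_atomic32_t. */
    typedef struct { volatile int32_t counter; } my_atomic32_t;

    /* Full-barrier compare-and-swap returning the value actually seen,
       like the lock-prefixed cmpxchg in the hunk above. */
    static inline int32_t
    my_cmpxchg_mb(my_atomic32_t *var, int32_t new_val, int32_t exp)
    {
        return __sync_val_compare_and_swap(&var->counter, exp, new_val);
    }

    /* Fetch-and-or built from cmpxchg, as the removed or_retold loop did. */
    static inline int32_t
    my_or_retold(my_atomic32_t *var, int32_t mask)
    {
        int32_t exp, act = var->counter;
        do {
            exp = act;
            act = my_cmpxchg_mb(var, exp | mask, exp);
        } while (act != exp);
        return act; /* previous value of the atomic */
    }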
diff --git a/erts/include/internal/i386/ethr_dw_atomic.h b/erts/include/internal/i386/ethr_dw_atomic.h
new file mode 100644
index 0000000000..9fb89bbe43
--- /dev/null
+++ b/erts/include/internal/i386/ethr_dw_atomic.h
@@ -0,0 +1,278 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native double word atomics for x86/x86_64
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_X86_DW_ATOMIC_H__
+#define ETHR_X86_DW_ATOMIC_H__
+
+#ifdef ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT
+
+#define ETHR_HAVE_NATIVE_DW_ATOMIC
+#define ETHR_NATIVE_DW_ATOMIC_IMPL "ethread"
+
+/*
+ * If ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__ is defined, it will be used
+ * at runtime in order to determine if native or fallback implementation
+ * should be used.
+ */
+#define ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__ \
+ ETHR_X86_RUNTIME_CONF_HAVE_DW_CMPXCHG__
+
+#if ETHR_SIZEOF_PTR == 4
+typedef volatile ethr_sint64_t * ethr_native_dw_ptr_t;
+# define ETHR_DW_NATMC_ALIGN_MASK__ 0x7
+# define ETHR_DW_CMPXCHG_SFX__ "8b"
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint64_t
+#else
+#ifdef ETHR_HAVE_INT128_T
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint128_t
+typedef volatile ethr_sint128_t * ethr_native_dw_ptr_t;
+#else
+typedef struct {
+ ethr_sint64_t sint64[2];
+} ethr_native_sint128_t__;
+typedef volatile ethr_native_sint128_t__ * ethr_native_dw_ptr_t;
+#endif
+# define ETHR_DW_NATMC_ALIGN_MASK__ 0xf
+# define ETHR_DW_CMPXCHG_SFX__ "16b"
+#endif
+
+/*
+ * We need 16 byte aligned memory in 64-bit mode, and 8 byte aligned
+ * memory in 32-bit mode. 16 byte aligned malloc in 64-bit mode is
+ * not common, and at least some glibc malloc implementations
+ * only guarantee 4-byte alignment in 32-bit mode.
+ *
+ * This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
+ * aligned memory in 32-bit mode. A malloc implementation that does
+ * not adhere to these alignment requirements is seriously broken,
+ * and we won't bother trying to work around it.
+ *
+ * Since memory alignment may be off by one word we need to align at
+ * runtime. We, therefore, need an extra word allocated.
+ */
+#define ETHR_DW_NATMC_MEM__(VAR) \
+ (&var->c[(int) ((ethr_uint_t) &(VAR)->c[0]) & ETHR_DW_NATMC_ALIGN_MASK__])
+typedef union {
+#ifdef ETHR_NATIVE_SU_DW_SINT_T
+ volatile ETHR_NATIVE_SU_DW_SINT_T dw_sint;
+#endif
+ volatile ethr_sint_t sint[3];
+ volatile char c[ETHR_SIZEOF_PTR*3];
+} ethr_native_dw_atomic_t;
+
+
+#if (defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_ATOMIC_IMPL__) \
+ || defined(ETHR_X86_SSE2_ASM_C__)) \
+ && ETHR_SIZEOF_PTR == 4 \
+ && defined(ETHR_GCC_HAVE_SSE2_ASM_SUPPORT)
+ethr_sint64_t
+ethr_sse2_native_su_dw_atomic_read(ethr_native_dw_atomic_t *var);
+void
+ethr_sse2_native_su_dw_atomic_set(ethr_native_dw_atomic_t *var,
+ ethr_sint64_t val);
+#endif
+
+#if (defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_ATOMIC_IMPL__) \
+ || defined(ETHR_X86_SSE2_ASM_C__))
+# ifdef ETHR_DEBUG
+# define ETHR_DW_DBG_ALIGNED__(PTR) \
+ ETHR_ASSERT((((ethr_uint_t) (PTR)) & ETHR_DW_NATMC_ALIGN_MASK__) == 0);
+# else
+# define ETHR_DW_DBG_ALIGNED__(PTR)
+# endif
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR
+static ETHR_INLINE ethr_sint_t *
+ethr_native_dw_atomic_addr(ethr_native_dw_atomic_t *var)
+{
+ return (ethr_sint_t *) ETHR_DW_NATMC_MEM__(var);
+}
+
+#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
+/*
+ * When position independent code is used in 32-bit mode, the EBX register
+ * is used for storage of global offset table address, and we may not
+ * use it as input or output in an asm. We need to save and restore the
+ * EBX register explicitly (for some reason gcc doesn't provide this
+ * service to us).
+ */
+# define ETHR_NO_CLOBBER_EBX__ 1
+#else
+# define ETHR_NO_CLOBBER_EBX__ 0
+#endif
+
+#if ETHR_NO_CLOBBER_EBX__ && !defined(ETHR_CMPXCHG8B_REGISTER_SHORTAGE)
+/* When no optimization is on, we'll run into a register shortage */
+# if defined(ETHR_DEBUG) || defined(DEBUG) || defined(VALGRIND) \
+ || defined(GCOV) || defined(PURIFY) || defined(PURECOV)
+# define ETHR_CMPXCHG8B_REGISTER_SHORTAGE 1
+# else
+# define ETHR_CMPXCHG8B_REGISTER_SHORTAGE 0
+# endif
+#endif
+
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_MB
+
+static ETHR_INLINE int
+ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
+ ethr_sint_t *new,
+ ethr_sint_t *xchg)
+{
+ ethr_native_dw_ptr_t p = (ethr_native_dw_ptr_t) ETHR_DW_NATMC_MEM__(var);
+ char xchgd;
+
+ ETHR_DW_DBG_ALIGNED__(p);
+
+ __asm__ __volatile__(
+#if ETHR_NO_CLOBBER_EBX__
+ "pushl %%ebx\n\t"
+# if ETHR_CMPXCHG8B_REGISTER_SHORTAGE
+ "movl (%7), %%ebx\n\t"
+ "movl 4(%7), %%ecx\n\t"
+# else
+ "movl %8, %%ebx\n\t"
+# endif
+#endif
+ "lock; cmpxchg" ETHR_DW_CMPXCHG_SFX__ " %0\n\t"
+ "setz %3\n\t"
+#if ETHR_NO_CLOBBER_EBX__
+ "popl %%ebx\n\t"
+#endif
+ : "=m"(*p), "=d"(xchg[1]), "=a"(xchg[0]), "=c"(xchgd)
+ : "m"(*p), "1"(xchg[1]), "2"(xchg[0]),
+#if ETHR_NO_CLOBBER_EBX__
+# if ETHR_CMPXCHG8B_REGISTER_SHORTAGE
+ "3"(new)
+# else
+ "3"(new[1]),
+ "r"(new[0])
+# endif
+#else
+ "3"(new[1]),
+ "b"(new[0])
+#endif
+ : "cc", "memory");
+
+ return (int) xchgd;
+}
+
+#undef ETHR_NO_CLOBBER_EBX__
+
+#if ETHR_SIZEOF_PTR == 4 && defined(ETHR_GCC_HAVE_SSE2_ASM_SUPPORT)
+
+typedef union {
+ ethr_sint64_t sint64;
+ ethr_sint_t sint[2];
+} ethr_dw_atomic_no_sse2_convert_t;
+
+#define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ
+
+static ETHR_INLINE ethr_sint64_t
+ethr_native_su_dw_atomic_read(ethr_native_dw_atomic_t *var)
+{
+ if (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__)
+ return ethr_sse2_native_su_dw_atomic_read(var);
+ else {
+ ethr_sint_t new[2];
+ ethr_dw_atomic_no_sse2_convert_t xchg;
+ new[0] = new[1] = xchg.sint[0] = xchg.sint[1] = 0x83838383;
+ (void) ethr_native_dw_atomic_cmpxchg_mb(var, new, xchg.sint);
+ return xchg.sint64;
+ }
+}
+
+#define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET
+
+static ETHR_INLINE void
+ethr_native_su_dw_atomic_set(ethr_native_dw_atomic_t *var,
+ ethr_sint64_t val)
+{
+ if (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__)
+ ethr_sse2_native_su_dw_atomic_set(var, val);
+ else {
+ ethr_sint_t xchg[2] = {0, 0};
+ ethr_dw_atomic_no_sse2_convert_t new;
+ new.sint64 = val;
+ while (!ethr_native_dw_atomic_cmpxchg_mb(var, new.sint, xchg));
+ }
+}
+
+#endif /* ETHR_SIZEOF_PTR == 4 */
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#if defined(ETHR_X86_SSE2_ASM_C__) \
+ && ETHR_SIZEOF_PTR == 4 \
+ && defined(ETHR_GCC_HAVE_SSE2_ASM_SUPPORT)
+
+/*
+ * 8-byte aligned loads and stores of 64-bit values are atomic from
+ * pentium and forward. An ordinary volatile load or store in 32-bit
+ * mode generates two 32-bit operations (at least with gcc-4.1.2 using
+ * -msse2). In order to guarantee one 64-bit load/store operation
+ * from/to memory we load/store via an xmm register using movq.
+ *
+ * Load/store can be achieved using cmpxchg8b, however, using movq is
+ * much faster. Unfortunately we cannot do the same thing in 64-bit
+ * mode; instead, we have to do loads and stores via cmpxchg16b.
+ *
+ * We do not inline these, but instead compile them into a separate
+ * object file using -msse2, since we don't want to use -msse2 for
+ * the whole system. If we detect sse2 support (pentium4 and forward)
+ * at runtime, we use them; otherwise, we fall back to using cmpxchg8b
+ * for loads and stores. This way the binary can be moved between
+ * processors with and without sse2 support.
+ */
+
+ethr_sint64_t
+ethr_sse2_native_su_dw_atomic_read(ethr_native_dw_atomic_t *var)
+{
+ ethr_native_dw_ptr_t p = (ethr_native_dw_ptr_t) ETHR_DW_NATMC_MEM__(var);
+ ethr_sint64_t val;
+ ETHR_DW_DBG_ALIGNED__(p);
+ __asm__ __volatile__("movq %1, %0\n\t" : "=x"(val) : "m"(*p) : "memory");
+ return val;
+}
+
+void
+ethr_sse2_native_su_dw_atomic_set(ethr_native_dw_atomic_t *var,
+ ethr_sint64_t val)
+{
+ ethr_native_dw_ptr_t p = (ethr_native_dw_ptr_t) ETHR_DW_NATMC_MEM__(var);
+ ETHR_DW_DBG_ALIGNED__(p);
+ __asm__ __volatile__("movq %1, %0\n\t" : "=m"(*p) : "x"(val) : "memory");
+}
+
+#endif /* ETHR_X86_SSE2_ASM_C__ */
+
+#endif /* ETHR_GCC_HAVE_DW_CMPXCHG_ASM_SUPPORT */
+
+#endif /* ETHR_X86_DW_ATOMIC_H__ */
+
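The alignment comment in the new header above is easiest to see in isolation: allocate one word more than needed and offset to the nearest properly aligned double word at runtime. The sketch below only mirrors the idea behind ETHR_DW_NATMC_MEM__; all names are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    /* cmpxchg8b needs 8-byte alignment in 32-bit mode, i.e. twice the
       word alignment that a sane malloc is assumed to provide. */
    #define DW_ALIGN      (2 * sizeof(void *))
    #define DW_ALIGN_MASK (DW_ALIGN - 1)

    /* Hypothetical counterpart of ethr_native_dw_atomic_t: one extra
       word so an aligned double word always fits inside the object. */
    typedef union {
        intptr_t sint[3];           /* forces word alignment, like the original */
        char c[3 * sizeof(void *)];
    } my_dw_atomic_t;

    /* Same idea as ETHR_DW_NATMC_MEM__: the base is at least word
       aligned, so it is either already double-word aligned (offset 0)
       or off by exactly one word; adding that offset reaches the next
       aligned boundary. */
    static char *
    my_dw_mem(my_dw_atomic_t *var)
    {
        uintptr_t off = ((uintptr_t) &var->c[0]) & DW_ALIGN_MASK;
        return &var->c[off];
    }

    int main(void)
    {
        my_dw_atomic_t a;
        printf("base=%p aligned=%p\n", (void *) &a.c[0], (void *) my_dw_mem(&a));
        return 0;
    }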
diff --git a/erts/include/internal/i386/ethr_membar.h b/erts/include/internal/i386/ethr_membar.h
new file mode 100644
index 0000000000..92d9de7f3f
--- /dev/null
+++ b/erts/include/internal/i386/ethr_membar.h
@@ -0,0 +1,114 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers for x86/x86-64
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_X86_MEMBAR_H__
+#define ETHR_X86_MEMBAR_H__
+
+#define ETHR_LoadLoad (1 << 0)
+#define ETHR_LoadStore (1 << 1)
+#define ETHR_StoreLoad (1 << 2)
+#define ETHR_StoreStore (1 << 3)
+
+#define ETHR_NO_SSE2_MEMORY_BARRIER__ \
+do { \
+ volatile ethr_sint32_t x__ = 0; \
+ __asm__ __volatile__ ("lock; orl $0x0, %0\n\t" \
+ : "=m"(x__) \
+ : "m"(x__) \
+ : "memory"); \
+} while (0)
+
+static __inline__ void
+ethr_cfence__(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+}
+
+static __inline__ void
+ethr_mfence__(void)
+{
+#if ETHR_SIZEOF_PTR == 4
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ ETHR_NO_SSE2_MEMORY_BARRIER__;
+ else
+#endif
+ __asm__ __volatile__ ("mfence\n\t" : : : "memory");
+}
+
+static __inline__ void
+ethr_sfence__(void)
+{
+#if ETHR_SIZEOF_PTR == 4
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ ETHR_NO_SSE2_MEMORY_BARRIER__;
+ else
+#endif
+ __asm__ __volatile__ ("sfence\n\t" : : : "memory");
+}
+
+static __inline__ void
+ethr_lfence__(void)
+{
+#if ETHR_SIZEOF_PTR == 4
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ ETHR_NO_SSE2_MEMORY_BARRIER__;
+ else
+#endif
+ __asm__ __volatile__ ("lfence\n\t" : : : "memory");
+}
+
+#define ETHR_X86_OUT_OF_ORDER_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B) == ETHR_StoreStore, \
+ ethr_sfence__(), \
+ ETHR_CHOOSE_EXPR((B) == ETHR_LoadLoad, \
+ ethr_lfence__(), \
+ ethr_mfence__()))
+
+#ifdef ETHR_X86_OUT_OF_ORDER
+
+#define ETHR_MEMBAR(B) \
+ ETHR_X86_OUT_OF_ORDER_MEMBAR((B))
+
+#else /* !ETHR_X86_OUT_OF_ORDER (the default) */
+
+/*
+ * We assume that only stores before loads may be reordered. That is,
+ * we assume that *no* instructions like these are used:
+ * - CLFLUSH,
+ * - streaming stores executed with non-temporal move,
+ * - string operations, or
+ * - other instructions which aren't LoadLoad, LoadStore, and StoreStore
+ * ordered by themselves
+ * If such instructions are used, either insert memory barriers
+ * using ETHR_X86_OUT_OF_ORDER_MEMBAR() at appropriate places, or
+ * define ETHR_X86_OUT_OF_ORDER. For more info see Intel 64 and IA-32
+ * Architectures Software Developer's Manual; Vol 3A; Chapter 8.2.2.
+ */
+
+#define ETHR_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B) & ETHR_StoreLoad, ethr_mfence__(), ethr_cfence__())
+
+#endif /* !ETHR_X86_OUT_OF_ORDER */
+
+#endif /* ETHR_X86_MEMBAR_H__ */
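A condensed illustration of the default mapping in the header above: on x86/x86_64, only a requested StoreLoad barrier needs a real fence; every other combination reduces to a compiler barrier. Hypothetical names; a sketch of the idea, not the ethread API itself.

    /* Barrier-type flags mirroring the header above. */
    #define MY_LoadLoad   (1 << 0)
    #define MY_LoadStore  (1 << 1)
    #define MY_StoreLoad  (1 << 2)
    #define MY_StoreStore (1 << 3)

    /* Under the default model (no ETHR_X86_OUT_OF_ORDER) only older
       stores may pass younger loads, so only StoreLoad needs mfence;
       the rest only have to stop compiler reordering. */
    static inline void
    my_membar(int bars)
    {
        if (bars & MY_StoreLoad)
            __asm__ __volatile__("mfence" : : : "memory");  /* real fence */
        else
            __asm__ __volatile__("" : : : "memory");        /* compiler only */
    }

With this mapping, a release store such as set_relb earlier in the diff issues my_membar(MY_LoadStore|MY_StoreStore), i.e. just a compiler barrier, followed by the plain store.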
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index b5a17caefb..80e4dc7b99 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,17 +24,15 @@
#ifndef ETHREAD_I386_ETHREAD_H
#define ETHREAD_I386_ETHREAD_H
+#include "ethr_membar.h"
#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
#if ETHR_SIZEOF_PTR == 8
# define ETHR_ATOMIC_WANT_64BIT_IMPL__
# include "atomic.h"
#endif
+#include "ethr_dw_atomic.h"
#include "spinlock.h"
#include "rwlock.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_SPINLOCKS 1
-#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
-
#endif /* ETHREAD_I386_ETHREAD_H */
diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
index be47f459ce..1a8cd7da0c 100644
--- a/erts/include/internal/i386/rwlock.h
+++ b/erts/include/internal/i386/rwlock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -26,6 +26,9 @@
#ifndef ETHREAD_I386_RWLOCK_H
#define ETHREAD_I386_RWLOCK_H
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_NATIVE_RWSPINLOCK_IMPL "ethread"
+
/* XXX: describe the algorithm */
typedef struct {
volatile int lock;
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
index 0325324895..a84fba91b1 100644
--- a/erts/include/internal/i386/spinlock.h
+++ b/erts/include/internal/i386/spinlock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,9 @@
#ifndef ETHREAD_I386_SPINLOCK_H
#define ETHREAD_I386_SPINLOCK_H
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_NATIVE_SPINLOCK_IMPL "ethread"
+
/* A spinlock is the low byte of an aligned 32-bit integer.
* A non-zero value means that the lock is locked.
*/
@@ -46,16 +49,20 @@ ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
* On i386 this needs to be a locked operation
* to avoid Pentium Pro errata 66 and 92.
*/
-#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
- __asm__ __volatile__("" : : : "memory");
- *(unsigned char*)&lock->lock = 0;
-#else
- char tmp = 0;
- __asm__ __volatile__(
- "xchgb %b0, %1"
- : "=q"(tmp), "=m"(lock->lock)
- : "0"(tmp) : "memory");
+#if !defined(__x86_64__)
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__) {
+ char tmp = 0;
+ __asm__ __volatile__(
+ "xchgb %b0, %1"
+ : "=q"(tmp), "=m"(lock->lock)
+ : "0"(tmp) : "memory");
+ }
+ else
#endif
+ {
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ *(unsigned char*)&lock->lock = 0;
+ }
}
static ETHR_INLINE int
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index 93bc06036f..fb1288c330 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -25,16 +25,6 @@
#ifndef ETHR_LIBATOMIC_OPS_ATOMIC_H__
#define ETHR_LIBATOMIC_OPS_ATOMIC_H__
-#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS)
-#define ETHR_HAVE_NATIVE_ATOMICS 1
-
-#if (defined(__i386__) && !defined(ETHR_PRE_PENTIUM4_COMPAT)) \
- || defined(__x86_64__)
-#define AO_USE_PENTIUM4_INSTRS
-#endif
-
-#include "atomic_ops.h"
-
/*
* libatomic_ops can be downloaded from:
* http://www.hpl.hp.com/research/linux/atomic_ops/
@@ -46,21 +36,18 @@
* - AO_store()
* - AO_compare_and_swap()
*
- * The `AO_t' type also have to be at least as large as the `void *' type.
*/
-#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
-#error The AO_t type is too small
-#endif
-
#if ETHR_SIZEOF_AO_T == 4
#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATIVE_ATOMIC32_IMPL "libatomic_ops"
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic32_t
#define ETHR_AINT_T__ ethr_sint32_t
#define ETHR_AINT_SUFFIX__ "l"
#elif ETHR_SIZEOF_AO_T == 8
#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATIVE_ATOMIC64_IMPL "libatomic_ops"
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic64_t
#define ETHR_AINT_T__ ethr_sint64_t
@@ -69,61 +56,29 @@
#error "Unsupported integer size"
#endif
-#if ETHR_SIZEOF_AO_T == 8
-typedef union {
- volatile AO_t counter;
- ethr_sint32_t sint32[2];
-} ETHR_ATMC_T__;
-#else
typedef struct {
volatile AO_t counter;
} ETHR_ATMC_T__;
-#endif
-#define ETHR_MEMORY_BARRIER AO_nop_full()
-#ifdef AO_HAVE_nop_write
-# define ETHR_WRITE_MEMORY_BARRIER AO_nop_write()
-#else
-# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
-#endif
-#ifdef AO_HAVE_nop_read
-# define ETHR_READ_MEMORY_BARRIER AO_nop_read()
-#else
-# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
-#endif
-#ifdef AO_NO_DD_ORDERING
-# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
#else
-# define ETHR_READ_DEPEND_MEMORY_BARRIER AO_compiler_barrier()
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR 1
#endif
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-
static ETHR_INLINE ETHR_AINT_T__ *
ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
{
return (ETHR_AINT_T__ *) &var->counter;
}
-#if ETHR_SIZEOF_AO_T == 8
-/*
- * We also need to provide an ethr_native_atomic32_addr(), since
- * this 64-bit implementation will be used implementing 32-bit
- * native atomics.
- */
-
-static ETHR_INLINE ethr_sint32_t *
-ethr_native_atomic32_addr(ETHR_ATMC_T__ *var)
-{
- ETHR_ASSERT(((void *) &var->sint32[0]) == ((void *) &var->counter));
-#ifdef ETHR_BIGENDIAN
- return &var->sint32[1];
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
#else
- return &var->sint32[0];
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET 1
#endif
-}
-
-#endif /* ETHR_SIZEOF_AO_T == 8 */
static ETHR_INLINE void
ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
@@ -131,197 +86,198 @@ ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
AO_store(&var->counter, (AO_t) value);
}
+#ifdef AO_HAVE_store_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB 1
+#endif
+
static ETHR_INLINE void
-ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
- ETHR_NATMC_FUNC__(set)(var, value);
+ AO_store_release(&var->counter, (AO_t) value);
}
+#endif
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return (ETHR_AINT_T__) AO_load(&var->counter);
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
-#ifdef AO_HAVE_fetch_and_add_full
- return ((ETHR_AINT_T__) AO_fetch_and_add_full(&var->counter, (AO_t) incr)) + incr;
+#ifdef AO_HAVE_load_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_ACQB 1
#else
- while (1) {
- AO_t exp = AO_load(&var->counter);
- AO_t new = exp + (AO_t) incr;
- if (AO_compare_and_swap_full(&var->counter, exp, new))
- return (ETHR_AINT_T__) new;
- }
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_ACQB 1
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- (void) ETHR_NATMC_FUNC__(add_return)(var, incr);
+ return (ETHR_AINT_T__) AO_load_acquire(&var->counter);
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
-{
-#ifdef AO_HAVE_fetch_and_add1_full
- return ((ETHR_AINT_T__) AO_fetch_and_add1_full(&var->counter)) + 1;
-#else
- return ETHR_NATMC_FUNC__(add_return)(var, 1);
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
-{
- (void) ETHR_NATMC_FUNC__(inc_return)(var);
-}
+#ifdef AO_HAVE_fetch_and_add
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
-{
-#ifdef AO_HAVE_fetch_and_sub1_full
- return ((ETHR_AINT_T__) AO_fetch_and_sub1_full(&var->counter)) - 1;
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN 1
#else
- return ETHR_NATMC_FUNC__(add_return)(var, -1);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN 1
#endif
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
-{
- (void) ETHR_NATMC_FUNC__(dec_return)(var);
-}
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- while (1) {
- AO_t exp = AO_load(&var->counter);
- AO_t new = exp & ((AO_t) mask);
- if (AO_compare_and_swap_full(&var->counter, exp, new))
- return (ETHR_AINT_T__) exp;
- }
+ return ((ETHR_AINT_T__) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- while (1) {
- AO_t exp = AO_load(&var->counter);
- AO_t new = exp | ((AO_t) mask);
- if (AO_compare_and_swap_full(&var->counter, exp, new))
- return (ETHR_AINT_T__) exp;
- }
-}
+#endif
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ exp)
-{
- ETHR_AINT_T__ act;
- do {
- if (AO_compare_and_swap_full(&var->counter, (AO_t) exp, (AO_t) new))
- return exp;
- act = (ETHR_AINT_T__) AO_load(&var->counter);
- } while (act == exp);
- return act;
-}
+#ifdef AO_HAVE_fetch_and_add1
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- while (1) {
- AO_t exp = AO_load(&var->counter);
- if (AO_compare_and_swap_full(&var->counter, exp, (AO_t) new))
- return (ETHR_AINT_T__) exp;
- }
+ return ((ETHR_AINT_T__) AO_fetch_and_add1(&var->counter)) + 1;
}
-/*
- * Atomic ops with at least specified barriers.
- */
+#endif
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
-{
-#ifdef AO_HAVE_load_acquire
- return (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#ifdef AO_HAVE_fetch_and_add1_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_ACQB 1
#else
- ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
- ETHR_MEMORY_BARRIER;
- return res;
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_ACQB 1
#endif
-}
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef AO_HAVE_fetch_and_add1_acquire
return ((ETHR_AINT_T__) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_sub1
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN 1
#else
- ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(add_return)(var, 1);
- return res;
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN 1
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
-#ifdef AO_HAVE_store_release
- AO_store_release(&var->counter, (AO_t) value);
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1(&var->counter)) - 1;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_sub1_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RELB 1
#else
- ETHR_MEMORY_BARRIER;
- ETHR_NATMC_FUNC__(set)(var, value);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RELB 1
#endif
-}
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef AO_HAVE_fetch_and_sub1_release
return ((ETHR_AINT_T__) AO_fetch_and_sub1_release(&var->counter)) - 1;
+}
+
+#endif
+
+#ifdef AO_HAVE_compare_and_swap
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG 1
#else
- return ETHR_NATMC_FUNC__(dec_return)(var);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG 1
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
- (void) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
}
+#endif
+
+#ifdef AO_HAVE_compare_and_swap_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
-#ifdef AO_HAVE_compare_and_swap_acquire
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
+#ifdef AO_HAVE_load_acquire
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#else
act = (ETHR_AINT_T__) AO_load(&var->counter);
+#endif
} while (act == exp);
+#ifndef AO_HAVE_load_acquire
AO_nop_full();
+#endif
return act;
+}
+
+#endif
+
+#ifdef AO_HAVE_compare_and_swap_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB 1
#else
- ETHR_AINT_T__ act = ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
- return act;
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RELB 1
#endif
-}
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
-#ifdef AO_HAVE_compare_and_swap_release
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
@@ -329,11 +285,9 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
-#else
- return ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
-#endif
}
+#endif
#endif /* ETHR_TRY_INLINE_FUNCS */
@@ -341,6 +295,4 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
#undef ETHR_ATMC_T__
#undef ETHR_AINT_T__
-#endif /* !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS) */
-
#endif /* ETHR_LIBATOMIC_OPS_ATOMIC_H__ */
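The rewritten header above only defines an ETHR_HAVE_ETHR_NATIVE_ATOMIC*_&lt;OP&gt; macro when libatomic_ops actually provides the corresponding AO_ primitive; a generic layer is then expected to synthesize any missing operation, typically from cmpxchg. A self-contained sketch of that pattern with hypothetical names (MY_HAVE_NATIVE_INC_RETURN stands in for the capability macros, my_native_inc_return for the native op):

    #include <stdint.h>

    typedef struct { volatile long counter; } my_atomic_t;  /* hypothetical */

    static inline long
    my_cmpxchg(my_atomic_t *v, long new_val, long exp)
    {
        return __sync_val_compare_and_swap(&v->counter, exp, new_val);
    }

    #ifdef MY_HAVE_NATIVE_INC_RETURN
    /* Use the native fetch-and-add-1 when the capability is advertised
       (cf. AO_HAVE_fetch_and_add1 above). */
    #  define my_inc_return(v) my_native_inc_return(v)
    #else
    /* Otherwise emulate it with a CAS loop. */
    static inline long
    my_inc_return(my_atomic_t *v)
    {
        long exp, act = v->counter;
        do {
            exp = act;
            act = my_cmpxchg(v, exp + 1, exp);
        } while (act != exp);
        return exp + 1;
    }
    #endif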
diff --git a/erts/include/internal/libatomic_ops/ethr_membar.h b/erts/include/internal/libatomic_ops/ethr_membar.h
new file mode 100644
index 0000000000..b8530a0094
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_membar.h
@@ -0,0 +1,75 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers when using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_MEMBAR_H__
+#define ETHR_LIBATOMIC_OPS_MEMBAR_H__
+
+#define ETHR_LoadLoad (1 << 0)
+#define ETHR_LoadStore (1 << 1)
+#define ETHR_StoreLoad (1 << 2)
+#define ETHR_StoreStore (1 << 3)
+
+#ifndef AO_HAVE_nop_full
+# error "No AO_nop_full()"
+#endif
+
+static __inline__ void
+ethr_mb__(void)
+{
+ AO_nop_full();
+}
+
+static __inline__ void
+ethr_rb__(void)
+{
+#ifdef AO_HAVE_nop_read
+ AO_nop_read();
+#else
+ AO_nop_full();
+#endif
+}
+
+static __inline__ void
+ethr_wb__(void)
+{
+#ifdef AO_HAVE_nop_write
+ AO_nop_write();
+#else
+ AO_nop_full();
+#endif
+}
+
+#define ETHR_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B) == ETHR_StoreStore, \
+ ethr_wb__(), \
+ ETHR_CHOOSE_EXPR((B) == ETHR_LoadLoad, \
+ ethr_rb__(), \
+ ethr_mb__()))
+
+#define ETHR_COMPILER_BARRIER AO_compiler_barrier()
+#ifdef AO_NO_DD_ORDERING
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#endif
+
+#endif /* ETHR_LIBATOMIC_OPS_MEMBAR_H__ */
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
index ee73ba73bc..e1fdd588bb 100644
--- a/erts/include/internal/libatomic_ops/ethread.h
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,6 +25,18 @@
#ifndef ETHREAD_LIBATOMIC_OPS_H__
#define ETHREAD_LIBATOMIC_OPS_H__
+#if (defined(ETHR_HAVE_LIBATOMIC_OPS) \
+ && ((ETHR_SIZEOF_AO_T == 4 && !defined(ETHR_HAVE_NATIVE_ATOMIC32)) \
+ || (ETHR_SIZEOF_AO_T == 8 && !defined(ETHR_HAVE_NATIVE_ATOMIC64))))
+
+#if defined(__x86_64__)
+#define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#include "atomic_ops.h"
+#include "ethr_membar.h"
#include "ethr_atomic.h"
#endif
+
+#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index 522f433649..6001620677 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -29,27 +29,31 @@
#define ETHREAD_PPC_ATOMIC_H
#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATIVE_ATOMIC32_IMPL "ethread"
typedef struct {
volatile ethr_sint32_t counter;
} ethr_native_atomic32_t;
-#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
+
static ETHR_INLINE ethr_sint32_t *
ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
{
return (ethr_sint32_t *) &var->counter;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
+
static ETHR_INLINE void
-ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
+ethr_native_atomic32_set(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
-#define ethr_native_atomic32_set(v, i) ethr_native_atomic32_init((v), (i))
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_read(ethr_native_atomic32_t *var)
@@ -57,57 +61,68 @@ ethr_native_atomic32_read(ethr_native_atomic32_t *var)
return var->counter;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
ethr_sint32_t tmp;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%1\n\t"
"add %0,%2,%0\n\t"
"stwcx. %0,0,%1\n\t"
"bne- 1b\n\t"
- "isync"
: "=&r"(tmp)
: "r"(&var->counter), "r"(incr)
: "cc", "memory");
return tmp;
}
-static ETHR_INLINE void
-ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return_acqb(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- /* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic32_add_return(var, incr);
+ ethr_sint32_t res;
+ res = ethr_native_atomic32_add_return(var, incr);
+ __asm__ __volatile("isync\n\t" : : : "memory");
+ return res;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
ethr_sint32_t tmp;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%1\n\t"
"addic %0,%0,1\n\t" /* due to addi's (rA|0) behaviour */
"stwcx. %0,0,%1\n\t"
"bne- 1b\n\t"
- "isync"
: "=&r"(tmp)
: "r"(&var->counter)
: "cc", "memory");
return tmp;
}
-static ETHR_INLINE void
-ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
{
- /* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic32_inc_return(var);
+ ethr_sint32_t res;
+ res = ethr_native_atomic32_inc_return(var);
+ __asm__ __volatile("isync\n\t" : : : "memory");
+ return res;
}
+
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN 1
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
@@ -115,82 +130,120 @@ ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
ethr_sint32_t tmp;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%1\n\t"
"addic %0,%0,-1\n\t"
"stwcx. %0,0,%1\n\t"
"bne- 1b\n\t"
- "isync"
: "=&r"(tmp)
: "r"(&var->counter)
: "cc", "memory");
return tmp;
}
-static ETHR_INLINE void
-ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return_acqb(ethr_native_atomic32_t *var)
{
- /* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic32_dec_return(var);
+ ethr_sint32_t res;
+ res = ethr_native_atomic32_dec_return(var);
+ __asm__ __volatile("isync\n\t" : : : "memory");
+ return res;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
ethr_sint32_t old, new;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%2\n\t"
"and %1,%0,%3\n\t"
"stwcx. %1,0,%2\n\t"
"bne- 1b\n\t"
- "isync"
: "=&r"(old), "=&r"(new)
: "r"(&var->counter), "r"(mask)
: "cc", "memory");
return old;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold_acqb(ethr_native_atomic32_t *var, ethr_sint32_t mask)
+{
+ ethr_sint32_t res;
+ res = ethr_native_atomic32_and_retold(var, mask);
+ __asm__ __volatile("isync\n\t" : : : "memory");
+ return res;
+}
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
ethr_sint32_t old, new;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%2\n\t"
"or %1,%0,%3\n\t"
"stwcx. %1,0,%2\n\t"
"bne- 1b\n\t"
- "isync"
: "=&r"(old), "=&r"(new)
: "r"(&var->counter), "r"(mask)
: "cc", "memory");
return old;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold_acqb(ethr_native_atomic32_t *var, ethr_sint32_t mask)
+{
+ ethr_sint32_t res;
+ res = ethr_native_atomic32_or_retold(var, mask);
+ __asm__ __volatile("isync\n\t" : : : "memory");
+ return res;
+}
+
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
ethr_sint32_t tmp;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%1\n\t"
"stwcx. %2,0,%1\n\t"
"bne- 1b\n\t"
- "isync"
: "=&r"(tmp)
: "r"(&var->counter), "r"(val)
: "cc", "memory");
return tmp;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg_acqb(ethr_native_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ res = ethr_native_atomic32_xchg(var, val);
+ __asm__ __volatile("isync\n\t" : : : "memory");
+ return res;
+}
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
ethr_sint32_t new,
@@ -199,14 +252,12 @@ ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
ethr_sint32_t old;
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%2\n\t"
"cmpw 0,%0,%3\n\t"
"bne 2f\n\t"
"stwcx. %1,0,%2\n\t"
"bne- 1b\n\t"
- "isync\n"
"2:"
: "=&r"(old)
: "r"(new), "r"(&var->counter), "r"(expected)
@@ -215,25 +266,30 @@ ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
return old;
}
-/*
- * Atomic ops with at least specified barriers.
- */
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
-static ETHR_INLINE long
-ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
- long res = ethr_native_atomic32_read(var);
- ETHR_MEMORY_BARRIER;
- return res;
-}
+ ethr_sint32_t old;
-#define ethr_native_atomic32_set_relb ethr_native_atomic32_xchg
-#define ethr_native_atomic32_inc_return_acqb ethr_native_atomic32_inc_return
-#define ethr_native_atomic32_dec_relb ethr_native_atomic32_dec_return
-#define ethr_native_atomic32_dec_return_relb ethr_native_atomic32_dec_return
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "cmpw 0,%0,%3\n\t"
+ "bne 2f\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync\n"
+ "2:"
+ : "=&r"(old)
+ : "r"(new), "r"(&var->counter), "r"(expected)
+ : "cc", "memory");
-#define ethr_native_atomic32_cmpxchg_acqb ethr_native_atomic32_cmpxchg
-#define ethr_native_atomic32_cmpxchg_relb ethr_native_atomic32_cmpxchg
+ return old;
+}
#endif /* ETHR_TRY_INLINE_FUNCS */
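To make the barrier-placement change in the hunks above explicit: eieio/isync is no longer paid inside every operation; the relaxed op is a bare ll/sc loop and the _acqb variant appends a single isync afterwards. A condensed restatement with hypothetical names (PowerPC only, of course):

    #include <stdint.h>

    typedef struct { volatile int32_t counter; } my_atomic32_t;  /* hypothetical */

    /* Relaxed ll/sc add-and-return with no ordering guarantees. */
    static inline int32_t
    my_add_return(my_atomic32_t *var, int32_t incr)
    {
        int32_t tmp;
        __asm__ __volatile__(
            "1:\n\t"
            "lwarx   %0,0,%1\n\t"
            "add     %0,%2,%0\n\t"
            "stwcx.  %0,0,%1\n\t"
            "bne-    1b\n\t"
            : "=&r"(tmp)
            : "r"(&var->counter), "r"(incr)
            : "cc", "memory");
        return tmp;
    }

    /* Acquire variant: one isync after the loop has succeeded. */
    static inline int32_t
    my_add_return_acqb(my_atomic32_t *var, int32_t incr)
    {
        int32_t res = my_add_return(var, incr);
        __asm__ __volatile__("isync" : : : "memory");
        return res;
    }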
diff --git a/erts/include/internal/ppc32/ethr_membar.h b/erts/include/internal/ppc32/ethr_membar.h
new file mode 100644
index 0000000000..ff5cc86bfb
--- /dev/null
+++ b/erts/include/internal/ppc32/ethr_membar.h
@@ -0,0 +1,63 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers for PowerPC
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_PPC_MEMBAR_H__
+#define ETHR_PPC_MEMBAR_H__
+
+#define ETHR_LoadLoad (1 << 0)
+#define ETHR_LoadStore (1 << 1)
+#define ETHR_StoreLoad (1 << 2)
+#define ETHR_StoreStore (1 << 3)
+
+static __inline__ void
+ethr_lwsync__(void)
+{
+#ifdef ETHR_PPC_HAVE_NO_LWSYNC
+ __asm__ __volatile__ ("sync\n\t" : : : "memory");
+#else
+#ifndef ETHR_PPC_HAVE_LWSYNC
+ if (ETHR_PPC_RUNTIME_CONF_HAVE_NO_LWSYNC__)
+ __asm__ __volatile__ ("sync\n\t" : : : "memory");
+ else
+#endif
+ __asm__ __volatile__ ("lwsync\n\t" : : : "memory");
+#endif
+}
+
+static __inline__ void
+ethr_sync__(void)
+{
+ __asm__ __volatile__ ("sync\n\t" : : : "memory");
+}
+
+/*
+ * According to the "memory barrier instructions" section of
+ * http://www.ibm.com/developerworks/systems/articles/powerpc.html
+ * we want to use sync when a StoreLoad is needed and lwsync for
+ * everything else.
+ */
+#define ETHR_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B) & ETHR_StoreLoad, ethr_sync__(), ethr_lwsync__())
+
+#endif
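A condensed sketch of the policy in the header above, with hypothetical names: sync only when a StoreLoad barrier is requested, lwsync for everything else (the runtime fallback to sync on cores without lwsync is omitted here):

    #define MY_StoreLoad (1 << 2)   /* mirrors ETHR_StoreLoad above */

    static inline void
    my_ppc_membar(int bars)
    {
        if (bars & MY_StoreLoad)
            __asm__ __volatile__("sync"   : : : "memory");
        else
            __asm__ __volatile__("lwsync" : : : "memory");
    }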
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
index 3b619e9d01..e41c83c5da 100644
--- a/erts/include/internal/ppc32/ethread.h
+++ b/erts/include/internal/ppc32/ethread.h
@@ -24,12 +24,9 @@
#ifndef ETHREAD_PPC32_ETHREAD_H
#define ETHREAD_PPC32_ETHREAD_H
+#include "ethr_membar.h"
#include "atomic.h"
#include "spinlock.h"
#include "rwlock.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_SPINLOCKS 1
-#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
-
#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
index 19ec26ab68..311f000b69 100644
--- a/erts/include/internal/ppc32/rwlock.h
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -23,12 +23,14 @@
*
* Based on the examples in Appendix E of Motorola's
* "Programming Environments Manual For 32-Bit Implementations
- * of the PowerPC Architecture". Uses eieio instead of sync
- * in the unlock sequence, as suggested in the manual.
+ * of the PowerPC Architecture".
*/
#ifndef ETHREAD_PPC_RWLOCK_H
#define ETHREAD_PPC_RWLOCK_H
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_NATIVE_RWSPINLOCK_IMPL "ethread"
+
/* Unlocked if zero, read-locked if negative, write-locked if +1. */
typedef struct {
volatile int lock;
@@ -47,9 +49,10 @@ ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
int tmp;
- /* this is eieio + ethr_native_atomic_inc() - isync */
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+
+ /* this is ethr_native_atomic_inc() - isync */
__asm__ __volatile__(
- "eieio\n\t"
"1:\t"
"lwarx %0,0,%1\n\t"
"addic %0,%0,1\n\t"
@@ -105,7 +108,7 @@ ethr_native_read_lock(ethr_native_rwlock_t *lock)
static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
- __asm__ __volatile__("eieio" : : : "memory");
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
lock->lock = 0;
}
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
index c8460a3e8a..4c95ec9efb 100644
--- a/erts/include/internal/ppc32/spinlock.h
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -23,12 +23,14 @@
*
* Based on the examples in Appendix E of Motorola's
* "Programming Environments Manual For 32-Bit Implementations
- * of the PowerPC Architecture". Uses eieio instead of sync
- * in the unlock sequence, as suggested in the manual.
+ * of the PowerPC Architecture".
*/
#ifndef ETHREAD_PPC_SPINLOCK_H
#define ETHREAD_PPC_SPINLOCK_H
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_NATIVE_SPINLOCK_IMPL "ethread"
+
/* Unlocked if zero, locked if non-zero. */
typedef struct {
volatile unsigned int lock;
@@ -45,7 +47,7 @@ ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
- __asm__ __volatile__("eieio" : : : "memory");
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
lock->lock = 0;
}
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
index 4c29b28536..d0a77990cc 100644
--- a/erts/include/internal/pthread/ethr_event.h
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -21,7 +21,7 @@
* Author: Rickard Green
*/
-#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_NATIVE_ATOMICS)
+#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
/* --- Linux futex implementation of ethread events ------------------------- */
#define ETHR_LINUX_FUTEX_IMPL__
@@ -62,8 +62,7 @@ static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
ethr_sint32_t val;
- ETHR_MEMORY_BARRIER;
- val = ethr_atomic32_xchg(&e->futex, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg_mb(&e->futex, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
if (res != 0)
@@ -99,8 +98,7 @@ static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
ethr_sint32_t val;
- ETHR_MEMORY_BARRIER;
- val = ethr_atomic32_xchg(&e->state, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg_mb(&e->state, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = pthread_mutex_lock(&e->mtx);
if (res != 0)
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index c297522ab1..fe1daaa9cf 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -35,23 +35,16 @@
#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#ifndef ETHR_SPARC_V9_ATOMIC_COMMON__
-#define ETHR_SPARC_V9_ATOMIC_COMMON__
-
-#define ETHR_MEMORY_BARRIER \
- __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
- : : : "memory")
-
-#endif /* ETHR_SPARC_V9_ATOMIC_COMMON__ */
-
#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATIVE_ATOMIC32_IMPL "ethread"
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic32_t
#define ETHR_AINT_T__ ethr_sint32_t
#define ETHR_CAS__ "cas"
#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATIVE_ATOMIC64_IMPL "ethread"
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic64_t
#define ETHR_AINT_T__ ethr_sint64_t
@@ -66,17 +59,23 @@ typedef struct {
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__ *
ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
{
return (ETHR_AINT_T__ *) &var->counter;
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
- var->counter = i;
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET 1
+#endif
static ETHR_INLINE void
ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
@@ -84,182 +83,49 @@ ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
var->counter = i;
}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
- ETHR_AINT_T__ old, tmp;
-
- __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
- do {
- old = var->counter;
- tmp = old+incr;
- __asm__ __volatile__(
- ETHR_CAS__ " [%2], %1, %0"
- : "=&r"(tmp)
- : "r"(old), "r"(&var->counter), "0"(tmp)
- : "memory");
- } while (__builtin_expect(old != tmp, 0));
- __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
- return old+incr;
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
- (void)ETHR_NATMC_FUNC__(add_return)(var, incr);
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(add_return)(var, 1);
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
-{
- (void)ETHR_NATMC_FUNC__(add_return)(var, 1);
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(add_return)(var, -1);
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
-{
- (void)ETHR_NATMC_FUNC__(add_return)(var, -1);
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- ETHR_AINT_T__ old, tmp;
-
- __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
- do {
- old = var->counter;
- tmp = old & mask;
- __asm__ __volatile__(
- ETHR_CAS__ " [%2], %1, %0"
- : "=&r"(tmp)
- : "r"(old), "r"(&var->counter), "0"(tmp)
- : "memory");
- } while (__builtin_expect(old != tmp, 0));
- __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
- return old;
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
-{
- ETHR_AINT_T__ old, tmp;
-
- __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
- do {
- old = var->counter;
- tmp = old | mask;
- __asm__ __volatile__(
- ETHR_CAS__ " [%2], %1, %0"
- : "=&r"(tmp)
- : "r"(old), "r"(&var->counter), "0"(tmp)
- : "memory");
- } while (__builtin_expect(old != tmp, 0));
- __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
- return old;
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
-{
- ETHR_AINT_T__ old, new;
-
- __asm__ __volatile__("membar #LoadLoad|#StoreLoad" : : : "memory");
- do {
- old = var->counter;
- new = val;
- __asm__ __volatile__(
- ETHR_CAS__ " [%2], %1, %0"
- : "=&r"(new)
- : "r"(old), "r"(&var->counter), "0"(new)
- : "memory");
- } while (__builtin_expect(old != new, 0));
- __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
- return old;
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
- __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
__asm__ __volatile__(
ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
- __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
return new;
}
-/*
- * Atomic ops with at least specified barriers.
- */
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
-{
- ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
- __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
- return res;
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
- __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ETHR_NATMC_FUNC__(set)(var, i);
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
-{
- ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
- return res;
-}
-
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
-{
- ETHR_NATMC_FUNC__(dec)(var);
-}
-
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
-{
- return ETHR_NATMC_FUNC__(dec_return)(var);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
return res;
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
-{
- return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
-}
-
#endif /* ETHR_TRY_INLINE_FUNCS */
#undef ETHR_NATMC_FUNC__
diff --git a/erts/include/internal/sparc32/ethr_membar.h b/erts/include/internal/sparc32/ethr_membar.h
new file mode 100644
index 0000000000..6eb0c5a1d6
--- /dev/null
+++ b/erts/include/internal/sparc32/ethr_membar.h
@@ -0,0 +1,115 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers for sparc-v9
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_SPARC_V9_MEMBAR_H__
+#define ETHR_SPARC_V9_MEMBAR_H__
+
+#if defined(ETHR_SPARC_TSO)
+/* --- Total Store Order ------------------------------------------------ */
+
+#define ETHR_LoadLoad 0
+#define ETHR_LoadStore 0
+#define ETHR_StoreLoad 1
+#define ETHR_StoreStore 0
+
+static __inline__ void
+ethr_cb__(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreLoad__(void)
+{
+ __asm__ __volatile__ ("membar #StoreLoad\n\t" : : : "memory");
+}
+
+
+#define ETHR_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B), ethr_StoreLoad__(), ethr_cb__())
+
+#elif defined(ETHR_SPARC_PSO)
+/* --- Partial Store Order ---------------------------------------------- */
+
+#define ETHR_LoadLoad 0
+#define ETHR_LoadStore 0
+#define ETHR_StoreLoad (1 << 0)
+#define ETHR_StoreStore (1 << 1)
+
+static __inline__ void
+ethr_cb__(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreLoad__(void)
+{
+ __asm__ __volatile__ ("membar #StoreLoad\n\t" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreStore__(void)
+{
+ __asm__ __volatile__ ("membar #StoreStore\n\t" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreLoad_StoreStore__(void)
+{
+ __asm__ __volatile__ ("membar #StoreLoad|#StoreStore\n\t" : : : "memory");
+}
+
+#define ETHR_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR( \
+ (B) == ETHR_StoreLoad, \
+ ethr_StoreLoad__(), \
+ ETHR_CHOOSE_EXPR( \
+ (B) == ETHR_StoreStore, \
+ ethr_StoreStore__(), \
+ ETHR_CHOOSE_EXPR( \
+ (B) == (ETHR_StoreLoad|ETHR_StoreStore), \
+ ethr_StoreLoad_StoreStore__(), \
+ ethr_cb__())))
+
+#elif defined(ETHR_SPARC_RMO)
+/* --- Relaxed Memory Order --------------------------------------------- */
+
+# define ETHR_LoadLoad #LoadLoad
+# define ETHR_LoadStore #LoadStore
+# define ETHR_StoreLoad #StoreLoad
+# define ETHR_StoreStore #StoreStore
+
+# define ETHR_MEMBAR_AUX__(B) \
+ __asm__ __volatile__("membar " #B "\n\t" : : : "memory")
+
+# define ETHR_MEMBAR(B) ETHR_MEMBAR_AUX__(B)
+
+#else
+
+# error "No memory order defined"
+
+#endif
+
+#endif
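The new sparc-v9 barrier header selects, at compile time, the cheapest membar for the memory model the build targets (TSO, PSO, or RMO). A minimal usage sketch, assuming only the ETHR_MEMBAR and ETHR_LoadLoad/LoadStore/StoreLoad/StoreStore names defined above (example_release_store is a hypothetical helper, not part of the patch):

    static __inline__ void
    example_release_store(volatile int *p, int v)
    {
        /* Order all earlier loads and stores before the store to *p.
         * Under TSO this is a compiler barrier only, under PSO it emits
         * "membar #StoreStore", and under RMO the barrier bits are pasted
         * straight into the instruction: "membar #LoadStore|#StoreStore". */
        ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
        *p = v;
    }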
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index aea9794390..5ad92d3da7 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,7 @@
#ifndef ETHREAD_SPARC32_ETHREAD_H
#define ETHREAD_SPARC32_ETHREAD_H
+#include "ethr_membar.h"
#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
#if ETHR_SIZEOF_PTR == 8
@@ -33,8 +34,4 @@
#include "spinlock.h"
#include "rwlock.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_SPINLOCKS 1
-#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
-
#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
index 465ec96866..8b6f2e9c57 100644
--- a/erts/include/internal/sparc32/rwlock.h
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,9 @@
#ifndef ETHREAD_SPARC32_RWLOCK_H
#define ETHREAD_SPARC32_RWLOCK_H
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_NATIVE_RWSPINLOCK_IMPL "ethread"
+
/* Unlocked if zero, read-locked if positive, write-locked if -1. */
typedef struct {
volatile int lock;
@@ -42,7 +45,7 @@ ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
unsigned int old, new;
- __asm__ __volatile__("membar #LoadLoad|#StoreLoad");
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
do {
old = lock->lock;
new = old-1;
@@ -70,7 +73,7 @@ ethr_native_read_trylock(ethr_native_rwlock_t *lock)
: "r"(old), "r"(&lock->lock), "0"(new)
: "memory");
} while (__builtin_expect(old != new, 0));
- __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
return 1;
}
@@ -87,7 +90,7 @@ ethr_native_read_lock(ethr_native_rwlock_t *lock)
if (__builtin_expect(ethr_native_read_trylock(lock) != 0, 1))
break;
do {
- __asm__ __volatile__("membar #LoadLoad");
+ ETHR_MEMBAR(ETHR_LoadLoad);
} while (ethr_native_read_is_locked(lock));
}
}
@@ -95,7 +98,7 @@ ethr_native_read_lock(ethr_native_rwlock_t *lock)
static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
- __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
lock->lock = 0;
}
@@ -115,7 +118,7 @@ ethr_native_write_trylock(ethr_native_rwlock_t *lock)
: "r"(old), "r"(&lock->lock), "0"(new)
: "memory");
} while (__builtin_expect(old != new, 0));
- __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
return 1;
}
@@ -132,7 +135,7 @@ ethr_native_write_lock(ethr_native_rwlock_t *lock)
if (__builtin_expect(ethr_native_write_trylock(lock) != 0, 1))
break;
do {
- __asm__ __volatile__("membar #LoadLoad");
+ ETHR_MEMBAR(ETHR_LoadLoad);
} while (ethr_native_write_is_locked(lock));
}
}
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
index 493d514210..d4e36e09cf 100644
--- a/erts/include/internal/sparc32/spinlock.h
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,9 @@
#ifndef ETHR_SPARC32_SPINLOCK_H
#define ETHR_SPARC32_SPINLOCK_H
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_NATIVE_SPINLOCK_IMPL "ethread"
+
/* Locked with ldstub, so unlocked when 0 and locked when non-zero. */
typedef struct {
volatile unsigned char lock;
@@ -40,7 +43,7 @@ ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
- __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
lock->lock = 0;
}
@@ -51,10 +54,10 @@ ethr_native_spin_trylock(ethr_native_spinlock_t *lock)
__asm__ __volatile__(
"ldstub [%1], %0\n\t"
- "membar #StoreLoad|#StoreStore"
: "=r"(prev)
: "r"(&lock->lock)
: "memory");
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
return prev == 0;
}
@@ -71,7 +74,7 @@ ethr_native_spin_lock(ethr_native_spinlock_t *lock)
if (__builtin_expect(ethr_native_spin_trylock(lock) != 0, 1))
break;
do {
- __asm__ __volatile__("membar #LoadLoad");
+ ETHR_MEMBAR(ETHR_LoadLoad);
} while (ethr_native_spin_is_locked(lock));
}
}
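For context, a hedged usage sketch of the native spinlock operations touched above; example_lock and example_critical_section are hypothetical, and real callers normally go through the generic ethr_spinlock layer rather than these native functions directly:

    static ethr_native_spinlock_t example_lock; /* assumed initialized once with
                                                  * ethr_native_spinlock_init() */
    static void
    example_critical_section(void)
    {
        ethr_native_spin_lock(&example_lock);   /* ldstub, then ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore) */
        /* ... protected work ... */
        ethr_native_spin_unlock(&example_lock); /* ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore), then store 0 */
    }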
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 5697afda25..1f1553c346 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -25,6 +25,7 @@
#define ETHREAD_TILE_ATOMIC_H
#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATIVE_ATOMIC32_IMPL "tilera"
#include <atomic.h>
@@ -34,27 +35,25 @@ typedef struct {
volatile ethr_sint32_t counter;
} ethr_native_atomic32_t;
-#define ETHR_MEMORY_BARRIER __insn_mf()
-
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
+
static ETHR_INLINE ethr_sint32_t *
ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
{
return (ethr_sint32_t *) &var->counter;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT 1
+
static ETHR_INLINE void
ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
-static ETHR_INLINE void
-ethr_native_atomic32_set(ethr_native_atomic32_t *var, ethr_sint32_t i)
-{
- atomic_exchange_acq(&var->counter, i);
-}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_read(ethr_native_atomic32_t *var)
@@ -62,139 +61,80 @@ ethr_native_atomic32_read(ethr_native_atomic32_t *var)
return var->counter;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_ACQB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
+{
+ return atomic_compare_and_exchange_val_acq(&var->counter,
+ 0x81818181,
+ 0x81818181);
+}
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD 1
+
static ETHR_INLINE void
ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- ETHR_MEMORY_BARRIER;
atomic_add(&var->counter, incr);
- ETHR_MEMORY_BARRIER;
-}
-
+}
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC 1
+
static ETHR_INLINE void
ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
- ETHR_MEMORY_BARRIER;
atomic_increment(&var->counter);
- ETHR_MEMORY_BARRIER;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC 1
+
static ETHR_INLINE void
ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
- ETHR_MEMORY_BARRIER;
atomic_decrement(&var->counter);
- ETHR_MEMORY_BARRIER;
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- ethr_sint32_t res;
- ETHR_MEMORY_BARRIER;
- res = atomic_exchange_and_add(&var->counter, incr) + incr;
- ETHR_MEMORY_BARRIER;
- return res;
+ return atomic_exchange_and_add(&var->counter, incr) + incr;
}
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
-{
- return ethr_native_atomic32_add_return(var, 1);
-}
-
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
-{
- return ethr_native_atomic32_add_return(var, -1);
-}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD 1
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- ethr_sint32_t res;
- ETHR_MEMORY_BARRIER;
- res = atomic_and_val(&var->counter, mask);
- ETHR_MEMORY_BARRIER;
- return res;
+ return atomic_and_val(&var->counter, mask);
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD 1
+
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- ethr_sint32_t res;
- ETHR_MEMORY_BARRIER;
- res = atomic_or_val(&var->counter, mask);
- ETHR_MEMORY_BARRIER;
- return res;
+ return atomic_or_val(&var->counter, mask);
}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_ACQB 1
+
static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
+ethr_native_atomic32_xchg_acqb(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
- ETHR_MEMORY_BARRIER;
return atomic_exchange_acq(&var->counter, val);
}
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t expected)
-{
- ETHR_MEMORY_BARRIER;
- return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
-}
-
-/*
- * Atomic ops with at least specified barriers.
- */
-
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
-{
- ethr_sint32_t res = ethr_native_atomic32_read(var);
- ETHR_MEMORY_BARRIER;
- return res;
-}
-
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
-{
- return ethr_native_atomic32_inc_return(var);
-}
-
-static ETHR_INLINE void
-ethr_native_atomic32_set_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
-{
- ETHR_MEMORY_BARRIER;
- ethr_native_atomic32_set(var, val);
-}
-
-static ETHR_INLINE void
-ethr_native_atomic32_dec_relb(ethr_native_atomic32_t *var)
-{
- ethr_native_atomic32_dec(var);
-}
-
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
-{
- return ethr_native_atomic32_dec_return(var);
-}
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
static ETHR_INLINE ethr_sint32_t
ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
ethr_sint32_t new,
- ethr_sint32_t exp)
+ ethr_sint32_t expected)
{
- return ethr_native_atomic32_cmpxchg(var, new, exp);
-}
-
-static ETHR_INLINE ethr_sint32_t
-ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t exp)
-{
- return ethr_native_atomic32_cmpxchg(var, new, exp);
+ return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
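The rewritten read_acqb above uses a small trick: an acquire compare-and-exchange whose expected and new values are the same sentinel (0x81818181). If the counter differs, the CAS fails and returns the current value; if it happens to match, it rewrites the identical value. Either way the caller gets an acquire-ordered read. A portable sketch of the same idiom in C11 atomics (illustration only, not the Tilera code):

    #include <stdatomic.h>
    #include <stdint.h>

    static inline int32_t
    read_acq_via_cas(_Atomic int32_t *p)
    {
        int32_t expected = (int32_t) 0x81818181; /* sentinel */
        /* On failure 'expected' is overwritten with the observed value;
         * on success it already equals that value. */
        atomic_compare_exchange_strong_explicit(p, &expected, expected,
                                                memory_order_acquire,
                                                memory_order_acquire);
        return expected;
    }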
diff --git a/erts/include/internal/tile/ethr_membar.h b/erts/include/internal/tile/ethr_membar.h
new file mode 100644
index 0000000000..7cb4f3cf9a
--- /dev/null
+++ b/erts/include/internal/tile/ethr_membar.h
@@ -0,0 +1,35 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers for TILE64/TILEPro
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_TILE_MEMBAR_H__
+#define ETHR_TILE_MEMBAR_H__
+
+#define ETHR_LoadLoad (1 << 0)
+#define ETHR_LoadStore (1 << 1)
+#define ETHR_StoreLoad (1 << 2)
+#define ETHR_StoreStore (1 << 3)
+
+#define ETHR_MEMBAR(B) __insn_mf()
+
+#endif
diff --git a/erts/include/internal/tile/ethread.h b/erts/include/internal/tile/ethread.h
index 2de4d42bc6..7f579b50e7 100644
--- a/erts/include/internal/tile/ethread.h
+++ b/erts/include/internal/tile/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -23,8 +23,7 @@
#ifndef ETHREAD_TILE_ETHREAD_H
#define ETHREAD_TILE_ETHREAD_H
+#include "ethr_membar.h"
#include "atomic.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
-
#endif /* ETHREAD_TILE_ETHREAD_H */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
index 60def01a7e..e11f1abf47 100644
--- a/erts/include/internal/win/ethr_atomic.h
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,364 +24,408 @@
#undef ETHR_INCLUDE_ATOMIC_IMPL__
#if !defined(ETHR_WIN_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
-#define ETHR_WIN_ATOMIC32_H__
-#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
-#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+# define ETHR_WIN_ATOMIC32_H__
+# if (defined(ETHR_MEMBAR) \
+ && defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE) \
+ && defined(ETHR_HAVE__INTERLOCKEDEXCHANGE))
+# define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+# endif
+# undef ETHR_ATOMIC_WANT_32BIT_IMPL__
#elif !defined(ETHR_WIN_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
-#define ETHR_WIN_ATOMIC64_H__
-#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
-/* _InterlockedCompareExchange64() required... */
-#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+# define ETHR_WIN_ATOMIC64_H__
+# if (defined(ETHR_MEMBAR) \
+ && (defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64) \
+ || defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_ACQ) \
+ || defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_REL)))
+# define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+# endif
+# undef ETHR_ATOMIC_WANT_64BIT_IMPL__
#endif
-#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+
+#if !defined(_MSC_VER) || _MSC_VER < 1400
+# undef ETHR_INCLUDE_ATOMIC_IMPL__
#endif
#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#if defined(_MSC_VER) && _MSC_VER >= 1400
+# ifndef ETHR_WIN_ATOMIC_COMMON__
+# define ETHR_WIN_ATOMIC_COMMON__
+
+# if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
+# else
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
+# endif
+
+# endif /* ETHR_WIN_ATOMIC_COMMON__ */
+
+# if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+
+# define ETHR_HAVE_NATIVE_ATOMIC32 1
+# define ETHR_NATIVE_ATOMIC32_IMPL "windows-interlocked"
+
+# ifdef ETHR_HAVE__INTERLOCKEDDECREMENT
+# define ETHR_WIN_HAVE_DEC
+# pragma intrinsic(_InterlockedDecrement)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDDECREMENT_REL
+# define ETHR_WIN_HAVE_DEC_REL
+# pragma intrinsic(_InterlockedDecrement_rel)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDINCREMENT
+# define ETHR_WIN_HAVE_INC
+# pragma intrinsic(_InterlockedIncrement)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDINCREMENT_ACQ
+# define ETHR_WIN_HAVE_INC_ACQ
+# pragma intrinsic(_InterlockedIncrement_acq)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD
+# define ETHR_WIN_HAVE_XCHG_ADD
+# pragma intrinsic(_InterlockedExchangeAdd)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD_ACQ
+# define ETHR_WIN_HAVE_XCHG_ADD_ACQ
+# pragma intrinsic(_InterlockedExchangeAdd_acq)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE
+# define ETHR_WIN_HAVE_XCHG
+# pragma intrinsic(_InterlockedExchange)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDAND
+# define ETHR_WIN_HAVE_AND
+# pragma intrinsic(_InterlockedAnd)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDOR
+# define ETHR_WIN_HAVE_OR
+# pragma intrinsic(_InterlockedOr)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE
+# define ETHR_WIN_HAVE_CMPXCHG
+# pragma intrinsic(_InterlockedCompareExchange)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE_ACQ
+# define ETHR_WIN_HAVE_CMPXCHG_ACQ
+# pragma intrinsic(_InterlockedCompareExchange_acq)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE_REL
+# define ETHR_WIN_HAVE_CMPXCHG_REL
+# pragma intrinsic(_InterlockedCompareExchange_rel)
+# endif
+
+# define ETHR_ILCKD__(X) _Interlocked ## X
+# define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
+# define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# define ETHR_ATMC_T__ ethr_native_atomic32_t
+# define ETHR_AINT_T__ ethr_sint32_t
+
+# elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+
+# define ETHR_HAVE_NATIVE_ATOMIC64 1
+# define ETHR_NATIVE_ATOMIC64_IMPL "windows-interlocked"
+
+# ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
+# define ETHR_WIN_HAVE_DEC
+# pragma intrinsic(_InterlockedDecrement64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64_REL
+# define ETHR_WIN_HAVE_DEC_REL
+# pragma intrinsic(_InterlockedDecrement64_rel)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64_ACQ
+# define ETHR_WIN_HAVE_INC_ACQ
+# pragma intrinsic(_InterlockedIncrement64_acq)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
+# define ETHR_WIN_HAVE_INC
+# pragma intrinsic(_InterlockedIncrement64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+# define ETHR_WIN_HAVE_XCHG_ADD
+# pragma intrinsic(_InterlockedExchangeAdd64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64_ACQ
+# define ETHR_WIN_HAVE_XCHG_ADD_ACQ
+# pragma intrinsic(_InterlockedExchangeAdd64_acq)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+# define ETHR_WIN_HAVE_XCHG
+# pragma intrinsic(_InterlockedExchange64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDAND64
+# define ETHR_WIN_HAVE_AND
+# pragma intrinsic(_InterlockedAnd64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDOR64
+# define ETHR_WIN_HAVE_OR
+# pragma intrinsic(_InterlockedOr64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+# define ETHR_WIN_HAVE_CMPXCHG
+# pragma intrinsic(_InterlockedCompareExchange64)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_ACQ
+# define ETHR_WIN_HAVE_CMPXCHG_ACQ
+# pragma intrinsic(_InterlockedCompareExchange64_acq)
+# endif
+# ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64_REL
+# define ETHR_WIN_HAVE_CMPXCHG_REL
+# pragma intrinsic(_InterlockedCompareExchange64_rel)
+# endif
+
+# define ETHR_ILCKD__(X) _Interlocked ## X ## 64
+# define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
+# define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
+
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_ATMC_T__ ethr_native_atomic64_t
+# define ETHR_AINT_T__ ethr_sint64_t
+
+# else
+# error "Unsupported integer size"
+# endif
-#ifndef ETHR_WIN_ATOMIC_COMMON__
-#define ETHR_WIN_ATOMIC_COMMON__
+typedef struct {
+ volatile ETHR_AINT_T__ value;
+} ETHR_ATMC_T__;
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-#if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
-# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR 1
#else
-# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR 1
#endif
-#if defined(_M_AMD64) || (defined(_M_IX86) \
- && !defined(ETHR_PRE_PENTIUM4_COMPAT))
-# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
-#else
-# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
-#endif
-/*
- * No configure test checking for interlocked acquire/release
- * versions have been written, yet. It should define
- * ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS if, and
- * only if, all used interlocked operations with barriers
- * exists.
- *
- * Note, that these are pure optimizations for the itanium
- * processor.
- */
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->value;
+}
-#include <intrin.h>
-#undef ETHR_COMPILER_BARRIER
-#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedCompareExchange)
-
-#if defined(_M_AMD64) || (defined(_M_IX86) \
- && !defined(ETHR_PRE_PENTIUM4_COMPAT))
-#include <emmintrin.h>
-#include <mmintrin.h>
-#pragma intrinsic(_mm_mfence)
-#define ETHR_MEMORY_BARRIER _mm_mfence()
-#pragma intrinsic(_mm_sfence)
-#define ETHR_WRITE_MEMORY_BARRIER _mm_sfence()
-#pragma intrinsic(_mm_lfence)
-#define ETHR_READ_MEMORY_BARRIER _mm_lfence()
-#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
#else
-
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile long x___ = 0; \
- _InterlockedCompareExchange(&x___, (long) 1, (long) 0); \
-} while (0)
-
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET 1
#endif
-#endif /* ETHR_WIN_ATOMIC_COMMON__ */
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->value = i;
+}
#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB 1
+#endif
-#define ETHR_HAVE_NATIVE_ATOMIC32 1
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if defined(_M_IX86)
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+ else
+#endif /* _M_IX86 */
+ {
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ var->value = i;
+ }
+}
-/*
- * All used operations available as 32-bit intrinsics
- */
+#endif /* ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ */
-#pragma intrinsic(_InterlockedDecrement)
-#pragma intrinsic(_InterlockedIncrement)
-#pragma intrinsic(_InterlockedExchangeAdd)
-#pragma intrinsic(_InterlockedExchange)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
-#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
-#pragma intrinsic(_InterlockedExchangeAdd_acq)
-#pragma intrinsic(_InterlockedIncrement_acq)
-#pragma intrinsic(_InterlockedDecrement_rel)
-#pragma intrinsic(_InterlockedCompareExchange_acq)
-#pragma intrinsic(_InterlockedCompareExchange_rel)
-#endif
+#if defined(ETHR_WIN_HAVE_XCHG)
-#define ETHR_ILCKD__(X) _Interlocked ## X
-#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
-#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
-#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB 1
#else
-#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X
-#define ETHR_ILCKD_REL__(X) _Interlocked ## X
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB 1
#endif
-#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
-#define ETHR_ATMC_T__ ethr_native_atomic32_t
-#define ETHR_AINT_T__ ethr_sint32_t
-
-#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+}
-#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#endif
-/*
- * _InterlockedCompareExchange64() is required. The other may not
- * be available, but if so, we can generate them.
- */
-#pragma intrinsic(_InterlockedCompareExchange64)
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
-#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
-#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) *(PTR)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
#else
-#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) (__int64) 0
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ 1
#endif
-#define ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, PTR, NEW, ACT, EXP, OPS, RET) \
-{ \
- __int64 NEW, ACT, EXP; \
- ACT = ETHR_OWN_ILCKD_INIT_VAL__(PTR); \
- do { \
- EXP = ACT; \
- { OPS; } \
- ACT = _InterlockedCompareExchange64(PTR, NEW, EXP); \
- } while (ACT != EXP); \
- return RET; \
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
+{
+ return var->value;
}
-#define ETHR_OWN_ILCKD_1_IMPL__(FUNC, NEW, ACT, EXP, OPS, RET) \
-static __forceinline __int64 \
-FUNC(__int64 volatile *ptr) \
-ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
-
-#define ETHR_OWN_ILCKD_2_IMPL__(FUNC, NEW, ACT, EXP, OPS, ARG, RET) \
-static __forceinline __int64 \
-FUNC(__int64 volatile *ptr, __int64 ARG) \
-ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+#endif /* ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ */
+#if defined(ETHR_WIN_HAVE_XCHG_ADD)
-#ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
-#pragma intrinsic(_InterlockedDecrement64)
-#else
-ETHR_OWN_ILCKD_1_IMPL__(_InterlockedDecrement64, new, act, exp,
- new = act - 1, new)
-#endif
-#ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
-#pragma intrinsic(_InterlockedIncrement64)
-#else
-ETHR_OWN_ILCKD_1_IMPL__(_InterlockedIncrement64, new, act, exp,
- new = act + 1, new)
-#endif
-#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
-#pragma intrinsic(_InterlockedExchangeAdd64)
-#else
-ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, new, act, exp,
- new = act + arg, arg, act)
-#endif
-#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
-#pragma intrinsic(_InterlockedExchange64)
-#else
-ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchange64, new, act, exp,
- new = arg, arg, act)
-#endif
-#ifdef ETHR_HAVE__INTERLOCKEDAND64
-#pragma intrinsic(_InterlockedAnd64)
-#else
-ETHR_OWN_ILCKD_2_IMPL__(_InterlockedAnd64, new, act, exp,
- new = act & arg, arg, act)
-#endif
-#ifdef ETHR_HAVE__INTERLOCKEDOR64
-#pragma intrinsic(_InterlockedOr64)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB 1
#else
-ETHR_OWN_ILCKD_2_IMPL__(_InterlockedOr64, new, act, exp,
- new = act | arg, arg, act)
-#endif
-#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
-#pragma intrinsic(_InterlockedExchangeAdd64_acq)
-#pragma intrinsic(_InterlockedIncrement64_acq)
-#pragma intrinsic(_InterlockedDecrement64_rel)
-#pragma intrinsic(_InterlockedCompareExchange64_acq)
-#pragma intrinsic(_InterlockedCompareExchange64_rel)
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB 1
#endif
-#define ETHR_ILCKD__(X) _Interlocked ## X ## 64
-#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
-#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
-#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
-#else
-#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64
-#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
+}
+
#endif
-#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
-#define ETHR_ATMC_T__ ethr_native_atomic64_t
-#define ETHR_AINT_T__ ethr_sint64_t
+#if defined(ETHR_WIN_HAVE_INC)
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB 1
#else
-#error "Unsupported integer size"
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_MB 1
#endif
-typedef struct {
- volatile ETHR_AINT_T__ value;
-} ETHR_ATMC_T__;
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-
-static ETHR_INLINE ETHR_AINT_T__ *
-ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_mb)(ETHR_ATMC_T__ *var)
{
- return (ETHR_AINT_T__ *) &var->value;
+ return ETHR_ILCKD__(Increment)(&var->value);
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
-#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
- var->value = i;
-#else
- (void) ETHR_ILCKD__(Exchange)(&var->value, i);
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
-#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
- var->value = i;
+#if defined(ETHR_WIN_HAVE_INC_ACQ)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_ACQB 1
#else
- (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_ACQB 1
#endif
-}
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
-#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
- return var->value;
-#else
- return ETHR_ILCKD__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
-#endif
+ return ETHR_ILCKD_ACQ__(Increment)(&var->value);
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
-{
- (void) ETHR_ILCKD__(ExchangeAdd)(&var->value, incr);
-}
+#endif
+
+#if defined(ETHR_WIN_HAVE_DEC)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+ETHR_NATMC_FUNC__(dec_return_mb)(ETHR_ATMC_T__ *var)
{
- return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
+ return ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
-{
- (void) ETHR_ILCKD__(Increment)(&var->value);
-}
+#endif
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
-{
- (void) ETHR_ILCKD__(Decrement)(&var->value);
-}
+#if defined(ETHR_WIN_HAVE_DEC_REL)
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
-{
- return ETHR_ILCKD__(Increment)(&var->value);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RELB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
- return ETHR_ILCKD__(Decrement)(&var->value);
+ return ETHR_ILCKD_REL__(Decrement)(&var->value);
}
+#endif
+
+#if defined(ETHR_WIN_HAVE_AND)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_MB 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+ETHR_NATMC_FUNC__(and_retold_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
return ETHR_ILCKD__(And)(&var->value, mask);
}
+#endif
+
+#if defined(ETHR_WIN_HAVE_OR)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_MB 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+ETHR_NATMC_FUNC__(or_retold_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
return ETHR_ILCKD__(Or)(&var->value, mask);
}
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
- ETHR_AINT_T__ new,
- ETHR_AINT_T__ old)
-{
- return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
-}
+#endif
+#if defined(ETHR_WIN_HAVE_XCHG)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_MB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
+ETHR_NATMC_FUNC__(xchg_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
return ETHR_ILCKD__(Exchange)(&var->value, new);
}
-/*
- * Atomic ops with at least specified barriers.
- */
+#endif
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
-{
-#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
- ETHR_AINT_T__ val = var->value;
- ETHR_COMPILER_BARRIER;
- return val;
+#if defined(ETHR_WIN_HAVE_CMPXCHG)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB 1
#else
- return ETHR_ILCKD_ACQ__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB 1
#endif
-}
static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+ETHR_NATMC_FUNC__(cmpxchg_mb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
- return ETHR_ILCKD_ACQ__(Increment)(&var->value);
+ return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
-{
-#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
- ETHR_COMPILER_BARRIER;
- var->value = i;
-#else
- (void) ETHR_ILCKD_REL__(Exchange)(&var->value, i);
#endif
-}
-static ETHR_INLINE void
-ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
-{
- (void) ETHR_ILCKD_REL__(Decrement)(&var->value);
-}
+#if defined(ETHR_WIN_HAVE_CMPXCHG_ACQ)
-static ETHR_INLINE ETHR_AINT_T__
-ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
-{
- return ETHR_ILCKD_REL__(Decrement)(&var->value);
-}
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB 1
+#endif
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
@@ -391,6 +435,16 @@ ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
return ETHR_ILCKD_ACQ__(CompareExchange)(&var->value, new, old);
}
+#endif
+
+#if defined(ETHR_WIN_HAVE_CMPXCHG_REL)
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RELB 1
+#endif
+
static ETHR_INLINE ETHR_AINT_T__
ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
@@ -399,6 +453,8 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
return ETHR_ILCKD_REL__(CompareExchange)(&var->value, new, old);
}
+#endif
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#undef ETHR_ILCKD__
@@ -408,8 +464,17 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
#undef ETHR_ATMC_T__
#undef ETHR_AINT_T__
#undef ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
-#undef ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
-
-#endif /* _MSC_VER */
+#undef ETHR_WIN_HAVE_CMPXCHG
+#undef ETHR_WIN_HAVE_DEC
+#undef ETHR_WIN_HAVE_INC
+#undef ETHR_WIN_HAVE_XCHG_ADD
+#undef ETHR_WIN_HAVE_XCHG
+#undef ETHR_WIN_HAVE_AND
+#undef ETHR_WIN_HAVE_OR
+#undef ETHR_WIN_HAVE_XCHG_ADD_ACQ
+#undef ETHR_WIN_HAVE_INC_ACQ
+#undef ETHR_WIN_HAVE_DEC_REL
+#undef ETHR_WIN_HAVE_CMPXCHG_ACQ
+#undef ETHR_WIN_HAVE_CMPXCHG_REL
#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
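The reorganized Windows header now advertises which native operation/barrier combinations actually exist through ETHR_HAVE_ETHR_NATIVE_ATOMIC32_*/ATOMIC64_* defines, instead of emulating every missing 64-bit intrinsic itself. A hedged sketch of how a generic layer can synthesize a missing variant from cmpxchg when such a define is absent (synth_atomic32_inc_return_mb is a hypothetical name; the real fallbacks are generated into ethr_atomics.c by the make_atomics_api script):

    #ifndef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB
    static ETHR_INLINE ethr_sint32_t
    synth_atomic32_inc_return_mb(ethr_native_atomic32_t *var)
    {
        ethr_sint32_t exp, new;
        do {
            exp = ethr_native_atomic32_read(var);
            new = exp + 1;
            /* retry until no other thread changed the value in between */
        } while (ethr_native_atomic32_cmpxchg_mb(var, new, exp) != exp);
        return new;
    }
    #endif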
diff --git a/erts/include/internal/win/ethr_dw_atomic.h b/erts/include/internal/win/ethr_dw_atomic.h
new file mode 100644
index 0000000000..a3e7ffc3aa
--- /dev/null
+++ b/erts/include/internal/win/ethr_dw_atomic.h
@@ -0,0 +1,154 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native double word atomics for windows
+ * Author: Rickard Green
+ */
+
+#undef ETHR_INCLUDE_DW_ATOMIC_IMPL__
+#ifndef ETHR_X86_DW_ATOMIC_H__
+# define ETHR_X86_DW_ATOMIC_H__
+# if ((ETHR_SIZEOF_PTR == 4 \
+ && defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64)) \
+ || (ETHR_SIZEOF_PTR == 8 \
+ && defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE128)))
+# define ETHR_INCLUDE_DW_ATOMIC_IMPL__
+# endif
+#endif
+
+#ifdef ETHR_INCLUDE_DW_ATOMIC_IMPL__
+
+# if ETHR_SIZEOF_PTR == 4
+# define ETHR_HAVE_NATIVE_SU_DW_ATOMIC
+# else
+# define ETHR_HAVE_NATIVE_DW_ATOMIC
+# endif
+# define ETHR_NATIVE_DW_ATOMIC_IMPL "windows-interlocked"
+
+# if defined(_M_IX86) || defined(_M_AMD64)
+/*
+ * If ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__ is defined, it will be used
+ * at runtime in order to determine if native or fallback implementation
+ * should be used.
+ */
+# define ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__ \
+ ETHR_X86_RUNTIME_CONF_HAVE_DW_CMPXCHG__
+# endif
+
+# include <intrin.h>
+# if ETHR_SIZEOF_PTR == 4
+# pragma intrinsic(_InterlockedCompareExchange64)
+# define ETHR_DW_NATMC_ALIGN_MASK__ 0x7
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint64_t
+# else
+# pragma intrinsic(_InterlockedCompareExchange128)
+# define ETHR_DW_NATMC_ALIGN_MASK__ 0xf
+# endif
+
+typedef volatile __int64 * ethr_native_dw_ptr_t;
+
+/*
+ * We need 16 byte aligned memory in 64-bit mode, and 8 byte aligned
+ * memory in 32-bit mode. 16 byte aligned malloc in 64-bit mode is
+ * not common, and at least some glibc malloc implementations
+ * only provide 4 byte alignment in 32-bit mode.
+ *
+ * This code assumes 8 byte aligned memory in 64-bit mode, and 4 byte
+ * aligned memory in 32-bit mode. A malloc implementation that does
+ * not adhere to these alignment requirements is seriously broken,
+ * and we won't bother trying to work around it.
+ *
+ * Since memory alignment may be off by one word we need to align at
+ * runtime. We, therefore, need an extra word allocated.
+ */
+#define ETHR_DW_NATMC_MEM__(VAR) \
+ (&var->c[(int) ((ethr_uint_t) &(VAR)->c[0]) & ETHR_DW_NATMC_ALIGN_MASK__])
+typedef union {
+#ifdef ETHR_NATIVE_SU_DW_SINT_T
+ volatile ETHR_NATIVE_SU_DW_SINT_T dw_sint;
+#endif
+ volatile ethr_sint_t sint[3];
+ volatile char c[ETHR_SIZEOF_PTR*3];
+} ethr_native_dw_atomic_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#ifdef ETHR_DEBUG
+# define ETHR_DW_DBG_ALIGNED__(PTR) \
+ ETHR_ASSERT((((ethr_uint_t) (PTR)) & ETHR_DW_NATMC_ALIGN_MASK__) == 0);
+#else
+# define ETHR_DW_DBG_ALIGNED__(PTR)
+#endif
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR
+
+static ETHR_INLINE ethr_sint_t *
+ethr_native_dw_atomic_addr(ethr_native_dw_atomic_t *var)
+{
+ ethr_sint_t *p = (ethr_sint_t *) ETHR_DW_NATMC_MEM__(var);
+ ETHR_DW_DBG_ALIGNED__(p);
+ return p;
+}
+
+
+#if ETHR_SIZEOF_PTR == 4
+
+#define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_MB
+
+static ETHR_INLINE ethr_sint64_t
+ethr_native_su_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
+ ethr_sint64_t new,
+ ethr_sint64_t exp)
+{
+ ethr_native_dw_ptr_t p = (ethr_native_dw_ptr_t) ETHR_DW_NATMC_MEM__(var);
+ ETHR_DW_DBG_ALIGNED__(p);
+ return (ethr_sint64_t) _InterlockedCompareExchange64(p, new, exp);
+}
+
+#elif ETHR_SIZEOF_PTR == 8
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_MB
+
+#ifdef ETHR_BIGENDIAN
+# define ETHR_WIN_LOW_WORD__ 1
+# define ETHR_WIN_HIGH_WORD__ 0
+#else
+# define ETHR_WIN_LOW_WORD__ 0
+# define ETHR_WIN_HIGH_WORD__ 1
+#endif
+
+static ETHR_INLINE int
+ethr_native_dw_atomic_cmpxchg_mb(ethr_native_dw_atomic_t *var,
+ ethr_sint_t *new,
+ ethr_sint_t *xchg)
+{
+ ethr_native_dw_ptr_t p = (ethr_native_dw_ptr_t) ETHR_DW_NATMC_MEM__(var);
+ ETHR_DW_DBG_ALIGNED__(p);
+ return (int) _InterlockedCompareExchange128(p,
+ new[ETHR_WIN_HIGH_WORD__],
+ new[ETHR_WIN_LOW_WORD__],
+ xchg);
+}
+
+#endif
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHR_INCLUDE_DW_ATOMIC_IMPL__ */
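The ETHR_DW_NATMC_MEM__ macro above realigns the double word at runtime by indexing into a buffer that is one word larger than strictly needed. A standalone sketch of the same arithmetic (hypothetical names; it assumes the base address is misaligned by at most one word, as the comment in the header requires):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_MASK 0x7 /* want 8-byte alignment, as in 32-bit mode */

    typedef union {
        volatile intptr_t sint[3];           /* forces at least word alignment */
        volatile char c[sizeof(void *) * 3]; /* one extra word of slack */
    } dw_buf_t;

    static volatile char *
    dw_mem(dw_buf_t *var)
    {
        /* If &c[0] is off by one word, the masked low bits are exactly the
         * offset that moves the pointer up to the next aligned address. */
        return &var->c[(int) ((uintptr_t) &var->c[0]) & ALIGN_MASK];
    }

    int
    main(void)
    {
        dw_buf_t b;
        printf("aligned: %d\n", (((uintptr_t) dw_mem(&b)) & ALIGN_MASK) == 0);
        return 0;
    }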
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
index 598816b2c6..6363174a74 100644
--- a/erts/include/internal/win/ethr_event.h
+++ b/erts/include/internal/win/ethr_event.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -21,12 +21,12 @@
* Author: Rickard Green
*/
-#define ETHR_EVENT_OFF_WAITER__ ((long) -1)
-#define ETHR_EVENT_OFF__ ((long) 1)
-#define ETHR_EVENT_ON__ ((long) 0)
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint32_t) -1)
+#define ETHR_EVENT_OFF__ ((ethr_sint32_t) 1)
+#define ETHR_EVENT_ON__ ((ethr_sint32_t) 0)
typedef struct {
- volatile long state;
+ ethr_atomic32_t state;
HANDLE handle;
} ethr_event;
@@ -38,7 +38,7 @@ static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
/* _InterlockedExchange() imply a full memory barrier which is important */
- long state = _InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ ethr_sint32_t state = ethr_atomic32_xchg_wb(&e->state, ETHR_EVENT_ON__);
if (state == ETHR_EVENT_OFF_WAITER__) {
if (!SetEvent(e->handle))
ETHR_FATAL_ERROR__(ethr_win_get_errno__());
@@ -48,8 +48,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- /* _InterlockedExchange() imply a full memory barrier which is important */
- InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
}
#endif
diff --git a/erts/include/internal/win/ethr_membar.h b/erts/include/internal/win/ethr_membar.h
new file mode 100644
index 0000000000..8237660b2c
--- /dev/null
+++ b/erts/include/internal/win/ethr_membar.h
@@ -0,0 +1,145 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers for Windows
+ * Author: Rickard Green
+ */
+
+#if (!defined(ETHR_WIN_MEMBAR_H__) \
+ && (defined(_MSC_VER) && _MSC_VER >= 1400) \
+ && (defined(_M_AMD64) \
+ || defined(_M_IA64) \
+ || defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE)))
+#define ETHR_WIN_MEMBAR_H__
+
+#define ETHR_LoadLoad (1 << 0)
+#define ETHR_LoadStore (1 << 1)
+#define ETHR_StoreLoad (1 << 2)
+#define ETHR_StoreStore (1 << 3)
+
+#include <intrin.h>
+#undef ETHR_COMPILER_BARRIER
+#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+#pragma intrinsic(_ReadWriteBarrier)
+
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#define ETHR_MB_USING_INTERLOCKED__ \
+do { \
+ volatile long x___ = 0; \
+ (void) _InterlockedCompareExchange(&x___, 2, 1); \
+} while (0)
+
+#if defined(_M_IA64)
+
+#define ETHR_MEMBAR(B) __mf()
+
+#elif defined(_M_AMD64) || defined(_M_IX86)
+
+#include <emmintrin.h>
+#include <mmintrin.h>
+
+#if ETHR_SIZEOF_PTR == 4
+# define ETHR_NO_SSE2_MB__ ETHR_MB_USING_INTERLOCKED__
+#endif
+#pragma intrinsic(_mm_mfence)
+#pragma intrinsic(_mm_sfence)
+#pragma intrinsic(_mm_lfence)
+
+static __forceinline void
+ethr_cfence__(void)
+{
+ _ReadWriteBarrier();
+}
+
+static __forceinline void
+ethr_mfence__(void)
+{
+#if ETHR_SIZEOF_PTR == 4
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ ETHR_NO_SSE2_MB__;
+ else
+#endif
+ _mm_mfence();
+}
+
+static __forceinline void
+ethr_sfence__(void)
+{
+#if ETHR_SIZEOF_PTR == 4
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ ETHR_NO_SSE2_MB__;
+ else
+#endif
+ _mm_sfence();
+}
+
+static __forceinline void
+ethr_lfence__(void)
+{
+#if ETHR_SIZEOF_PTR == 4
+ if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
+ ETHR_NO_SSE2_MB__;
+ else
+#endif
+ _mm_lfence();
+}
+
+#define ETHR_X86_OUT_OF_ORDER_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B) == ETHR_StoreStore, \
+ ethr_sfence__(), \
+ ETHR_CHOOSE_EXPR((B) == ETHR_LoadLoad, \
+ ethr_lfence__(), \
+ ethr_mfence__()))
+
+#ifdef ETHR_X86_OUT_OF_ORDER
+
+#define ETHR_MEMBAR(B) \
+ ETHR_X86_OUT_OF_ORDER_MEMBAR((B))
+
+#else /* !ETHR_X86_OUT_OF_ORDER (the default) */
+
+/*
+ * We assume that only stores before loads may be reordered. That is,
+ * we assume that *no* instructions like these are used:
+ * - CLFLUSH,
+ * - streaming stores executed with non-temporal move,
+ * - string operations, or
+ * - other instructions which aren't LoadLoad, LoadStore, and StoreStore
+ * ordered by themselves
+ * If such instructions are used, either insert memory barriers
+ * using ETHR_X86_OUT_OF_ORDER_MEMBAR() at appropriate places, or
+ * define ETHR_X86_OUT_OF_ORDER. For more info see Intel 64 and IA-32
+ * Architectures Software Developer's Manual; Vol 3A; Chapter 8.2.2.
+ */
+
+#define ETHR_MEMBAR(B) \
+ ETHR_CHOOSE_EXPR((B) & ETHR_StoreLoad, ethr_mfence__(), ethr_cfence__())
+
+#endif
+
+#else /* No knowledge about platform; use interlocked fallback */
+
+#define ETHR_MEMBAR(B) ETHR_MB_USING_INTERLOCKED__
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MB_USING_INTERLOCKED__
+
+#endif
+
+#endif /* ETHR_WIN_MEMBAR_H__ */
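On the default (in-order-store) x86/amd64 path above, only barrier combinations that include ETHR_StoreLoad cost a real mfence (or the interlocked fallback when SSE2 is absent); everything else reduces to the _ReadWriteBarrier() compiler fence. A hedged usage sketch (example_publish/example_consume are hypothetical helpers):

    static __forceinline void
    example_publish(volatile long *data, volatile long *flag)
    {
        *data = 42;
        ETHR_MEMBAR(ETHR_StoreStore); /* compiler fence only on default x86 */
        *flag = 1;
    }

    static __forceinline long
    example_consume(volatile long *data, volatile long *flag)
    {
        while (!*flag)
            ;
        ETHR_MEMBAR(ETHR_LoadLoad);   /* compiler fence only on default x86 */
        return *data;
    }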
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
index c01b17cf14..8be35e810e 100644
--- a/erts/include/internal/win/ethread.h
+++ b/erts/include/internal/win/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -25,11 +25,13 @@
#ifndef ETHREAD_WIN_H__
#define ETHREAD_WIN_H__
+#include "ethr_membar.h"
#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
#if ETHR_SIZEOF_PTR == 8
# define ETHR_ATOMIC_WANT_64BIT_IMPL__
# include "ethr_atomic.h"
#endif
+#include "ethr_dw_atomic.h"
#endif
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 757b3b24e2..ea80e74100 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2004-2010. All Rights Reserved.
+# Copyright Ericsson AB 2004-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -65,7 +65,7 @@ TYPE_SUFFIX=.purecov
PRE_LD=purecov $(PURECOV_BUILD_OPTIONS)
else
ifeq ($(TYPE),gcov)
-CFLAGS=@DEBUG_CFLAGS@ -fprofile-arcs -ftest-coverage -O0
+CFLAGS=@DEBUG_CFLAGS@ -DGCOV -fprofile-arcs -ftest-coverage -O0
TYPE_SUFFIX=.gcov
PRE_LD=
else
@@ -138,7 +138,7 @@ INCLUDES=-I$(ERTS_INCL) -I$(ERTS_INCL)/$(TARGET) -I$(ERTS_INCL_INT) -I$(ERTS_INC
INCLUDES += -I../emulator/beam -I../emulator/sys/$(ERLANG_OSTYPE)
USING_MINGW=@MIXED_CYGWIN_MINGW@
-USING_VC=@MIXED_CYGWIN_VC@
+USING_VC=@MIXED_VC@
ifeq ($(USING_VC),yes)
LIB_SUFFIX=.lib
@@ -288,6 +288,10 @@ ETHREAD_LIB_SRC=common/ethr_aux.c \
common/ethr_cbf.c \
$(ETHR_THR_LIB_BASE_DIR)/ethread.c \
$(ETHR_THR_LIB_BASE_DIR)/ethr_event.c
+ETHR_X86_SSE2_ASM=@ETHR_X86_SSE2_ASM@
+ifeq ($(ETHR_X86_SSE2_ASM),yes)
+ETHREAD_LIB_SRC += pthread/ethr_x86_sse2_asm.c
+endif
ETHREAD_LIB_NAME=ethread$(TYPE_SUFFIX)
ifeq ($(USING_VC),yes)
@@ -314,10 +318,14 @@ ETHREAD_LIB=
endif
+_create_dirs := $(shell mkdir -p $(CREATE_DIRS))
#
# Everything to build
#
-all: $(CREATE_DIRS) $(ETHREAD_LIB) $(ERTS_LIBS) $(ERTS_INTERNAL_LIBS)
+.PHONY: all
+all: $(OBJ_DIR)/MADE
+
+$(OBJ_DIR)/MADE: $(ETHREAD_LIB) $(ERTS_LIBS) $(ERTS_INTERNAL_LIBS)
ifeq ($(OMIT_OMIT_FP),yes)
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@@ -327,6 +335,8 @@ ifeq ($(OMIT_OMIT_FP),yes)
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
@echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
endif
+ echo $? > $(OBJ_DIR)/MADE
+
#
# The libs ...
#
@@ -382,10 +392,8 @@ $(ERTS_LIB): $(ERTS_LIB_OBJS)
# Object files
#
-ifeq ($(TYPE)-@GCC@,debug-yes)
-$(r_OBJ_DIR)/ethr_aux.o: common/ethr_aux.c
- $(CC) $(THR_DEFS) $(CFLAGS) -Wno-unused-function $(INCLUDES) -c $< -o $@
-endif
+$(r_OBJ_DIR)/ethr_x86_sse2_asm.o: pthread/ethr_x86_sse2_asm.c
+ $(CC) -msse2 $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
$(r_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
@@ -426,13 +434,6 @@ $(MTd_OBJ_DIR)/%.o: $(ETHR_THR_LIB_BASE_DIR)/%.c
$(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
#
-# Create directories
-#
-
-$(CREATE_DIRS):
- $(MKDIR) -p $@
-
-#
# Install
#
@@ -469,6 +470,7 @@ INTERNAL_RELEASE_LIBS= \
$(ETHREAD_LIB) \
$(ERTS_INTERNAL_LIBS)
+.PHONY: release_spec
release_spec: all
ifneq ($(strip $(RELEASE_INCLUDES)),)
$(INSTALL_DIR) $(RELSYSDIR)/include
@@ -498,19 +500,20 @@ ifneq ($(strip $(INTERNAL_RELEASE_LIBS)),)
$(INSTALL_DATA) $(INTERNAL_RELEASE_LIBS) $(RELSYSDIR)/lib/internal
endif
+.PHONY: docs
docs:
+.PHONY: release_docs_spec
release_docs_spec:
-
#
# Cleanup
#
+.PHONY: clean
clean:
$(RM) -rf ../lib/internal/$(TARGET)/*
$(RM) -rf ../lib/$(TARGET)/*
$(RM) -rf obj/$(TARGET)/*
- $(RM) -f $(TARGET)/depend.mk
#
# Make dependencies
@@ -552,9 +555,11 @@ SED_MDd_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MDd_O);$(SED_REPL_TT_DIR);$(SED_REPL
SED_MT_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MT_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
SED_MTd_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MTd_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
-DEPEND_MK=$(TARGET)/depend.mk
+DEPEND_MK=$(OBJ_DIR)/depend.mk
-depend:
+.PHONY: depend
+depend: $(DEPEND_MK)
+$(DEPEND_MK):
@echo "Generating dependency file $(DEPEND_MK)..."
@echo "# Generated dependency rules" > $(DEPEND_MK);
@echo "# " >> $(DEPEND_MK);
@@ -627,6 +632,8 @@ endif
endif
@echo "# EOF" >> $(DEPEND_MK);
+ifneq ($(MAKECMDGOALS),clean)
-include $(DEPEND_MK)
+endif
# eof
diff --git a/erts/lib_src/common/erl_memory_trace_parser.c b/erts/lib_src/common/erl_memory_trace_parser.c
index 54c3dfadec..625c140cf9 100644
--- a/erts/lib_src/common/erl_memory_trace_parser.c
+++ b/erts/lib_src/common/erl_memory_trace_parser.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -368,12 +368,16 @@ emtp_state_destroy(emtp_state *statep)
#define GET_UI8(UI, BP) ((UI) = *((BP)++))
+#define SKIP_UI8(BP) ((BP)++)
+
#define GET_UI16(UI, BP) \
do { \
(UI) = ((( (usgnd_int_16) (BP)[0]) << 8) \
| ((usgnd_int_16) (BP)[1])); \
(BP) += UI16_SZ; \
} while(0)
+#define SKIP_UI16(BP) ((BP) += UI16_SZ)
+
#define GET_UI32(UI, BP) \
do { \
@@ -383,6 +387,7 @@ emtp_state_destroy(emtp_state *statep)
| ( (usgnd_int_32) (BP)[3])); \
(BP) += UI32_SZ; \
} while(0)
+#define SKIP_UI32(BP) ((BP) += UI32_SZ)
#define GET_UI64(UI, BP) \
do { \
@@ -396,6 +401,7 @@ emtp_state_destroy(emtp_state *statep)
| ( (usgnd_int_64) (BP)[7])); \
(BP) += UI64_SZ; \
} while(0)
+#define SKIP_UI64(BP) ((BP) += UI64_SZ)
#define GET_VSZ_UI16(UI, BP, MSB) \
do { \
@@ -1267,11 +1273,10 @@ parse_header(emtp_state *statep,
switch (statep->version.major) {
case 1: {
- usgnd_int_32 hdr_sz;
NEED(2*UI32_SZ + 2*UI16_SZ, trace_size);
GET_UI32(statep->flags, tracep);
- GET_UI32(hdr_sz, tracep); /* ignore this; may contain garbage! */
+ SKIP_UI32(tracep); /* ignore this; may contain garbage! */
GET_UI16(statep->max_allocator_ix, tracep);
GET_UI16(statep->max_block_type_ix, tracep);
@@ -1564,13 +1569,13 @@ parse_header(emtp_state *statep,
}
case ERTS_MT_BLOCK_TYPE_HDR_TAG: {
- usgnd_int_16 bt_ix, a_ix, btflgs;
+ usgnd_int_16 bt_ix, a_ix;
if (entry_sz
< UI8_SZ + 3*UI16_SZ + UI8_SZ + 0 + UI16_SZ)
ERROR(EMTP_PARSE_ERROR);
- GET_UI16(btflgs, c_p);
+ SKIP_UI16(c_p); /* bitflags */
GET_UI16(bt_ix, c_p);
if (bt_ix > statep->max_block_type_ix)
ERROR(EMTP_PARSE_ERROR);
diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c
index 5dbf98c7d1..3b123063fa 100644
--- a/erts/lib_src/common/erl_misc_utils.c
+++ b/erts/lib_src/common/erl_misc_utils.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -55,6 +55,12 @@
# ifdef HAVE_UNISTD_H
# include <unistd.h>
# endif
+# if defined(_SC_NPROC_CONF) && !defined(_SC_NPROCESSORS_CONF)
+# define _SC_NPROCESSORS_CONF _SC_NPROC_CONF
+# endif
+# if defined(_SC_NPROC_ONLN) && !defined(_SC_NPROCESSORS_ONLN)
+# define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN
+# endif
# if (defined(NO_SYSCONF) || !defined(_SC_NPROCESSORS_CONF))
# ifdef HAVE_SYS_SYSCTL_H
# include <sys/sysctl.h>
@@ -721,7 +727,7 @@ adjust_processor_nodes(erts_cpu_info_t *cpuinfo, int no_nodes)
prev = NULL;
this = &cpuinfo->topology[0];
- last = &cpuinfo->topology[cpuinfo->configured-1];
+ last = &cpuinfo->topology[cpuinfo->topology_size-1];
while (1) {
if (processor == this->processor) {
if (node != this->node)
@@ -828,8 +834,8 @@ read_topology(erts_cpu_info_t *cpuinfo)
ix = -1;
if (realpath(ERTS_SYS_NODE_PATH, npath)) {
- got_nodes = 1;
ndir = opendir(npath);
+ got_nodes = (ndir != NULL);
}
do {
@@ -933,7 +939,7 @@ read_topology(erts_cpu_info_t *cpuinfo)
if (res > 1) {
prev = this++;
- last = &cpuinfo->topology[cpuinfo->configured-1];
+ last = &cpuinfo->topology[cpuinfo->topology_size-1];
while (1) {
this->thread = ((this->node == prev->node
@@ -1088,7 +1094,7 @@ read_topology(erts_cpu_info_t *cpuinfo)
if (res > 1) {
prev = this++;
- last = &cpuinfo->topology[cpuinfo->configured-1];
+ last = &cpuinfo->topology[cpuinfo->topology_size-1];
while (1) {
this->thread = ((this->node == prev->node
@@ -1511,7 +1517,7 @@ const char* parse_topology_spec_group(erts_cpu_info_t *cpuinfo, const char* xml,
}
}
- if (cacheLevel == 0) {
+ if (parentCacheLevel == 0) {
*core_p = 0;
*processor_p = (*processor_p)++;
} else {
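The added _SC_NPROC_* fallback defines let the processor-count probing in this file use the POSIX spellings on systems (IRIX-style platforms, for instance) that only provide _SC_NPROC_CONF/_SC_NPROC_ONLN. A minimal sketch of the sysconf calls this enables (example_cpu_counts is hypothetical; error handling omitted):

    #include <unistd.h>

    static void
    example_cpu_counts(long *configured, long *online)
    {
        /* With the fallbacks in place, both spellings resolve to whichever
         * _SC_* constant the platform actually defines. */
        *configured = sysconf(_SC_NPROCESSORS_CONF);
        *online     = sysconf(_SC_NPROCESSORS_ONLN);
    }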
diff --git a/erts/lib_src/common/erl_printf.c b/erts/lib_src/common/erl_printf.c
index 72d18ab6f1..2c177ee5ac 100644
--- a/erts/lib_src/common/erl_printf.c
+++ b/erts/lib_src/common/erl_printf.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -108,7 +108,7 @@ write_f_add_cr(void *vfp, char* buf, size_t len)
if (PUTC(buf[i], (FILE *) vfp) == EOF)
return get_error_result();
}
- return 0;
+ return len;
}
static int
@@ -126,13 +126,14 @@ write_f(void *vfp, char* buf, size_t len)
#endif
if (FWRITE((void *) buf, sizeof(char), len, (FILE *) vfp) != len)
return get_error_result();
- return 0;
+ return len;
}
static int
write_fd(void *vfdp, char* buf, size_t len)
{
ssize_t size;
+ size_t res = len;
ASSERT(vfdp);
while (len) {
@@ -149,7 +150,7 @@ write_fd(void *vfdp, char* buf, size_t len)
len -= size;
}
- return 0;
+ return res;
}
static int
@@ -160,7 +161,7 @@ write_s(void *vwbufpp, char* bufp, size_t len)
ASSERT(len > 0);
memcpy((void *) *wbufpp, (void *) bufp, len);
*wbufpp += len;
- return 0;
+ return len;
}
@@ -172,6 +173,7 @@ typedef struct {
static int
write_sn(void *vwsnap, char* buf, size_t len)
{
+ int rv = 0;
write_sn_arg_t *wsnap = (write_sn_arg_t *) vwsnap;
ASSERT(wsnap);
ASSERT(len > 0);
@@ -179,11 +181,13 @@ write_sn(void *vwsnap, char* buf, size_t len)
size_t sz = len;
if (sz >= wsnap->len)
sz = wsnap->len;
+ rv = (int)sz;
memcpy((void *) wsnap->buf, (void *) buf, sz);
wsnap->buf += sz;
wsnap->len -= sz;
+ return sz;
}
- return 0;
+ return rv;
}
static int
@@ -201,7 +205,7 @@ write_ds(void *vdsbufp, char* buf, size_t len)
}
memcpy((void *) (dsbufp->str + dsbufp->str_len), (void *) buf, len);
dsbufp->str_len += len;
- return 0;
+ return len;
}
int
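The erl_printf.c changes above alter the contract of the output callbacks: on success they now return the number of characters written instead of 0, while errors keep going through the existing get_error_result() path. A hedged sketch of a conforming callback, assuming the int (*)(void *, char *, size_t) shape these writers share (count_writer is hypothetical):

    #include <stddef.h>

    /* Callback that just counts output, honouring the new return contract. */
    static int
    count_writer(void *arg, char *buf, size_t len)
    {
        size_t *total = (size_t *) arg;
        (void) buf;
        *total += len;
        return (int) len; /* characters "written" */
    }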
diff --git a/erts/lib_src/common/erl_printf_format.c b/erts/lib_src/common/erl_printf_format.c
index fba3fd723c..473791dce4 100644
--- a/erts/lib_src/common/erl_printf_format.c
+++ b/erts/lib_src/common/erl_printf_format.c
@@ -388,7 +388,7 @@ static int fmt_double(fmtfn_t fn,void*arg,double val,
max_size++;
if (precision)
max_size += precision;
- else if (fmt && FMTF_alt)
+ else if (fmt & FMTF_alt)
max_size++;
break;
case FMTC_E:
@@ -402,7 +402,7 @@ static int fmt_double(fmtfn_t fn,void*arg,double val,
max_size += 4;
if (precision)
max_size += precision;
- else if (fmt && FMTF_alt)
+ else if (fmt & FMTF_alt)
max_size++;
aexp = exp >= 0 ? exp : -exp;
if (aexp < 100)
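The two hunks above fix a flag test that used logical AND instead of bitwise AND: "fmt && FMTF_alt" is true whenever both operands are non-zero, so the FMTF_alt bit was effectively treated as set for any non-zero fmt. A small standalone illustration (the flag value below is made up for the example):

#include <stdio.h>

#define FMTF_alt 0x4                                   /* hypothetical flag bit */

int main(void)
{
    unsigned fmt = 0x1;                                /* FMTF_alt NOT set */
    printf("fmt && FMTF_alt -> %d\n", fmt && FMTF_alt);         /* 1: wrong    */
    printf("fmt &  FMTF_alt -> %d\n", (fmt & FMTF_alt) != 0);   /* 0: correct  */
    return 0;
}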
diff --git a/erts/lib_src/common/ethr_atomics.c b/erts/lib_src/common/ethr_atomics.c
index 94557d904a..c52166a7ec 100644
--- a/erts/lib_src/common/ethr_atomics.c
+++ b/erts/lib_src/common/ethr_atomics.c
@@ -1,7 +1,16 @@
/*
+ * --------------- DO NOT EDIT THIS FILE! ---------------
+ * This file was automatically generated by the
+ * $ERL_TOP/erts/lib_src/utils/make_atomics_api script.
+ * If you need to make changes, edit the script and
+ * regenerate this file.
+ * --------------- DO NOT EDIT THIS FILE! ---------------
+ */
+
+/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -18,385 +27,4678 @@
*/
/*
- * Description: The ethread atomic API
+ * Description: The ethread atomics API
* Author: Rickard Green
*/
+/*
+ * This file maps native atomic implementations to ethread
+ * API atomics. If no native atomic implementation
+ * is available, a less efficient fallback is used instead.
+ * The API consists of 32-bit size, word size (pointer size),
+ * and double word size atomics.
+ *
+ * The following atomic operations are implemented for
+ * 32-bit size, and word size atomics:
+ * - cmpxchg
+ * - xchg
+ * - set
+ * - init
+ * - add_read
+ * - read
+ * - inc_read
+ * - dec_read
+ * - add
+ * - inc
+ * - dec
+ * - read_band
+ * - read_bor
+ *
+ * The following atomic operations are implemented for
+ * double word size atomics:
+ * - cmpxchg
+ * - set
+ * - read
+ * - init
+ *
+ * Apart from a function implementing the atomic operation
+ * with unspecified memory barrier semantics, there are
+ * functions implementing each operation with the following
+ * implied memory barrier semantics:
+ * - mb - Full memory barrier. Orders both loads, and
+ * stores before, and after the atomic operation.
+ * No load or store is allowed to be reordered
+ * over the atomic operation.
+ * - relb - Release barrier. Orders both loads, and
+ * stores appearing *before* the atomic
+ * operation. These are not allowed to be
+ * reordered over the atomic operation.
+ * - acqb - Acquire barrier. Orders both loads, and stores
+ * appearing *after* the atomic operation. These
+ * are not allowed to be reordered over the
+ * atomic operation.
+ * - wb - Write barrier. Orders *only* stores. These are
+ * not allowed to be reordered over the barrier.
+ * Store in atomic operation is ordered *after*
+ * the barrier.
+ * - rb - Read barrier. Orders *only* loads. These are
+ * not allowed to be reordered over the barrier.
+ * Load in atomic operation is ordered *before*
+ * the barrier.
+ * - ddrb - Data dependency read barrier. Orders *only*
+ * loads according to data dependency across the
+ * barrier. Load in atomic operation is ordered
+ * before the barrier.
+ *
+ * We implement all of these operation/barrier
+ * combinations, regardless of whether they are useful
+ * or not (some of them are useless).
+ *
+ * Double word size atomic functions are of the following
+ * form:
+ * ethr_dw_atomic_<OP>[_<BARRIER>]
+ *
+ * Word size atomic functions are of the following
+ * form:
+ * ethr_atomic_<OP>[_<BARRIER>]
+ *
+ * 32-bit size atomic functions are of the following
+ * form:
+ * ethr_atomic32_<OP>[_<BARRIER>]
+ *
+ * Apart from the operation/barrier functions
+ * described above, 'addr' functions are also implemented
+ * which return the actual memory address used by the
+ * atomic variable. The 'addr' functions have no barrier
+ * versions.
+ *
+ * The native atomic implementation does not need to
+ * implement all operation/barrier combinations.
+ * Functions that have no native implementation will be
+ * constructed from existing native functionality. These
+ * functions will perform the wanted operation and will
+ * produce sufficient memory barriers, but may
+ * in some cases be less efficient than pure native
+ * versions.
+ *
+ * When we create ethread API operation/barrier functions by
+ * adding barriers before and after native operations it is
+ * assumed that:
+ * - A native read operation begins, and ends with a load.
+ * - A native set operation begins, and ends with a store.
+ * - An init operation begins with either a load, or a store,
+ * and ends with either a load, or a store.
+ * - All other operations begin with a load, and end with
+ * either a load, or a store.
+ *
+ * This is the minimum functionality that a native
+ * implementation needs to provide:
+ *
+ * - Functions that need to be implemented:
+ *
+ * - ethr_native_[dw_|su_dw_]atomic[BITS]_addr
+ * - ethr_native_[dw_|su_dw_]atomic[BITS]_cmpxchg[_<BARRIER>]
+ * (at least one cmpxchg, with any barrier semantics)
+ *
+ * - Macros that need to be defined:
+ *
+ * A macro informing about the presence of the native
+ * implementation:
+ *
+ * - ETHR_HAVE_NATIVE_[DW_|SU_DW_]ATOMIC[BITS]
+ *
+ * A macro naming (a string constant) the implementation:
+ *
+ * - ETHR_NATIVE_[DW_]ATOMIC[BITS]_IMPL
+ *
+ * Each implemented native atomic function has to
+ * be accompanied by a defined macro of the following
+ * form informing about its presence:
+ *
+ * - ETHR_HAVE_ETHR_NATIVE_[DW_|SU_DW_]ATOMIC[BITS]_<OP>[_<BARRIER>]
+ *
+ * A (sparc-v9 style) membar macro:
+ *
+ * - ETHR_MEMBAR(B)
+ *
+ * which takes a combination of the following macros
+ * OR:ed together (using |):
+ *
+ * - ETHR_LoadLoad
+ * - ETHR_LoadStore
+ * - ETHR_StoreLoad
+ * - ETHR_StoreStore
+ *
+ */
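As a quick orientation for the naming scheme described above, a caller-side sketch of the word size API follows; the counter variable is illustrative only, and the entry points are assumed to be the public names exported by ethread.h:

#include "ethread.h"

static ethr_atomic_t refc;                   /* hypothetical counter */

static void refc_example(void)
{
    ethr_atomic_init(&refc, 0);              /* <OP> = init, no barrier       */
    (void) ethr_atomic_inc_read(&refc);      /* <OP> = inc_read               */
    (void) ethr_atomic_read_acqb(&refc);     /* <OP> = read, <BARRIER> = acqb */
    ethr_atomic_set_relb(&refc, 0);          /* <OP> = set,  <BARRIER> = relb */
}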
+
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
-#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_TRY_INLINE_FUNCS
+#define ETHR_INLINE_DW_ATMC_FUNC_NAME_(X) X ## __
+#define ETHR_INLINE_ATMC_FUNC_NAME_(X) X ## __
+#define ETHR_INLINE_ATMC32_FUNC_NAME_(X) X ## __
#define ETHR_ATOMIC_IMPL__
#include "ethread.h"
#include "ethr_internal.h"
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
-ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#if (!defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS) \
+ || !defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS))
+/*
+ * Spinlock based fallback for atomics used in absence of a native
+ * implementation.
+ */
+
+#define ETHR_ATMC_FLLBK_ADDR_BITS 10
+#define ETHR_ATMC_FLLBK_ADDR_SHIFT 6
+
+typedef struct {
+ union {
+ ethr_spinlock_t lck;
+ char buf[ETHR_CACHE_LINE_ALIGN_SIZE(sizeof(ethr_spinlock_t))];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATMC_FLLBK_ADDR_BITS];
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATMC_FLLBK_ADDR_SHIFT) \
+ & ((1 << ETHR_ATMC_FLLBK_ADDR_BITS) - 1))].u.lck)
+
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
+ { EXPS; } \
+ ethr_spin_unlock(slp__); \
+} while (0)
+
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATMC_FLLBK_ADDR_BITS];
+
+#endif
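Each atomic address is hashed onto one of the 2^10 cache line aligned spinlocks above, and every fallback operation runs under that lock. A sketch of how the macro composes an operation, in the same style as the generated functions further down (the function name is illustrative only, not part of this file):

/* Illustrative only: a fetch-and-add built on the spinlock fallback. */
static ethr_sint_t
fallback_add_read_sketch(ethr_sint_t *var, ethr_sint_t incr)
{
    ethr_sint_t res;
    ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr; res = *var);
    return res;
}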
+
+
+#if defined(ETHR_AMC_FALLBACK__)
+
+/*
+ * Fallback for large sized (word and/or double word size) atomics using
+ * an "Atomic Modification Counter" based on smaller sized native atomics.
+ *
+ * We use a 63-bit modification counter and a one-bit exclusive flag.
+ * If 32-bit native atomics are used, we need two 32-bit native atomics.
+ * The exclusive flag is the least significant bit, or if multiple atomics
+ * are used, the least significant bit of the least significant atomic.
+ *
+ * When using the AMC fallback the following is true:
+ * - Reads of the same atomic variable can be done in parallel.
+ * - Uncontended reads don't cause any cache line invalidations,
+ * since no modifications are done.
+ * - Assuming that the AMC atomic(s) and the integer(s) containing the
+ * value of the implemented atomic reside in the same cache line,
+ * modifications will only cause invalidations of one cache line.
+ *
+ * When using the spinlock based fallback none of the above is true,
+ * however, the spinlock based fallback consumes less memory.
+ */
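The reader side of this scheme is essentially a seqlock. The sketch below (plain C, memory barriers elided, no ethread types) shows the protocol that amc_try_read/amc_read below implement, and is for orientation only under those simplifying assumptions:

/* seq: modification counter with the exclusive flag in bit 0.
 * Returns 1 if *out holds a consistent snapshot of *value. */
static int seqlock_style_try_read(volatile long *seq,
                                  volatile long *value, long *out)
{
    long s0 = *seq;
    if (s0 & 1)
        return 0;            /* writer active (exclusive bit set)    */
    *out = *value;           /* read the protected value             */
    return *seq == s0;       /* ok only if nothing changed meanwhile */
}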
+
+# if ETHR_AMC_NO_ATMCS__ != 1 && ETHR_AMC_NO_ATMCS__ != 2
+# error "Not supported"
+# endif
+# define ETHR_AMC_MAX_TRY_READ__ 10
+# ifdef ETHR_DEBUG
+# define ETHR_DBG_CHK_EXCL_STATE(ASP, S) \
+do { \
+ ETHR_AMC_SINT_T__ act = ETHR_AMC_ATMC_FUNC__(read)(&(ASP)->atomic[0]); \
+ ETHR_ASSERT(act == (S) + 1); \
+ ETHR_ASSERT(act & 1); \
+} while (0)
+# else
+# define ETHR_DBG_CHK_EXCL_STATE(ASP, S)
+# endif
+
+static ETHR_INLINE void
+amc_init(ethr_amc_t *amc, int dw, ethr_sint_t *avar, ethr_sint_t *val)
+{
+ avar[0] = val[0];
+ if (dw)
+ avar[1] = val[1];
+#if ETHR_AMC_NO_ATMCS__ == 2
+ ETHR_AMC_ATMC_FUNC__(init)(&amc->atomic[1], 0);
#endif
+ ETHR_AMC_ATMC_FUNC__(init_wb)(&amc->atomic[0], 0);
+}
+
+static ETHR_INLINE ETHR_AMC_SINT_T__
+amc_set_excl(ethr_amc_t *amc, ETHR_AMC_SINT_T__ prev_state0)
+{
+ ETHR_AMC_SINT_T__ state0 = prev_state0;
+ /* Set exclusive flag. */
+ while (1) {
+ ETHR_AMC_SINT_T__ act_state0, new_state0;
+ while (state0 & 1) { /* Wait until exclusive bit has been cleared */
+ ETHR_SPIN_BODY;
+ state0 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+ }
+ /* Try to set exclusive bit */
+ new_state0 = state0 + 1;
+ act_state0 = ETHR_AMC_ATMC_FUNC__(cmpxchg_acqb)(&amc->atomic[0],
+ new_state0,
+ state0);
+ if (state0 == act_state0)
+ return state0; /* old state0 */
+ state0 = act_state0;
+ }
+}
+
+static ETHR_INLINE void
+amc_inc_mc_unset_excl(ethr_amc_t *amc, ETHR_AMC_SINT_T__ old_state0)
+{
+ ETHR_AMC_SINT_T__ state0 = old_state0;
+
+ /* Increment modification counter and reset exclusive flag. */
+
+ ETHR_DBG_CHK_EXCL_STATE(amc, state0);
+
+ state0 += 2;
+
+ ETHR_ASSERT((state0 & 1) == 0);
+
+#if ETHR_AMC_NO_ATMCS__ == 2
+ if (state0 == 0) {
+ /*
+ * state0 wrapped, so we need to increment state1. There is no need
+ * for an atomic inc op, since this is always done while holding the
+ * exclusive flag.
+ */
+ ETHR_AMC_SINT_T__ state1 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[1]);
+ state1++;
+ ETHR_AMC_ATMC_FUNC__(set)(&amc->atomic[1], state1);
+ }
+#endif
+ ETHR_AMC_ATMC_FUNC__(set_relb)(&amc->atomic[0], state0);
+}
+
+static ETHR_INLINE void
+amc_unset_excl(ethr_amc_t *amc, ETHR_AMC_SINT_T__ old_state0)
+{
+ ETHR_DBG_CHK_EXCL_STATE(amc, old_state0);
+ /*
+ * Reset exclusive flag, but leave modification counter unchanged,
+ * i.e., restore state to what it was before setting exclusive
+ * flag.
+ */
+ ETHR_AMC_ATMC_FUNC__(set_relb)(&amc->atomic[0], old_state0);
+}
+
+static ETHR_INLINE void
+amc_set(ethr_amc_t *amc, int dw, ethr_sint_t *avar, ethr_sint_t *val)
+{
+ ETHR_AMC_SINT_T__ state0 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+
+ state0 = amc_set_excl(amc, state0);
+
+ avar[0] = val[0];
+ if (dw)
+ avar[1] = val[1];
+
+ amc_inc_mc_unset_excl(amc, state0);
+}
+
+static ETHR_INLINE int
+amc_try_read(ethr_amc_t *amc, int dw, ethr_sint_t *avar,
+ ethr_sint_t *val, ETHR_AMC_SINT_T__ *state0p)
+{
+ /* *state0p should contain last read value if aborting */
+ ETHR_AMC_SINT_T__ old_state0;
+#if ETHR_AMC_NO_ATMCS__ == 2
+ ETHR_AMC_SINT_T__ state1;
+ int abrt;
+#endif
+
+ *state0p = ETHR_AMC_ATMC_FUNC__(read_rb)(&amc->atomic[0]);
+ if ((*state0p) & 1)
+ return 0; /* exclusive flag set; abort */
+#if ETHR_AMC_NO_ATMCS__ == 2
+ state1 = ETHR_AMC_ATMC_FUNC__(read_rb)(&amc->atomic[1]);
+#else
+ ETHR_COMPILER_BARRIER;
+#endif
+
+ val[0] = avar[0];
+ if (dw)
+ val[1] = avar[1];
+
+ ETHR_READ_MEMORY_BARRIER;
+
+ /*
+ * Abort if state has changed (i.e., either the exclusive
+ * flag is set, or the modification counter changed).
+ */
+ old_state0 = *state0p;
+#if ETHR_AMC_NO_ATMCS__ == 2
+ *state0p = ETHR_AMC_ATMC_FUNC__(read_rb)(&amc->atomic[0]);
+ abrt = (old_state0 != *state0p);
+ abrt |= (state1 != ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[1]));
+ return abrt == 0;
+#else
+ *state0p = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+ return old_state0 == *state0p;
+#endif
+}
+
+static ETHR_INLINE void
+amc_read(ethr_amc_t *amc, int dw, ethr_sint_t *avar, ethr_sint_t *val)
+{
+ ETHR_AMC_SINT_T__ state0;
+ int i;
+
+#if ETHR_AMC_MAX_TRY_READ__ == 0
+ state0 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+#else
+ for (i = 0; i < ETHR_AMC_MAX_TRY_READ__; i++) {
+ if (amc_try_read(amc, dw, avar, val, &state0))
+ return; /* read success */
+ ETHR_SPIN_BODY;
+ }
+#endif
+
+ state0 = amc_set_excl(amc, state0);
+
+ val[0] = avar[0];
+ if (dw)
+ val[1] = avar[1];
+
+ amc_unset_excl(amc, state0);
+}
+
+static ETHR_INLINE int
+amc_cmpxchg(ethr_amc_t *amc, int dw, ethr_sint_t *avar,
+ ethr_sint_t *new, ethr_sint_t *xchg)
+{
+ ethr_sint_t val[2];
+ ETHR_AMC_SINT_T__ state0;
+
+ if (amc_try_read(amc, dw, avar, val, &state0)) {
+ if (val[0] != xchg[0] || (dw && val[1] != xchg[1])) {
+ xchg[0] = val[0];
+ if (dw)
+ xchg[1] = val[1];
+ return 0; /* failed */
+ }
+ /* Operation will succeed if not interrupted */
+ }
+
+ state0 = amc_set_excl(amc, state0);
+
+ if (xchg[0] != avar[0] || (dw && xchg[1] != avar[1])) {
+ xchg[0] = avar[0];
+ if (dw)
+ xchg[1] = avar[1];
+
+ ETHR_DBG_CHK_EXCL_STATE(amc, state0);
+
+ amc_unset_excl(amc, state0);
+ return 0; /* failed */
+ }
+
+ avar[0] = new[0];
+ if (dw)
+ avar[1] = new[1];
+
+ amc_inc_mc_unset_excl(amc, state0);
+ return 1;
+}
+
+
+#define ETHR_AMC_MODIFICATION_OPS__(AMC, OPS) \
+do { \
+ ETHR_AMC_SINT_T__ state0__; \
+ state0__ = ETHR_AMC_ATMC_FUNC__(read)(&(AMC)->atomic[0]); \
+ state0__ = amc_set_excl((AMC), state0__); \
+ { OPS; } \
+ amc_inc_mc_unset_excl((AMC), state0__); \
+} while (0)
+
+#endif /* amc fallback */
+
int
ethr_init_atomics(void)
{
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
- {
- int i;
- for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
- int res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
- if (res != 0)
- return res;
- }
+#if (!defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS) \
+ || !defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS))
+ int i;
+ for (i = 0; i < (1 << ETHR_ATMC_FLLBK_ADDR_BITS); i++) {
+ int res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
}
#endif
return 0;
}
+
+/* ---------- Double word size atomic implementation ---------- */
+
+
+
/*
- * --- Pointer size atomics ---------------------------------------------------
+ * Double word atomics need runtime test.
*/
-ethr_sint_t *
-ethr_atomic_addr(ethr_atomic_t *var)
+int ethr_have_native_dw_atomic(void)
+{
+ return ethr_have_native_dw_atomic__();
+}
+
+
+/* --- addr() --- */
+
+ethr_sint_t *ETHR_DW_ATOMIC_FUNC__(addr)(ethr_dw_atomic_t *var)
+{
+ ethr_sint_t *res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_addr__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = (ethr_sint_t *) ((&var->fallback))->sint;
+#else
+ res = (ethr_sint_t *) (&var->fallback);
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+ethr_sint_t *ethr_dw_atomic_addr(ethr_dw_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_addr__(var);
+}
+#endif
+
+
+/* -- cmpxchg() -- */
+
+
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_cmpxchg__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = amc_cmpxchg(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint, old_val->sint);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback),
+ {
+ res = ((&var->fallback)->sint[0] == old_val->sint[0] && (&var->fallback)->sint[1] == old_val->sint[1]);
+ if (res) {
+ (&var->fallback)->sint[0] = val->sint[0];
+ (&var->fallback)->sint[1] = val->sint[1];
+ }
+ else {
+ old_val->sint[0] = (&var->fallback)->sint[0];
+ old_val->sint[1] = (&var->fallback)->sint[1];
+ }
+ });
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+int ethr_dw_atomic_cmpxchg(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_cmpxchg__(var, val, old_val);
+}
+#endif
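Whichever branch is compiled in, the caller-visible contract is the usual compare-and-exchange: return non-zero on success, and on failure write the currently stored value back into old_val. A hedged sketch of the typical retry loop built on it (function and variable names are illustrative; the entry points are assumed from ethread.h):

/* Illustrative double word fetch-and-add built on the API above. */
static void dw_add_sketch(ethr_dw_atomic_t *var, ethr_sint_t incr)
{
    ethr_dw_sint_t expected, desired;
    ethr_dw_atomic_read(var, &expected);
    do {
        desired.sint[0] = expected.sint[0] + incr;   /* update first word */
        desired.sint[1] = expected.sint[1];          /* keep second word  */
    } while (!ethr_dw_atomic_cmpxchg(var, &desired, &expected));
    /* on failure, expected has been refreshed with the current value */
}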
+
+int ethr_dw_atomic_cmpxchg_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_dw_atomic_cmpxchg_ddrb__(var, val, old_val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_dw_atomic_cmpxchg(var, val, old_val);
+#else
+ return ethr_dw_atomic_cmpxchg_rb(var, val, old_val);
+#endif
+}
+
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_cmpxchg_rb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = amc_cmpxchg(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback),
+ {
+ res = ((&var->fallback)->sint[0] == old_val->sint[0] && (&var->fallback)->sint[1] == old_val->sint[1]);
+ if (res) {
+ (&var->fallback)->sint[0] = val->sint[0];
+ (&var->fallback)->sint[1] = val->sint[1];
+ }
+ else {
+ old_val->sint[0] = (&var->fallback)->sint[0];
+ old_val->sint[1] = (&var->fallback)->sint[1];
+ }
+ });
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+int ethr_dw_atomic_cmpxchg_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_cmpxchg_rb__(var, val, old_val);
+}
+#endif
+
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_cmpxchg_wb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = amc_cmpxchg(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint, old_val->sint);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback),
+ {
+ res = ((&var->fallback)->sint[0] == old_val->sint[0] && (&var->fallback)->sint[1] == old_val->sint[1]);
+ if (res) {
+ (&var->fallback)->sint[0] = val->sint[0];
+ (&var->fallback)->sint[1] = val->sint[1];
+ }
+ else {
+ old_val->sint[0] = (&var->fallback)->sint[0];
+ old_val->sint[1] = (&var->fallback)->sint[1];
+ }
+ });
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+int ethr_dw_atomic_cmpxchg_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_cmpxchg_wb__(var, val, old_val);
+}
+#endif
+
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_cmpxchg_acqb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = amc_cmpxchg(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback),
+ {
+ res = ((&var->fallback)->sint[0] == old_val->sint[0] && (&var->fallback)->sint[1] == old_val->sint[1]);
+ if (res) {
+ (&var->fallback)->sint[0] = val->sint[0];
+ (&var->fallback)->sint[1] = val->sint[1];
+ }
+ else {
+ old_val->sint[0] = (&var->fallback)->sint[0];
+ old_val->sint[1] = (&var->fallback)->sint[1];
+ }
+ });
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+int ethr_dw_atomic_cmpxchg_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_cmpxchg_acqb__(var, val, old_val);
+}
+#endif
+
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_cmpxchg_relb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = amc_cmpxchg(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint, old_val->sint);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback),
+ {
+ res = ((&var->fallback)->sint[0] == old_val->sint[0] && (&var->fallback)->sint[1] == old_val->sint[1]);
+ if (res) {
+ (&var->fallback)->sint[0] = val->sint[0];
+ (&var->fallback)->sint[1] = val->sint[1];
+ }
+ else {
+ old_val->sint[0] = (&var->fallback)->sint[0];
+ old_val->sint[1] = (&var->fallback)->sint[1];
+ }
+ });
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+int ethr_dw_atomic_cmpxchg_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_cmpxchg_relb__(var, val, old_val);
+}
+#endif
+
+int ETHR_DW_ATOMIC_FUNC__(cmpxchg_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ int res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ res = ethr_dw_atomic_cmpxchg_mb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = amc_cmpxchg(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint, old_val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback),
+ {
+ res = ((&var->fallback)->sint[0] == old_val->sint[0] && (&var->fallback)->sint[1] == old_val->sint[1]);
+ if (res) {
+ (&var->fallback)->sint[0] = val->sint[0];
+ (&var->fallback)->sint[1] = val->sint[1];
+ }
+ else {
+ old_val->sint[0] = (&var->fallback)->sint[0];
+ old_val->sint[1] = (&var->fallback)->sint[1];
+ }
+ });
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+int ethr_dw_atomic_cmpxchg_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val, ethr_dw_sint_t *old_val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_dw_atomic_cmpxchg_mb__(var, val, old_val);
+}
+#endif
+
+
+/* -- set() -- */
+
+
+void ETHR_DW_ATOMIC_FUNC__(set)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_set__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_set(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_set(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_set__(var, val);
+}
+#endif
+
+void ethr_dw_atomic_set_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_dw_atomic_set_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_dw_atomic_set(var, val);
+#else
+ ethr_dw_atomic_set_rb(var, val);
+#endif
+}
+
+void ETHR_DW_ATOMIC_FUNC__(set_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_set_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_set(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_set_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_set_rb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(set_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_set_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ amc_set(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_set_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_set_wb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(set_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_set_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_set(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_set_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_set_acqb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(set_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_set_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ amc_set(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_set_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_set_relb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(set_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_set_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ amc_set(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_set_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_set_mb__(var, val);
+}
+#endif
+
+
+/* -- read() -- */
+
+
+void ETHR_DW_ATOMIC_FUNC__(read)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_read__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_read(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), val->sint[0] = (&var->fallback)->sint[0]; val->sint[1] = (&var->fallback)->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_read(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_read__(var, val);
+}
+#endif
+
+void ethr_dw_atomic_read_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_dw_atomic_read_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_dw_atomic_read(var, val);
+#else
+ ethr_dw_atomic_read_rb(var, val);
+#endif
+}
+
+void ETHR_DW_ATOMIC_FUNC__(read_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_read_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_read(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), val->sint[0] = (&var->fallback)->sint[0]; val->sint[1] = (&var->fallback)->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_read_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_read_rb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(read_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_read_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ amc_read(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), val->sint[0] = (&var->fallback)->sint[0]; val->sint[1] = (&var->fallback)->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_read_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_read_wb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(read_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_read_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_read(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), val->sint[0] = (&var->fallback)->sint[0]; val->sint[1] = (&var->fallback)->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_read_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_read_acqb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(read_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_read_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ amc_read(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), val->sint[0] = (&var->fallback)->sint[0]; val->sint[1] = (&var->fallback)->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_read_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_read_relb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(read_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_read_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ amc_read(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), val->sint[0] = (&var->fallback)->sint[0]; val->sint[1] = (&var->fallback)->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_read_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_read_mb__(var, val);
+}
+#endif
+
+
+/* -- init() -- */
+
+
+void ETHR_DW_ATOMIC_FUNC__(init)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_init__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_init(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_init(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_init__(var, val);
+}
+#endif
+
+void ethr_dw_atomic_init_ddrb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_dw_atomic_init_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_dw_atomic_init(var, val);
+#else
+ ethr_dw_atomic_init_rb(var, val);
+#endif
+}
+
+void ETHR_DW_ATOMIC_FUNC__(init_rb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_init_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_init(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_init_rb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_init_rb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(init_wb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_init_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ amc_init(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_init_wb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_init_wb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(init_acqb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_init_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_init(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_init_acqb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_init_acqb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(init_relb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_init_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ amc_init(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_init_relb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_init_relb__(var, val);
+}
+#endif
+
+void ETHR_DW_ATOMIC_FUNC__(init_mb)(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS) && !defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ ethr_dw_atomic_init_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ amc_init(&(&var->fallback)->amc, 1, (&var->fallback)->sint, val->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__((&var->fallback), (&var->fallback)->sint[0] = val->sint[0]; (&var->fallback)->sint[1] = val->sint[1]);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+#ifdef ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__
+void ethr_dw_atomic_init_mb(ethr_dw_atomic_t *var, ethr_dw_sint_t *val)
+{
+ ETHR_ASSERT(var);
+ ethr_dw_atomic_init_mb__(var, val);
+}
+#endif
+
+
+/* ---------- Word size atomic implementation ---------- */
+
+
+
+
+/* --- addr() --- */
+
+ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *var)
+{
+ ethr_sint_t *res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_addr__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = (ethr_sint_t *) (var)->sint;
+#else
+ res = (ethr_sint_t *) var;
+#endif
+ return res;
+}
+
+
+/* -- cmpxchg() -- */
+
+
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_cmpxchg__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = old_val;
+ (void) amc_cmpxchg(&var->amc, 0, &var->sint, &val, &res);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_cmpxchg_ddrb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_cmpxchg_ddrb__(var, val, old_val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_cmpxchg(var, val, old_val);
+#else
+ return ethr_atomic_cmpxchg_rb(var, val, old_val);
+#endif
+}
+
+ethr_sint_t ethr_atomic_cmpxchg_rb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_cmpxchg_rb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = old_val;
+ (void) amc_cmpxchg(&var->amc, 0, &var->sint, &val, &res);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_cmpxchg_wb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_cmpxchg_wb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ res = old_val;
+ (void) amc_cmpxchg(&var->amc, 0, &var->sint, &val, &res);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_cmpxchg_acqb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ res = old_val;
+ (void) amc_cmpxchg(&var->amc, 0, &var->sint, &val, &res);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_cmpxchg_relb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = old_val;
+ (void) amc_cmpxchg(&var->amc, 0, &var->sint, &val, &res);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_cmpxchg_mb(ethr_atomic_t *var, ethr_sint_t val, ethr_sint_t old_val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_cmpxchg_mb__(var, val, old_val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ res = old_val;
+ (void) amc_cmpxchg(&var->amc, 0, &var->sint, &val, &res);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- xchg() -- */
+
+
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_xchg__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint = val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_xchg_ddrb(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_xchg_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_xchg(var, val);
+#else
+ return ethr_atomic_xchg_rb(var, val);
+#endif
+}
+
+ethr_sint_t ethr_atomic_xchg_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_xchg_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_xchg_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_xchg_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint = val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_xchg_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_xchg_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_xchg_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_xchg_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint = val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_xchg_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_xchg_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- set() -- */
+
+
+void ethr_atomic_set(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_set(&var->amc, 0, &var->sint, &val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic_set_ddrb(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic_set(var, val);
+#else
+ ethr_atomic_set_rb(var, val);
+#endif
+}
+
+void ethr_atomic_set_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_set(&var->amc, 0, &var->sint, &val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+void ethr_atomic_set_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ amc_set(&var->amc, 0, &var->sint, &val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic_set_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_set(&var->amc, 0, &var->sint, &val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic_set_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ amc_set(&var->amc, 0, &var->sint, &val);
+#else
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic_set_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_set_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ amc_set(&var->amc, 0, &var->sint, &val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+
+/* -- init() -- */
+
+
+void ethr_atomic_init(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_init(&var->amc, 0, &var->sint, &val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic_init_ddrb(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic_init(var, val);
+#else
+ ethr_atomic_init_rb(var, val);
+#endif
+}
+
+void ethr_atomic_init_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_init(&var->amc, 0, &var->sint, &val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+void ethr_atomic_init_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ amc_init(&var->amc, 0, &var->sint, &val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic_init_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_init(&var->amc, 0, &var->sint, &val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic_init_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ amc_init(&var->amc, 0, &var->sint, &val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic_init_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_init_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ amc_init(&var->amc, 0, &var->sint, &val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+
+/* -- add_read() -- */
+
+
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_add_read__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val; res = var->sint);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_add_read_ddrb(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_add_read_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_add_read(var, val);
+#else
+ return ethr_atomic_add_read_rb(var, val);
+#endif
+}
+
+ethr_sint_t ethr_atomic_add_read_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_add_read_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val; res = var->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_add_read_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_add_read_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val; res = var->sint);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_add_read_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_add_read_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val; res = var->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_add_read_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_add_read_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val; res = var->sint);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_add_read_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_add_read_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val; res = var->sint);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
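
The add_read() family returns the value after the addition, so a single call both updates and reads a shared counter. A minimal usage sketch, assuming the ethread API is made available through the internal "ethread.h" header (the header name and the ticket counter are illustrative, not part of this patch):

#include "ethread.h"            /* assumed internal header declaring ethr_atomic_t */

static ethr_atomic_t ticket_counter;

void ticket_counter_init(void)
{
    /* init() is meant for a variable no other thread can see yet */
    ethr_atomic_init(&ticket_counter, 0);
}

ethr_sint_t next_ticket(void)
{
    /* full-barrier variant: every caller gets a unique, totally
     * ordered ticket value */
    return ethr_atomic_add_read_mb(&ticket_counter, 1);
}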
+
+
+/* -- read() -- */
+
+
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *var)
{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_addr__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_read(&var->amc, 0, &var->sint, &res);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_ddrb(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_read_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_read(var);
+#else
+ return ethr_atomic_read_rb(var);
+#endif
}
-void
-ethr_atomic_init(ethr_atomic_t *var, ethr_sint_t i)
+ethr_sint_t ethr_atomic_read_rb(ethr_atomic_t *var)
{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic_init__(var, i);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_rb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_read(&var->amc, 0, &var->sint, &res);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
}
-void
-ethr_atomic_set(ethr_atomic_t *var, ethr_sint_t i)
+ethr_sint_t ethr_atomic_read_wb(ethr_atomic_t *var)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic_set__(var, i);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_wb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ amc_read(&var->amc, 0, &var->sint, &res);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#endif
+ return res;
}
-ethr_sint_t
-ethr_atomic_read(ethr_atomic_t *var)
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *var)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_read__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_acqb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ amc_read(&var->amc, 0, &var->sint, &res);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+ return res;
}
-ethr_sint_t
-ethr_atomic_add_read(ethr_atomic_t *var, ethr_sint_t incr)
+ethr_sint_t ethr_atomic_read_relb(ethr_atomic_t *var)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_add_read__(var, incr);
-}
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_relb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ amc_read(&var->amc, 0, &var->sint, &res);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#endif
+ return res;
+}
-ethr_sint_t
-ethr_atomic_inc_read(ethr_atomic_t *var)
+ethr_sint_t ethr_atomic_read_mb(ethr_atomic_t *var)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_inc_read__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_mb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ amc_read(&var->amc, 0, &var->sint, &res);
+ ETHR_MEMBAR(ETHR_LoadStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+ return res;
}
-ethr_sint_t
-ethr_atomic_dec_read(ethr_atomic_t *var)
+
+/* -- inc_read() -- */
+
+
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *var)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_dec_read__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_inc_read__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = ++(var->sint));
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#endif
+ return res;
}
-void
-ethr_atomic_add(ethr_atomic_t *var, ethr_sint_t incr)
+ethr_sint_t ethr_atomic_inc_read_ddrb(ethr_atomic_t *var)
{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_inc_read_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_inc_read(var);
+#else
+ return ethr_atomic_inc_read_rb(var);
+#endif
+}
+
+ethr_sint_t ethr_atomic_inc_read_rb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_inc_read_rb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = ++(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_inc_read_wb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_inc_read_wb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = ++(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic_add__(var, incr);
-}
-
-void
-ethr_atomic_inc(ethr_atomic_t *var)
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_inc_read_acqb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = ++(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_inc_read_relb(ethr_atomic_t *var)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_inc_read_relb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = ++(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_inc_read_mb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_inc_read_mb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = ++(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- dec_read() -- */
+
+
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_dec_read__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = --(var->sint));
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_dec_read_ddrb(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_dec_read_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_dec_read(var);
+#else
+ return ethr_atomic_dec_read_rb(var);
+#endif
+}
+
+ethr_sint_t ethr_atomic_dec_read_rb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_dec_read_rb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = --(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_dec_read_wb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_dec_read_wb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = --(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_dec_read_acqb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_dec_read_acqb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = --(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_dec_read_relb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = --(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_dec_read_mb(ethr_atomic_t *var)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_dec_read_mb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = --(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
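
The dec_read() barrier variants map naturally onto reference counting: the decrement needs release semantics so the last owner's writes complete before the count can reach zero, and the thread that observes zero needs acquire semantics before tearing the object down; the full-barrier variant covers both. A sketch under that assumption, with a hypothetical my_obj type and destructor that are not part of this patch:

struct my_obj {
    ethr_atomic_t refc;
    /* ... payload ... */
};

void my_obj_release(struct my_obj *p)
{
    /* dec_read_mb() gives both the release ordering before the
     * decrement and the acquire ordering after it */
    if (ethr_atomic_dec_read_mb(&p->refc) == 0)
        my_obj_destroy(p);      /* hypothetical destructor */
}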
+
+
+/* -- add() -- */
+
+
+void ethr_atomic_add(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+#endif
+
+}
+
+void ethr_atomic_add_ddrb(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic_add(var, val);
+#else
+ ethr_atomic_add_rb(var, val);
+#endif
+}
+
+void ethr_atomic_add_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+void ethr_atomic_add_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+#endif
+
+}
+
+void ethr_atomic_add_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic_add_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+#endif
+
+}
+
+void ethr_atomic_add_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_add_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, var->sint += val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+
+/* -- inc() -- */
+
+
+void ethr_atomic_inc(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
ethr_atomic_inc__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, ++(var->sint));
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+
+}
+
+void ethr_atomic_inc_ddrb(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_inc_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic_inc(var);
+#else
+ ethr_atomic_inc_rb(var);
+#endif
}
-void
-ethr_atomic_dec(ethr_atomic_t *var)
+void ethr_atomic_inc_rb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic_dec__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_inc_rb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, ++(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
}
-ethr_sint_t
-ethr_atomic_read_band(ethr_atomic_t *var, ethr_sint_t mask)
+void ethr_atomic_inc_wb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_read_band__(var, mask);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_inc_wb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, ++(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+
}
-ethr_sint_t
-ethr_atomic_read_bor(ethr_atomic_t *var, ethr_sint_t mask)
+void ethr_atomic_inc_acqb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_read_bor__(var, mask);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_inc_acqb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, ++(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint_t
-ethr_atomic_xchg(ethr_atomic_t *var, ethr_sint_t new)
+void ethr_atomic_inc_relb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_xchg__(var, new);
-}
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_inc_relb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, ++(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
-ethr_sint_t
-ethr_atomic_cmpxchg(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t expected)
+}
+
+void ethr_atomic_inc_mb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg__(var, new, expected);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_inc_mb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, ++(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint_t
-ethr_atomic_read_acqb(ethr_atomic_t *var)
+
+/* -- dec() -- */
+
+
+void ethr_atomic_dec(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_read_acqb__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_dec__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, --(var->sint));
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+
+}
+
+void ethr_atomic_dec_ddrb(ethr_atomic_t *var)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_dec_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic_dec(var);
+#else
+ ethr_atomic_dec_rb(var);
+#endif
}
-ethr_sint_t
-ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+void ethr_atomic_dec_rb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_inc_read_acqb__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_dec_rb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, --(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
}
-void
-ethr_atomic_set_relb(ethr_atomic_t *var, ethr_sint_t i)
+void ethr_atomic_dec_wb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic_set_relb__(var, i);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_dec_wb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, --(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+
}
-void
-ethr_atomic_dec_relb(ethr_atomic_t *var)
+void ethr_atomic_dec_acqb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_dec_acqb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, --(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic_dec_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
ethr_atomic_dec_relb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, --(var->sint));
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+
}
-ethr_sint_t
-ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+void ethr_atomic_dec_mb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_dec_read_relb__(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ ethr_atomic_dec_mb__(var);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, --(var->sint));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint_t
-ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
+
+/* -- read_band() -- */
+
+
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *var, ethr_sint_t val)
{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg_acqb__(var, new, exp);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_band__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint &= val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+#endif
+ return res;
}
-ethr_sint_t
-ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
+ethr_sint_t ethr_atomic_read_band_ddrb(ethr_atomic_t *var, ethr_sint_t val)
{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_read_band_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_read_band(var, val);
+#else
+ return ethr_atomic_read_band_rb(var, val);
+#endif
+}
+
+ethr_sint_t ethr_atomic_read_band_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg_relb__(var, new, exp);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_band_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
}
+ethr_sint_t ethr_atomic_read_band_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_band_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint &= val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+#endif
+ return res;
+}
-/*
- * --- 32-bit atomics ---------------------------------------------------------
- */
+ethr_sint_t ethr_atomic_read_band_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_band_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_band_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_band_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint &= val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_band_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_band_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- read_bor() -- */
+
+
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_bor__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint |= val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_bor_ddrb(ethr_atomic_t *var, ethr_sint_t val)
+{
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ return ethr_atomic_read_bor_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic_read_bor(var, val);
+#else
+ return ethr_atomic_read_bor_rb(var, val);
+#endif
+}
+
+ethr_sint_t ethr_atomic_read_bor_rb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_bor_rb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_bor_wb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_bor_wb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint |= val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_bor_acqb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_bor_acqb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_bor_relb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_bor_relb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint |= val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+#endif
+ return res;
+}
+
+ethr_sint_t ethr_atomic_read_bor_mb(ethr_atomic_t *var, ethr_sint_t val)
+{
+ ethr_sint_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic_read_bor_mb__(var, val);
+#elif defined(ETHR_AMC_FALLBACK__)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_AMC_MODIFICATION_OPS__(&var->amc, res = var->sint; var->sint |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
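
read_bor() and read_band() return the value the word held before the bitwise update, which makes them convenient for set-and-test flag words. A small sketch with made-up flag names (only the ethr_atomic_* calls come from this file):

#define MY_FLG_SCHEDULED ((ethr_sint_t) 1)
#define MY_FLG_EXITING   ((ethr_sint_t) 2)

static ethr_atomic_t my_flags;

/* set the scheduled flag; nonzero result means this caller set it first */
int my_schedule_once(void)
{
    ethr_sint_t old = ethr_atomic_read_bor_mb(&my_flags, MY_FLG_SCHEDULED);
    return !(old & MY_FLG_SCHEDULED);
}

/* clear the scheduled flag; nonzero result means an exit was requested */
int my_clear_scheduled(void)
{
    ethr_sint_t old = ethr_atomic_read_band_mb(&my_flags, ~MY_FLG_SCHEDULED);
    return (old & MY_FLG_EXITING) != 0;
}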
+
+
+/* ---------- 32-bit atomic implementation ---------- */
+
+
+
+
+/* --- addr() --- */
+
+ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *var)
+{
+ ethr_sint32_t *res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_addr__(var);
+#else
+ res = (ethr_sint32_t *) var;
+#endif
+ return res;
+}
+
+
+/* -- cmpxchg() -- */
+
+
+ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_cmpxchg__(var, val, old_val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_cmpxchg_ddrb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_cmpxchg_ddrb__(var, val, old_val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_cmpxchg(var, val, old_val);
+#else
+ return ethr_atomic32_cmpxchg_rb(var, val, old_val);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_cmpxchg_rb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_cmpxchg_rb__(var, val, old_val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_cmpxchg_wb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_cmpxchg_wb__(var, val, old_val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_cmpxchg_acqb__(var, val, old_val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_cmpxchg_relb__(var, val, old_val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_cmpxchg_mb(ethr_atomic32_t *var, ethr_sint32_t val, ethr_sint32_t old_val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_cmpxchg_mb__(var, val, old_val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (*var == old_val ? (*var = val, old_val) : *var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
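
The cmpxchg() fallback expression above spells out the compare-and-exchange contract: the returned value equals old_val exactly when the swap was performed, otherwise it is the value currently stored. A typical retry loop built on that contract, here a saturating 32-bit counter (the limit and function name are made up for the example):

#define MY_LIMIT ((ethr_sint32_t) 1000)

/* increment *var but never beyond MY_LIMIT; returns the value installed,
 * or MY_LIMIT if the counter was already saturated */
ethr_sint32_t my_inc_saturating(ethr_atomic32_t *var)
{
    ethr_sint32_t exp = ethr_atomic32_read(var);
    while (exp < MY_LIMIT) {
        ethr_sint32_t act = ethr_atomic32_cmpxchg_mb(var, exp + 1, exp);
        if (act == exp)
            return exp + 1;     /* swap succeeded */
        exp = act;              /* lost the race; retry with the fresh value */
    }
    return MY_LIMIT;
}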
+
+
+/* -- xchg() -- */
+
+
+ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_xchg__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_xchg_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_xchg_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_xchg(var, val);
+#else
+ return ethr_atomic32_xchg_rb(var, val);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_xchg_rb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_xchg_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_xchg_wb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_xchg_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_xchg_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_xchg_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_xchg_relb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_xchg_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_xchg_mb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_xchg_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- set() -- */
+
+
+void ethr_atomic32_set(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic32_set_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic32_set(var, val);
+#else
+ ethr_atomic32_set_rb(var, val);
+#endif
+}
+
+void ethr_atomic32_set_rb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+void ethr_atomic32_set_wb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic32_set_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic32_set_relb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic32_set_mb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_set_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+
+/* -- init() -- */
+
+
+void ethr_atomic32_init(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic32_init_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic32_init(var, val);
+#else
+ ethr_atomic32_init_rb(var, val);
+#endif
+}
+
+void ethr_atomic32_init_rb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
+
+void ethr_atomic32_init_wb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic32_init_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic32_init_relb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+#endif
+
+}
+
+void ethr_atomic32_init_mb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_init_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
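
Unlike the other 32-bit operations, the init() family asserts only that the pointer is non-NULL, not that the library has been initialized, and in the fallback build it performs the same store as set(): the intended split is init() for a variable that no other thread can reach yet, and set()/set_relb() for updates after the variable has been published. A brief sketch (my_ready and the surrounding functions are illustrative):

static ethr_atomic32_t my_ready;

void my_startup(void)
{
    /* nothing else can reach my_ready yet, so a plain init() is enough */
    ethr_atomic32_init(&my_ready, 0);
}

void my_publish(void)
{
    /* release: everything written during startup becomes visible
     * before a reader can observe my_ready != 0 */
    ethr_atomic32_set_relb(&my_ready, 1);
}

int my_is_ready(void)
{
    /* acquire on the reader side pairs with the release above */
    return ethr_atomic32_read_acqb(&my_ready) != 0;
}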
+
+
+/* -- add_read() -- */
+
+
+ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_add_read__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_add_read_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_add_read_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_add_read(var, val);
+#else
+ return ethr_atomic32_add_read_rb(var, val);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_add_read_rb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_add_read_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_add_read_wb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_add_read_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_add_read_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_add_read_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_add_read_relb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_add_read_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_add_read_mb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_add_read_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val; res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- read() -- */
+
+
+ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_ddrb(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_read_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_read(var);
+#else
+ return ethr_atomic32_read_rb(var);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_read_rb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_rb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_wb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_wb__(var);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_acqb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_relb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_relb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_mb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_mb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+ return res;
+}
+
+
+/* -- inc_read() -- */
+
+
+ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_inc_read__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_inc_read_ddrb(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_inc_read_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_inc_read(var);
+#else
+ return ethr_atomic32_inc_read_rb(var);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_inc_read_rb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_inc_read_rb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_inc_read_wb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_inc_read_wb__(var);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_inc_read_acqb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_inc_read_relb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_inc_read_relb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+#endif
+ return res;
+}
-ethr_sint32_t *
-ethr_atomic32_addr(ethr_atomic32_t *var)
+ethr_sint32_t ethr_atomic32_inc_read_mb(ethr_atomic32_t *var)
{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_addr__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_inc_read_mb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
}
-void
-ethr_atomic32_init(ethr_atomic32_t *var, ethr_sint32_t i)
+
+/* -- dec_read() -- */
+
+
+ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *var)
{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic32_init__(var, i);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_dec_read__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#endif
+ return res;
}
-void
-ethr_atomic32_set(ethr_atomic32_t *var, ethr_sint32_t i)
+ethr_sint32_t ethr_atomic32_dec_read_ddrb(ethr_atomic32_t *var)
{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_dec_read_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_dec_read(var);
+#else
+ return ethr_atomic32_dec_read_rb(var);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_dec_read_rb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic32_set__(var, i);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_dec_read_rb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
}
-ethr_sint32_t
-ethr_atomic32_read(ethr_atomic32_t *var)
+ethr_sint32_t ethr_atomic32_dec_read_wb(ethr_atomic32_t *var)
{
+ ethr_sint32_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_read__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_dec_read_wb__(var);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#endif
+ return res;
}
+ethr_sint32_t ethr_atomic32_dec_read_acqb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_dec_read_acqb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_dec_read_relb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_dec_read_mb(ethr_atomic32_t *var)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_dec_read_mb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- add() -- */
+
+
+void ethr_atomic32_add(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+#endif
+
+}
+
+void ethr_atomic32_add_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic32_add(var, val);
+#else
+ ethr_atomic32_add_rb(var, val);
+#endif
+}
+
+void ethr_atomic32_add_rb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
+}
-ethr_sint32_t
-ethr_atomic32_add_read(ethr_atomic32_t *var, ethr_sint32_t incr)
+void ethr_atomic32_add_wb(ethr_atomic32_t *var, ethr_sint32_t val)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_add_read__(var, incr);
-}
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+#endif
-ethr_sint32_t
-ethr_atomic32_inc_read(ethr_atomic32_t *var)
+}
+
+void ethr_atomic32_add_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_inc_read__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_dec_read(ethr_atomic32_t *var)
+void ethr_atomic32_add_relb(ethr_atomic32_t *var, ethr_sint32_t val)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_dec_read__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+#endif
+
}
-void
-ethr_atomic32_add(ethr_atomic32_t *var, ethr_sint32_t incr)
+void ethr_atomic32_add_mb(ethr_atomic32_t *var, ethr_sint32_t val)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic32_add__(var, incr);
-}
-
-void
-ethr_atomic32_inc(ethr_atomic32_t *var)
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_add_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+
+/* -- inc() -- */
+
+
+void ethr_atomic32_inc(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
ethr_atomic32_inc__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+
}
-void
-ethr_atomic32_dec(ethr_atomic32_t *var)
+void ethr_atomic32_inc_ddrb(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_inc_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic32_inc(var);
+#else
+ ethr_atomic32_inc_rb(var);
+#endif
+}
+
+void ethr_atomic32_inc_rb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic32_dec__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_inc_rb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_read_band(ethr_atomic32_t *var, ethr_sint32_t mask)
+void ethr_atomic32_inc_wb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_read_band__(var, mask);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_inc_wb__(var);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_read_bor(ethr_atomic32_t *var, ethr_sint32_t mask)
+void ethr_atomic32_inc_acqb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_read_bor__(var, mask);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_inc_acqb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_xchg(ethr_atomic32_t *var, ethr_sint32_t new)
+void ethr_atomic32_inc_relb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_xchg__(var, new);
-}
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_inc_relb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
-ethr_sint32_t
-ethr_atomic32_cmpxchg(ethr_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t expected)
+}
+
+void ethr_atomic32_inc_mb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_cmpxchg__(var, new, expected);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_inc_mb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_read_acqb(ethr_atomic32_t *var)
+
+/* -- dec() -- */
+
+
+void ethr_atomic32_dec(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_read_acqb__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_dec__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_inc_read_acqb(ethr_atomic32_t *var)
+void ethr_atomic32_dec_ddrb(ethr_atomic32_t *var)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_dec_ddrb__(var);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ethr_atomic32_dec(var);
+#else
+ ethr_atomic32_dec_rb(var);
+#endif
+}
+
+void ethr_atomic32_dec_rb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_inc_read_acqb__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_dec_rb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+
}
-void
-ethr_atomic32_set_relb(ethr_atomic32_t *var, ethr_sint32_t i)
+void ethr_atomic32_dec_wb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- ethr_atomic32_set_relb__(var, i);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_dec_wb__(var);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+
}
-void
-ethr_atomic32_dec_relb(ethr_atomic32_t *var)
+void ethr_atomic32_dec_acqb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_dec_acqb__(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
+}
+
+void ethr_atomic32_dec_relb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
ethr_atomic32_dec_relb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_dec_read_relb(ethr_atomic32_t *var)
+void ethr_atomic32_dec_mb(ethr_atomic32_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_dec_read_relb__(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ ethr_atomic32_dec_mb__(var);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+
}
-ethr_sint32_t
-ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t exp)
+
+/* -- read_band() -- */
+
+
+ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *var, ethr_sint32_t val)
{
+ ethr_sint32_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_cmpxchg_acqb__(var, new, exp);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_band__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_band_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_read_band_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_read_band(var, val);
+#else
+ return ethr_atomic32_read_band_rb(var, val);
+#endif
}
-ethr_sint32_t
-ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *var,
- ethr_sint32_t new,
- ethr_sint32_t exp)
+ethr_sint32_t ethr_atomic32_read_band_rb(ethr_atomic32_t *var, ethr_sint32_t val)
{
+ ethr_sint32_t res;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
- return ethr_atomic32_cmpxchg_relb__(var, new, exp);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_band_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_band_wb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_band_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_band_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_band_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_band_relb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_band_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_band_mb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_band_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+
+/* -- read_bor() -- */
+
+
+ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_bor__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_bor_ddrb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ return ethr_atomic32_read_bor_ddrb__(var, val);
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ return ethr_atomic32_read_bor(var, val);
+#else
+ return ethr_atomic32_read_bor_rb(var, val);
+#endif
+}
+
+ethr_sint32_t ethr_atomic32_read_bor_rb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_bor_rb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_bor_wb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_bor_wb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_StoreStore);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_bor_acqb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_bor_acqb__(var, val);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_bor_relb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_bor_relb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+#endif
+ return res;
+}
+
+ethr_sint32_t ethr_atomic32_read_bor_mb(ethr_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_sint32_t res;
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+ res = ethr_atomic32_read_bor_mb__(var, val);
+#else
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= val);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);
+#endif
+ return res;
+}
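Editorial note: a hedged usage sketch of the read_bor family defined above. read_bor returns the previous value, so it can implement a set-once flag; the flag constant, variable, and function names below are invented for illustration and are not part of the patch.

    #define MY_INIT_FLAG ((ethr_sint32_t) 1)

    static ethr_atomic32_t my_state;   /* assumed set up with ethr_atomic32_init() */

    static int set_initialized_once(void)
    {
        /* OR the flag in and inspect the old value; only one caller sees it clear. */
        ethr_sint32_t old = ethr_atomic32_read_bor_mb(&my_state, MY_INIT_FLAG);
        return (old & MY_INIT_FLAG) == 0;
    }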
+
+
+
+/* --------- Info functions --------- */
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+char *zero_ops[] = {NULL};
+#endif
+
+
+static char *native_su_dw_atomic_ops[] = {
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG
+ "cmpxchg",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RB
+ "cmpxchg_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_WB
+ "cmpxchg_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_ACQB
+ "cmpxchg_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RELB
+ "cmpxchg_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_MB
+ "cmpxchg_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET
+ "set",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_RB
+ "set_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_WB
+ "set_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_ACQB
+ "set_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_RELB
+ "set_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_MB
+ "set_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ
+ "read",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_RB
+ "read_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_WB
+ "read_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_ACQB
+ "read_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_RELB
+ "read_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_MB
+ "read_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT
+ "init",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_RB
+ "init_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_WB
+ "init_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_ACQB
+ "init_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_RELB
+ "init_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_INIT_MB
+ "init_mb",
+#endif
+ NULL
+};
+
+char **
+ethr_native_su_dw_atomic_ops(void)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (!ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ return &zero_ops[0];
+#endif
+ return &native_su_dw_atomic_ops[0];
}
+
+static char *native_dw_atomic_ops[] = {
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG
+ "cmpxchg",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RB
+ "cmpxchg_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_WB
+ "cmpxchg_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_ACQB
+ "cmpxchg_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RELB
+ "cmpxchg_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_MB
+ "cmpxchg_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET
+ "set",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_RB
+ "set_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_WB
+ "set_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_ACQB
+ "set_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_RELB
+ "set_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_MB
+ "set_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ
+ "read",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_RB
+ "read_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_WB
+ "read_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_ACQB
+ "read_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_RELB
+ "read_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_MB
+ "read_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT
+ "init",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_RB
+ "init_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_WB
+ "init_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_ACQB
+ "init_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_RELB
+ "init_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_INIT_MB
+ "init_mb",
+#endif
+ NULL
+};
+
+char **
+ethr_native_dw_atomic_ops(void)
+{
+
+#if defined(ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ if (!ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__)
+ return &zero_ops[0];
+#endif
+ return &native_dw_atomic_ops[0];
+}
+
+
+static char *native_atomic64_ops[] = {
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG
+ "cmpxchg",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RB
+ "cmpxchg_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_WB
+ "cmpxchg_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_ACQB
+ "cmpxchg_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RELB
+ "cmpxchg_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB
+ "cmpxchg_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG
+ "xchg",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_RB
+ "xchg_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_WB
+ "xchg_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_ACQB
+ "xchg_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_RELB
+ "xchg_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_XCHG_MB
+ "xchg_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET
+ "set",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RB
+ "set_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_WB
+ "set_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_ACQB
+ "set_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_RELB
+ "set_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB
+ "set_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT
+ "init",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RB
+ "init_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_WB
+ "init_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_ACQB
+ "init_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_RELB
+ "init_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INIT_MB
+ "init_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN
+ "add_return",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RB
+ "add_return_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_WB
+ "add_return_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_ACQB
+ "add_return_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RELB
+ "add_return_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB
+ "add_return_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ
+ "read",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RB
+ "read_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_WB
+ "read_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_ACQB
+ "read_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RELB
+ "read_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_MB
+ "read_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN
+ "inc_return",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RB
+ "inc_return_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_WB
+ "inc_return_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_ACQB
+ "inc_return_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RELB
+ "inc_return_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_MB
+ "inc_return_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN
+ "dec_return",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RB
+ "dec_return_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_WB
+ "dec_return_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_ACQB
+ "dec_return_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_RELB
+ "dec_return_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_MB
+ "dec_return_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD
+ "add",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RB
+ "add_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_WB
+ "add_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_ACQB
+ "add_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RELB
+ "add_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_MB
+ "add_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC
+ "inc",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RB
+ "inc_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_WB
+ "inc_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_ACQB
+ "inc_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RELB
+ "inc_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_MB
+ "inc_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC
+ "dec",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RB
+ "dec_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_WB
+ "dec_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_ACQB
+ "dec_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RELB
+ "dec_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_MB
+ "dec_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD
+ "and_retold",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_RB
+ "and_retold_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_WB
+ "and_retold_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_ACQB
+ "and_retold_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_RELB
+ "and_retold_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_AND_RETOLD_MB
+ "and_retold_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD
+ "or_retold",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_RB
+ "or_retold_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_WB
+ "or_retold_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_ACQB
+ "or_retold_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_RELB
+ "or_retold_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC64_OR_RETOLD_MB
+ "or_retold_mb",
+#endif
+ NULL
+};
+
+char **
+ethr_native_atomic64_ops(void)
+{
+
+ return &native_atomic64_ops[0];
+}
+
+
+static char *native_atomic32_ops[] = {
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG
+ "cmpxchg",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RB
+ "cmpxchg_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_WB
+ "cmpxchg_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB
+ "cmpxchg_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB
+ "cmpxchg_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB
+ "cmpxchg_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG
+ "xchg",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RB
+ "xchg_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_WB
+ "xchg_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_ACQB
+ "xchg_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RELB
+ "xchg_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_MB
+ "xchg_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET
+ "set",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RB
+ "set_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_WB
+ "set_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_ACQB
+ "set_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_RELB
+ "set_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB
+ "set_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT
+ "init",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_RB
+ "init_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_WB
+ "init_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_ACQB
+ "init_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_RELB
+ "init_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INIT_MB
+ "init_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN
+ "add_return",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RB
+ "add_return_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_WB
+ "add_return_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_ACQB
+ "add_return_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RELB
+ "add_return_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB
+ "add_return_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ
+ "read",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RB
+ "read_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_WB
+ "read_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_ACQB
+ "read_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RELB
+ "read_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_MB
+ "read_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN
+ "inc_return",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RB
+ "inc_return_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_WB
+ "inc_return_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_ACQB
+ "inc_return_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RELB
+ "inc_return_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB
+ "inc_return_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN
+ "dec_return",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RB
+ "dec_return_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_WB
+ "dec_return_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_ACQB
+ "dec_return_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RELB
+ "dec_return_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_MB
+ "dec_return_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD
+ "add",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RB
+ "add_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_WB
+ "add_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_ACQB
+ "add_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RELB
+ "add_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_MB
+ "add_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC
+ "inc",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RB
+ "inc_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_WB
+ "inc_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_ACQB
+ "inc_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RELB
+ "inc_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_MB
+ "inc_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC
+ "dec",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RB
+ "dec_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_WB
+ "dec_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_ACQB
+ "dec_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RELB
+ "dec_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_MB
+ "dec_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD
+ "and_retold",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RB
+ "and_retold_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_WB
+ "and_retold_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_ACQB
+ "and_retold_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RELB
+ "and_retold_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_MB
+ "and_retold_mb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD
+ "or_retold",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RB
+ "or_retold_rb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_WB
+ "or_retold_wb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_ACQB
+ "or_retold_acqb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RELB
+ "or_retold_relb",
+#endif
+#ifdef ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_MB
+ "or_retold_mb",
+#endif
+ NULL
+};
+
+char **
+ethr_native_atomic32_ops(void)
+{
+
+ return &native_atomic32_ops[0];
+}
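Editorial note: a hedged usage sketch of the info functions above. Each *_ops() function returns a NULL-terminated array with the names of the native operations the build provides, which is convenient for diagnostics; print_native_atomic32_ops() is an invented name, not part of the patch.

    #include <stdio.h>

    static void print_native_atomic32_ops(void)
    {
        char **ops = ethr_native_atomic32_ops();
        while (*ops)
            printf("native atomic32 op: %s\n", *ops++);
    }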
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
index 2c3e25a805..521640317e 100644
--- a/erts/lib_src/common/ethr_aux.c
+++ b/erts/lib_src/common/ethr_aux.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,10 +31,6 @@
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHR_AUX_IMPL__
-#define ETHR_ATOMIC_IMPL__ /* Needed in order to pull in
- native atomic implementations
- for optimized fallbacks of
- spinlocks and rwspinlocks */
#include "ethread.h"
#include "ethr_internal.h"
#include <string.h>
@@ -75,10 +71,87 @@ static int main_threads;
static int init_ts_event_alloc(void);
+ethr_runtime_t ethr_runtime__
+#ifdef __GNUC__
+__attribute__ ((aligned (ETHR_CACHE_LINE_SIZE)))
+#endif
+ ;
+
+#if defined(ETHR_X86_RUNTIME_CONF__)
+
+/*
+ * x86/x86_64 specifics shared between the Windows and
+ * pthread implementations.
+ */
+
+#define ETHR_IS_X86_VENDOR(V, B, C, D) \
+ (sizeof(V) == 13 && is_x86_vendor((V), (B), (C), (D)))
+
+static ETHR_INLINE int
+is_x86_vendor(char *str, int ebx, int ecx, int edx)
+{
+ return (*((int *) &str[0]) == ebx
+ && *((int *) &str[sizeof(int)]) == edx
+ && *((int *) &str[sizeof(int)*2]) == ecx);
+}
+
+static void
+x86_init(void)
+{
+ int eax, ebx, ecx, edx;
+
+ eax = ebx = ecx = edx = 0;
+
+ ethr_x86_cpuid__(&eax, &ebx, &ecx, &edx);
+
+ if (eax > 0
+ && (ETHR_IS_X86_VENDOR("GenuineIntel", ebx, ecx, edx)
+ || ETHR_IS_X86_VENDOR("AuthenticAMD", ebx, ecx, edx))) {
+ eax = 1;
+ ethr_x86_cpuid__(&eax, &ebx, &ecx, &edx);
+ }
+ else {
+ /*
+ * The meaning of the feature flags for this
+	 * vendor has not been verified.
+ */
+ eax = ebx = ecx = edx = 0;
+ }
+
+ /*
+ * The feature flags tested below have only been verified
+ * for vendors checked above. Also note that only these
+ * feature flags have been verified to have these specific
+ * meanings. If another feature flag test is introduced,
+ * it has to be verified to have the same meaning for all
+ * vendors above.
+ */
+
+#if ETHR_SIZEOF_PTR == 8
+ /* bit 13 of ecx is set if we have cmpxchg16b */
+ ethr_runtime__.conf.have_dw_cmpxchg = (ecx & (1 << 13));
+#elif ETHR_SIZEOF_PTR == 4
+ /* bit 8 of edx is set if we have cmpxchg8b */
+ ethr_runtime__.conf.have_dw_cmpxchg = (edx & (1 << 8));
+#else
+# error "Not supported"
+#endif
+ /* bit 26 of edx is set if we have sse2 */
+ ethr_runtime__.conf.have_sse2 = (edx & (1 << 26));
+}
+
+#endif /* ETHR_X86_RUNTIME_CONF__ */
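Editorial note: a hedged sketch of how the flags filled in by x86_init() might be consumed elsewhere in the library; the exact consumers are not shown in this hunk and the function name below is invented.

    static int can_use_native_dw_atomics(void)
    {
        /* have_dw_cmpxchg is set from cpuid: cmpxchg16b on 64-bit,
         * cmpxchg8b on 32-bit (see x86_init() above). */
        return ethr_runtime__.conf.have_dw_cmpxchg != 0;
    }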
+
+
int
ethr_init_common__(ethr_init_data *id)
{
int res;
+
+#if defined(ETHR_X86_RUNTIME_CONF__)
+ x86_init();
+#endif
+
if (id) {
ethr_thr_prepare_func__ = id->thread_create_prepare_func;
ethr_thr_parent_func__ = id->thread_create_parent_func;
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
index 2ddef32dfc..e363279f2e 100644
--- a/erts/lib_src/common/ethr_mutex.c
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -26,8 +26,9 @@
#include "config.h"
#endif
-#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_INLINE_MTX_FUNC_NAME_(X) X ## __
#define ETHR_MUTEX_IMPL__
+#define ETHR_TRY_INLINE_FUNCS
#include <limits.h>
#include "ethread.h"
@@ -222,9 +223,59 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
int try_write_lock);
#endif
+/* -- Utilities used by multiple implementations -- */
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__) \
+ || defined(ETHR_WIN32_THREADS)
+
+static ETHR_INLINE void
+enqueue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (!*queue) {
+ *queue = tse_start;
+ tse_start->prev = tse_end;
+ tse_end->next = tse_start;
+ }
+ else {
+ tse_end->next = *queue;
+ tse_start->prev = (*queue)->prev;
+ (*queue)->prev->next = tse_start;
+ (*queue)->prev = tse_end;
+ }
+}
+
+
+static ETHR_INLINE void
+dequeue(ethr_ts_event **queue,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ if (tse_start->prev == tse_end) {
+ ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
+ *queue = NULL;
+ }
+ else {
+ if (*queue == tse_start)
+ *queue = tse_end->next;
+ tse_end->next->prev = tse_start->prev;
+ tse_start->prev->next = tse_end->next;
+ }
+}
+
+#endif
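Editorial note: the enqueue()/dequeue() helpers above maintain a circular doubly-linked list threaded through the ts-event nodes and can splice in or remove a whole sublist in one call. The hedged sketch below traces the invariants for two single-node operations; the nodes are assumed to come from ethr_get_ts_event() and the function name is invented.

    static void queue_sketch(ethr_ts_event *a, ethr_ts_event *b)
    {
        ethr_ts_event *q = NULL;

        enqueue(&q, a, a);   /* q -> a; a->next == a->prev == a        */
        enqueue(&q, b, b);   /* q -> a <-> b, still circularly linked  */
        dequeue(&q, a, a);   /* q -> b; b is alone on the ring again   */
        dequeue(&q, b, b);   /* q == NULL                              */
    }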
+
#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
-/* -- Utilities operating both on ordinary mutexes and read write mutexes -- */
+static ETHR_INLINE void
+insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
+{
+ tse->next = tse_pred->next;
+ tse->prev = tse_pred;
+ tse_pred->next->prev = tse;
+ tse_pred->next = tse;
+}
static ETHR_INLINE void
rwmutex_freqread_wtng_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
@@ -354,51 +405,6 @@ rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
return res;
}
-
-static ETHR_INLINE void
-enqueue(ethr_ts_event **queue,
- ethr_ts_event *tse_start,
- ethr_ts_event *tse_end)
-{
- if (!*queue) {
- *queue = tse_start;
- tse_start->prev = tse_end;
- tse_end->next = tse_start;
- }
- else {
- tse_end->next = *queue;
- tse_start->prev = (*queue)->prev;
- (*queue)->prev->next = tse_start;
- (*queue)->prev = tse_end;
- }
-}
-
-static ETHR_INLINE void
-insert(ethr_ts_event *tse_pred, ethr_ts_event *tse)
-{
- tse->next = tse_pred->next;
- tse->prev = tse_pred;
- tse_pred->next->prev = tse;
- tse_pred->next = tse;
-}
-
-static ETHR_INLINE void
-dequeue(ethr_ts_event **queue,
- ethr_ts_event *tse_start,
- ethr_ts_event *tse_end)
-{
- if (tse_start->prev == tse_end) {
- ETHR_ASSERT(*queue == tse_start && tse_end->next == tse_start);
- *queue = NULL;
- }
- else {
- if (*queue == tse_start)
- *queue = tse_end->next;
- tse_end->next->prev = tse_start->prev;
- tse_start->prev->next = tse_end->next;
- }
-}
-
static void
event_wait(struct ethr_mutex_base_ *mtxb,
ethr_ts_event *tse,
@@ -1243,7 +1249,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
return 0;
}
-#else
+#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
/* -- pthread mutex and condition variables -------------------------------- */
int
@@ -1260,6 +1266,12 @@ ethr_mutex_init(ethr_mutex *mtx)
}
int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ return ethr_mutex_init(mtx);
+}
+
+int
ethr_mutex_destroy(ethr_mutex *mtx)
{
#if ETHR_XCHK
@@ -1292,6 +1304,12 @@ ethr_cond_init(ethr_cond *cnd)
}
int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+ return ethr_cond_init(cnd);
+}
+
+int
ethr_cond_destroy(ethr_cond *cnd)
{
#if ETHR_XCHK
@@ -1353,7 +1371,388 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
return res;
}
-#endif /* pthread_mutex */
+#elif defined(ETHR_WIN32_THREADS) || defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+
+/*
+ * As of Vista/Server 2008, Windows has condition variables that can be
+ * used with critical sections. However, we need to be able to run on
+ * older Windows versions too, so we need to implement condition variables
+ * ourselves.
+ */
+
+#ifdef ETHR_DBG_WIN_MTX_WITH_PTHREADS
+/*
+ * For debugging of this implementation on POSIX platforms...
+ */
+
+#define ethr_win_get_errno__() EINVAL
+#if defined(__GNUC__)
+#define __forceinline __inline__
+#else
+#define __forceinline
+#endif
+
+static int
+InitializeCriticalSectionAndSpinCount(CRITICAL_SECTION *cs, int sc)
+{
+ return 0 == pthread_mutex_init((pthread_mutex_t *) cs, NULL);
+}
+
+static void DeleteCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res = pthread_mutex_destroy((pthread_mutex_t *) cs);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+int TryEnterCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res;
+ res = pthread_mutex_trylock((pthread_mutex_t *) cs);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res == 0;
+}
+
+void EnterCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res = pthread_mutex_lock((pthread_mutex_t *) cs);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+void LeaveCriticalSection(CRITICAL_SECTION *cs)
+{
+ int res = pthread_mutex_unlock((pthread_mutex_t *) cs);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif
+
+#define ETHR_CND_WAIT__ ((ethr_sint32_t) 0x11dead11)
+#define ETHR_CND_WAKEUP__ ((ethr_sint32_t) 0x11beef11)
+
+static __forceinline void
+cond_wakeup(ethr_ts_event *tse)
+{
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT__);
+
+ ethr_atomic32_set_relb(&tse->uaflgs, ETHR_CND_WAKEUP__);
+ ethr_event_set(&tse->event);
+}
+
+void
+ethr_mutex_cond_wakeup__(ethr_mutex *mtx)
+{
+ /*
+ * Called by ethr_mutex_unlock() when we have
+ * cond signal/broadcast wakeups waiting to
+ * be completed.
+ */
+ ethr_ts_event *tse;
+
+ if (!mtx->posix_compliant) {
+ tse = mtx->wakeups;
+ dequeue(&mtx->wakeups, tse, tse);
+ }
+ else {
+ ethr_spin_lock(&mtx->lock);
+ tse = mtx->wakeups;
+ if (tse)
+ dequeue(&mtx->wakeups, tse, tse);
+ if (!mtx->wakeups)
+ ethr_atomic32_set_relb(&mtx->have_wakeups, 0);
+ ethr_spin_unlock(&mtx->lock);
+ }
+
+ LeaveCriticalSection(&mtx->cs);
+
+ ETHR_ASSERT(tse || mtx->posix_compliant);
+
+ /*
+     * We delay the actual condition variable wakeup until
+     * this point, when we have left the critical section.
+     * This avoids waking the other thread only to have it
+     * immediately go back to sleep waiting for the critical
+     * section that we are still inside.
+     *
+     * We also wake only one thread at a time, even if
+     * there are multiple threads waiting to be woken.
+     * Otherwise all but one would be woken and then
+     * immediately have to go back to sleep on the critical
+     * section.
+ * Since each wakeup is guaranteed to generate at
+ * least one lock/unlock sequence on this mutex, all
+ * threads will eventually be woken.
+ */
+
+ if (tse)
+ cond_wakeup(tse);
+}
+
+int
+ethr_mutex_init_opt(ethr_mutex *mtx, ethr_mutex_opt *opt)
+{
+ int spincount;
+#if ETHR_XCHK
+ if (!mtx) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+
+ spincount = opt ? opt->aux_spincount : 0;
+ if (spincount < 0)
+ spincount = 0;
+
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->cs, spincount)) {
+#if ETHR_XCHK
+ mtx->initialized = 0;
+#endif
+ return ethr_win_get_errno__();
+ }
+
+ mtx->posix_compliant = opt ? opt->posix_compliant : 0;
+ mtx->wakeups = NULL;
+ if (mtx->posix_compliant) {
+ ethr_atomic32_init(&mtx->locked, 0);
+ ethr_atomic32_init(&mtx->have_wakeups, 0);
+ ethr_spinlock_init(&mtx->lock);
+ }
+ return 0;
+}
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+ return ethr_mutex_init_opt(mtx, NULL);
+}
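Editorial note: a hedged sketch of initializing a mutex with options. Only the two option fields referenced above (posix_compliant and aux_spincount) are assumed to exist; judging from posix_compliant_mtx_enqueue() further down, posix_compliant appears to permit signaling a condition variable without holding the associated mutex. All names below are invented.

    static ethr_mutex mtx;

    static int init_my_mutex(void)
    {
        ethr_mutex_opt mopt;
        mopt.posix_compliant = 1;   /* allow cond signal without holding mtx (assumed) */
        mopt.aux_spincount = 0;     /* spin count for the critical section             */
        return ethr_mutex_init_opt(&mtx, &mopt);
    }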
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+ DeleteCriticalSection(&mtx->cs);
+ if (mtx->posix_compliant)
+ return ethr_spinlock_destroy(&mtx->lock);
+ else
+ return 0;
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ void *udata;
+ ethr_ts_event *tse = ethr_get_ts_event();
+ int spincount;
+
+ udata = tse->udata;
+ tse->udata = (void *) mtx;
+ ethr_atomic32_set_relb(&tse->uaflgs, ETHR_CND_WAIT__);
+
+ EnterCriticalSection(&cnd->cs);
+ enqueue(&cnd->waiters, tse, tse);
+ LeaveCriticalSection(&cnd->cs);
+
+ ethr_mutex_unlock(mtx);
+
+ spincount = cnd->spincount;
+
+ while (ethr_atomic32_read_acqb(&tse->uaflgs) != ETHR_CND_WAKEUP__) {
+ ethr_event_reset(&tse->event);
+ if (ethr_atomic32_read_acqb(&tse->uaflgs) == ETHR_CND_WAKEUP__)
+ break;
+ ethr_event_swait(&tse->event, spincount);
+ spincount = 0;
+ }
+
+ tse->udata = udata;
+ ethr_leave_ts_event(tse);
+
+ ethr_mutex_lock(mtx);
+
+ return 0;
+}
+
+static __forceinline void
+posix_compliant_mtx_enqueue(ethr_mutex *mtx,
+ ethr_ts_event *tse_start,
+ ethr_ts_event *tse_end)
+{
+ ethr_ts_event *tse_wakeup = NULL; /* Avoid erroneous compiler warning... */
+ /*
+ * The associated mutex might not be locked, so we need to
+ * check if it is. If locked, enqueue for wakeup at unlock;
+     * otherwise, wake up the first one now and enqueue the rest.
+ */
+ if (tse_start == tse_end && !ethr_atomic32_read(&mtx->locked)) {
+ tse_wakeup = tse_start;
+ wakeup:
+ cond_wakeup(tse_wakeup);
+ }
+ else {
+ int need_wakeup;
+ ethr_spin_lock(&mtx->lock);
+ if (!mtx->wakeups)
+ ethr_atomic32_set_mb(&mtx->have_wakeups, 1);
+ need_wakeup = !ethr_atomic32_read(&mtx->locked);
+ if (need_wakeup) {
+ if (tse_start == tse_end) {
+ if (!mtx->wakeups)
+ ethr_atomic32_set_relb(&mtx->have_wakeups, 0);
+ ethr_spin_unlock(&mtx->lock);
+ tse_wakeup = tse_start;
+ goto wakeup;
+ }
+ tse_wakeup = tse_start;
+ tse_start = tse_start->next;
+ }
+ enqueue(&mtx->wakeups, tse_start, tse_end);
+ ethr_spin_unlock(&mtx->lock);
+ if (need_wakeup)
+ goto wakeup;
+ }
+}
+
+static __forceinline void
+enqueue_cond_wakeups(ethr_ts_event *queue, int posix_compliant)
+{
+ if (queue) {
+ int more;
+ ethr_ts_event *q = queue;
+
+ /*
+ * Waiters may be using different mutexes...
+ */
+
+ do {
+ ethr_mutex *mtx;
+ ethr_ts_event *tse, *tse_start, *tse_end;
+
+ more = 0;
+ tse_start = q;
+ mtx = (ethr_mutex *) tse_start->udata;
+
+ ETHR_ASSERT(posix_compliant
+ ? mtx->posix_compliant
+ : !mtx->posix_compliant);
+
+ ETHR_ASSERT(ethr_atomic32_read(&tse_start->uaflgs)
+ == ETHR_CND_WAIT__);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+
+ tse_end = tse_start->prev;
+
+ for (tse = tse_start->next; tse != tse_start; tse = tse->next) {
+
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs)
+ == ETHR_CND_WAIT__);
+
+ if (mtx != (ethr_mutex *) tse->udata) {
+ tse_end = tse->prev;
+ dequeue(&q, tse_start, tse_end);
+ more = 1;
+ break;
+ }
+ }
+
+ if (posix_compliant)
+ posix_compliant_mtx_enqueue(mtx, tse_start, tse_end);
+ else
+ enqueue(&mtx->wakeups, tse_start, tse_end);
+
+ } while (more);
+ }
+}
+
+void
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ ethr_ts_event *waiters;
+
+ EnterCriticalSection(&cnd->cs);
+ waiters = cnd->waiters;
+ cnd->waiters = NULL;
+ LeaveCriticalSection(&cnd->cs);
+
+ if (cnd->posix_compliant)
+ enqueue_cond_wakeups(waiters, 1);
+ else
+ enqueue_cond_wakeups(waiters, 0);
+}
+
+void
+ethr_cond_signal(ethr_cond *cnd)
+{
+ ethr_mutex *mtx;
+ ethr_ts_event *tse;
+
+ EnterCriticalSection(&cnd->cs);
+ tse = cnd->waiters;
+ if (tse)
+ dequeue(&cnd->waiters, tse, tse);
+ LeaveCriticalSection(&cnd->cs);
+
+ if (tse) {
+ mtx = (ethr_mutex *) tse->udata;
+
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT__);
+ ETHR_ASSERT(mtx->initialized == ETHR_MUTEX_INITIALIZED);
+ ETHR_ASSERT(cnd->posix_compliant
+ ? mtx->posix_compliant
+ : !mtx->posix_compliant);
+
+ if (cnd->posix_compliant)
+ posix_compliant_mtx_enqueue(mtx, tse, tse);
+ else
+ enqueue(&mtx->wakeups, tse, tse);
+ }
+}
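Editorial note: regardless of which backend above ends up compiled in, the condition variable is used with the usual predicate loop; ethr_cond_wait() re-acquires the mutex before returning. The flag, variable, and function names in this hedged sketch are invented and it is not part of the patch.

    static ethr_mutex lock;   /* assumed initialized with ethr_mutex_init() */
    static ethr_cond  cond;   /* assumed initialized with ethr_cond_init()  */
    static int        ready;

    static void wait_until_ready(void)
    {
        ethr_mutex_lock(&lock);
        while (!ready)
            ethr_cond_wait(&cond, &lock);
        ethr_mutex_unlock(&lock);
    }

    static void make_ready(void)
    {
        ethr_mutex_lock(&lock);
        ready = 1;
        ethr_cond_signal(&cond);
        ethr_mutex_unlock(&lock);
    }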
+
+int
+ethr_cond_init_opt(ethr_cond *cnd, ethr_cond_opt *opt)
+{
+ int spincount;
+
+#if ETHR_XCHK
+ if (!cnd) {
+ ETHR_ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+
+ spincount = opt ? opt->aux_spincount : 0;
+ if (spincount < 0)
+ spincount = 0;
+
+ if (!InitializeCriticalSectionAndSpinCount(&cnd->cs, spincount)) {
+#if ETHR_XCHK
+ cnd->initialized = 0;
+#endif
+ return ethr_win_get_errno__();
+ }
+
+ cnd->posix_compliant = opt ? opt->posix_compliant : 0;
+ cnd->waiters = NULL;
+ cnd->spincount = spincount;
+ return 0;
+}
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+ return ethr_cond_init_opt(cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+ DeleteCriticalSection(&cnd->cs);
+ return 0;
+}
+
+#endif
/* -- Exported symbols of inline functions --------------------------------- */
@@ -1968,7 +2367,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL)
- imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__;
+ imask = ETHR_RWMTX_R_PEND_UNLCK_MASK__|ETHR_RWMTX_R_ABRT_UNLCK_FLG__;
else {
#ifdef ETHR_RLOCK_WITH_INC_DEC
imask = ETHR_RWMTX_RS_MASK__;
diff --git a/erts/lib_src/pthread/ethr_x86_sse2_asm.c b/erts/lib_src/pthread/ethr_x86_sse2_asm.c
new file mode 100644
index 0000000000..6cbe73cf16
--- /dev/null
+++ b/erts/lib_src/pthread/ethr_x86_sse2_asm.c
@@ -0,0 +1,31 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: SSE2 asm implementations
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* Defining ETHR_X86_SSE2_ASM_C__ causes the SSE2 asm implementations to be compiled and included */
+#define ETHR_X86_SSE2_ASM_C__
+#include "ethread.h"
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
index f047104103..fb7d135418 100644
--- a/erts/lib_src/pthread/ethread.c
+++ b/erts/lib_src/pthread/ethread.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -121,6 +121,117 @@ ethr_ts_event *ethr_get_tse__(void)
return pthread_getspecific(ethr_ts_event_key__);
}
+#if defined(ETHR_PPC_RUNTIME_CONF__)
+
+#include <sys/wait.h>
+
+static void
+handle_lwsync_sigill(int signum)
+{
+ _exit(1);
+}
+
+static int
+ppc_init__(void)
+{
+ int pid;
+
+    /* If anything whatsoever fails, we assume no lwsync for safety */
+ ethr_runtime__.conf.have_lwsync = 0;
+
+ /*
+ * We perform the lwsync test (which might cause an illegal
+ * instruction signal) in a separate process in order to be
+ * completely certain that we do not mess up our own state.
+ */
+ pid = fork();
+ if (pid == 0) {
+ struct sigaction act, oact;
+
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_RESETHAND;
+ act.sa_handler = handle_lwsync_sigill;
+ if (sigaction(SIGILL, &act, &oact) != 0)
+ _exit(2);
+
+ __asm__ __volatile__ ("lwsync\n\t" : : : "memory");
+
+ _exit(0);
+ }
+
+ if (pid != -1) {
+ while (1) {
+ int status, res;
+ res = waitpid(pid, &status, 0);
+ if (res == pid) {
+ if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
+ ethr_runtime__.conf.have_lwsync = 1;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+#endif
+
+#if defined(ETHR_X86_RUNTIME_CONF__)
+
+void
+ethr_x86_cpuid__(int *eax, int *ebx, int *ecx, int *edx)
+{
+#if ETHR_SIZEOF_PTR == 4
+ int have_cpuid;
+ /*
+ * If it is possible to toggle eflags bit 21,
+ * we have the cpuid instruction.
+ */
+ __asm__ ("pushf\n\t"
+ "popl %%eax\n\t"
+ "movl %%eax, %%ecx\n\t"
+ "xorl $0x200000, %%eax\n\t"
+ "pushl %%eax\n\t"
+ "popf\n\t"
+ "pushf\n\t"
+ "popl %%eax\n\t"
+ "movl $0x0, %0\n\t"
+ "xorl %%ecx, %%eax\n\t"
+ "jz no_cpuid\n\t"
+ "movl $0x1, %0\n\t"
+ "no_cpuid:\n\t"
+ : "=r"(have_cpuid)
+ :
+ : "%eax", "%ecx", "cc");
+ if (!have_cpuid) {
+ *eax = *ebx = *ecx = *edx = 0;
+ return;
+ }
+#endif
+#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
+ /*
+     * When position independent code is used in 32-bit mode, the B register
+     * is used to store the global offset table address, and we may not
+ * use it as input or output in an asm. We need to save and restore the
+ * B register explicitly (for some reason gcc doesn't provide this
+ * service to us).
+ */
+ __asm__ ("pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "movl %%ebx, %1\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(*eax), "=r"(*ebx), "=c"(*ecx), "=d"(*edx)
+ : "0"(*eax)
+ : "cc");
+#else
+ __asm__ ("cpuid\n\t"
+ : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
+ : "0"(*eax)
+ : "cc");
+#endif
+}
+
+#endif /* ETHR_X86_RUNTIME_CONF__ */
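Editorial note: a hedged sketch of calling ethr_x86_cpuid__() directly; it mirrors what x86_init() in ethr_aux.c does with the leaf-1 feature flags, except that the real code also verifies the vendor string first. The function name is invented and the code is not part of the patch.

    static int probe_sse2(void)
    {
        int eax = 1, ebx = 0, ecx = 0, edx = 0;
        ethr_x86_cpuid__(&eax, &ebx, &ecx, &edx);
        return (edx & (1 << 26)) != 0;   /* bit 26 of edx: SSE2 */
    }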
+
/*
* --------------------------------------------------------------------------
* Exported functions
@@ -137,6 +248,12 @@ ethr_init(ethr_init_data *id)
ethr_not_inited__ = 0;
+#if defined(ETHR_PPC_RUNTIME_CONF__)
+ res = ppc_init__();
+ if (res != 0)
+ goto error;
+#endif
+
res = ethr_init_common__(id);
if (res != 0)
goto error;
@@ -146,6 +263,8 @@ ethr_init(ethr_init_data *id)
child_wait_spin_count = 0;
res = pthread_key_create(&ethr_ts_event_key__, ethr_ts_event_destructor__);
+ if (res != 0)
+ goto error;
return 0;
error:
diff --git a/erts/lib_src/utils/make_atomics_api b/erts/lib_src/utils/make_atomics_api
new file mode 100755
index 0000000000..74736c5a2d
--- /dev/null
+++ b/erts/lib_src/utils/make_atomics_api
@@ -0,0 +1,2286 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2011-2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-mode(compile).
+
+%%%-------------------------------------------------------------------
+%%% @author Rickard Green <[email protected]>
+%%% @copyright (C) 2011, Rickard Green
+%%% @doc
+%%% Generation of the ethread atomic API
+%%% @end
+%%% Created : 17 Jan 2011 by Rickard Green <[email protected]>
+%%%-------------------------------------------------------------------
+
+-define(H_FILE, "erts/include/internal/ethr_atomics.h").
+-define(C_FILE, "erts/lib_src/common/ethr_atomics.c").
+
+%% These order constraints are important:
+%% - 'cmpxchg' needs to appear before 'read'
+%% - 'xchg' needs to appear before 'set'
+%% - 'set' needs to appear before 'init'
+%% - 'add_read' needs to appear before 'add', 'inc_read', and 'dec_read'
+%% - 'inc_read' needs to appear before 'inc'
+%% - 'dec_read' needs to appear before 'dec'
+-define(ATOMIC_OPS, [cmpxchg, xchg, set, init, add_read,
+ read, inc_read, dec_read, add, inc,
+ dec, read_band, read_bor]).
+
+-define(DW_ATOMIC_OPS, [cmpxchg, set, read, init]).
+-define(DW_FUNC_MACRO, "ETHR_DW_ATOMIC_FUNC__").
+-define(DW_RTCHK_MACRO, "ETHR_RTCHK_USE_NATIVE_DW_ATOMIC_IMPL__").
+
+%% Barrier versions we implement
+-define(BARRIERS, [none, ddrb, rb, wb, acqb, relb, mb]).
+-define(NON_NATIVE_BARRIERS, [ddrb]).
+-define(NATIVE_BARRIERS, (?BARRIERS -- ?NON_NATIVE_BARRIERS)).
+
+-define(ATOMIC_SIZES, ["dword", "word", "32"]).
+
+-define(HAVE_NATIVE_ATOMIC, "ETHR_HAVE_ETHR_NATIVE_ATOMIC").
+
+-define(SU_DW_SINT_FIELD, "dw_sint").
+-define(DW_SINT_FIELD, "sint").
+
+%% Fallback
+-define(ETHR_ATMC_FLLBK_ADDR_BITS, "10").
+-define(ETHR_ATMC_FLLBK_ADDR_SHIFT, "6").
+
+-record(atomic_context, {dw,
+ amc_fallback,
+ ret_type,
+ ret_var,
+ arg1,
+ arg2,
+ arg3,
+ have_native_atomic_ops,
+ atomic,
+ atomic_t,
+ addr_aint_t,
+ aint_t,
+ naint_t,
+ 'NATMC',
+ 'ATMC',
+ unusual_val}).
+
+atomic_context("dword") ->
+ #atomic_context{dw = true,
+ amc_fallback = true,
+ ret_type = "int",
+ ret_var = "res",
+ arg1 = "var",
+ arg2 = "val",
+ arg3 = "old_val",
+ have_native_atomic_ops = "ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS",
+ atomic = "ethr_dw_atomic",
+ atomic_t = "ethr_dw_atomic_t",
+ addr_aint_t = "ethr_sint_t",
+ aint_t = "ethr_dw_sint_t",
+ naint_t = "ETHR_SU_DW_NAINT_T__",
+ 'NATMC' = "DW_NATMC",
+ 'ATMC' = "DW_ATMC",
+ unusual_val = "ETHR_UNUSUAL_SINT_VAL__"};
+atomic_context(Size) ->
+ {SizeSuffix, HaveSize, AMC} = case Size of
+ "word" -> {"", "WORD_SZ", true};
+ _ -> {Size, Size++"BIT", false}
+ end,
+ AintT = ["ethr_sint", SizeSuffix, "_t"],
+ #atomic_context{dw = false,
+ amc_fallback = AMC,
+ ret_type = AintT,
+ ret_var = "res",
+ arg1 = "var",
+ arg2 = "val",
+ arg3 = "old_val",
+ have_native_atomic_ops = ["ETHR_HAVE_", HaveSize, "_NATIVE_ATOMIC_OPS"],
+ atomic = ["ethr_atomic", SizeSuffix],
+ atomic_t = ["ethr_atomic", SizeSuffix, "_t"],
+ addr_aint_t = AintT,
+ aint_t = AintT,
+ naint_t = ["ETHR_NAINT", SizeSuffix, "_T__"],
+ 'NATMC' = ["NATMC", SizeSuffix],
+ 'ATMC' = ["ATMC", SizeSuffix],
+ unusual_val = ["ETHR_UNUSUAL_SINT", SizeSuffix, "_VAL__"]}.
+
+-record(op_context, {ret, var, val1, val2}).
+
+-define(POTENTIAL_NBITS, ["64", "32"]).
+
+is_return_op(#atomic_context{dw = false}, add) -> false;
+is_return_op(#atomic_context{dw = false}, inc) -> false;
+is_return_op(#atomic_context{dw = false}, dec) -> false;
+is_return_op(#atomic_context{dw = true}, read) -> false;
+is_return_op(_AC, init) -> false;
+is_return_op(_AC, set) -> false;
+is_return_op(_AC, _OP) -> true.
+
+native(add_read) -> add_return;
+native(inc_read) -> inc_return;
+native(dec_read) -> dec_return;
+native(read_band) -> and_retold;
+native(read_bor) -> or_retold;
+native(Op) -> Op.
+
+op(Op, #op_context{var = Var, val1 = Val1}) when Op == init; Op == set ->
+ [Var, " = ", Val1];
+op(read, #op_context{ret = Ret, var = Var}) ->
+ [Ret, " = ", Var];
+op(add_read, OpC) ->
+ [op(add, OpC), "; ", op(read, OpC)];
+op(add, #op_context{var = Var, val1 = Val1}) ->
+ [Var, " += ", Val1];
+op(inc, #op_context{var = Var}) ->
+ ["++(", Var, ")"];
+op(dec, #op_context{var = Var}) ->
+ ["--(", Var, ")"];
+op(inc_read, #op_context{ret = Ret, var = Var}) ->
+ [Ret, " = ++(", Var, ")"];
+op(dec_read, #op_context{ret = Ret, var = Var}) ->
+ [Ret, " = --(", Var, ")"];
+op(read_band, #op_context{var = Var, val1 = Val1} = OpC) ->
+ [op(read, OpC), "; ", Var, " &= ", Val1];
+op(read_bor, #op_context{var = Var, val1 = Val1} = OpC) ->
+ [op(read, OpC), "; ", Var, " |= ", Val1];
+op(xchg, OpC) ->
+ [op(read, OpC), "; ", op(set, OpC)];
+op(cmpxchg, #op_context{ret = Ret, var = Var, val1 = Val1, val2 = Val2}) ->
+ [Ret, " = (", Var, " == ", Val2, " ? (", Var, " = ", Val1, ", ", Val2, ") : ", Var, ")"].
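Editorial note: read as C, the cmpxchg clause above produces the expression used inside the locked fallback. With Ret = res, Var = *var, Val1 = new and Val2 = exp it comes out roughly as the line below; this is a hedged illustration of the generator output, not text from the patch.

    res = (*var == exp ? (*var = new, exp) : *var);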
+
+dw_op(Op, #op_context{var = Var, val1 = Val1}) when Op == init; Op == set ->
+ [Var, "[0] = ", Val1, "[0]; ", Var, "[1] = ", Val1, "[1]"];
+dw_op(read, #op_context{var = Var, val1 = Val1}) ->
+ [Val1, "[0] = ", Var, "[0]; ", Val1, "[1] = ", Var, "[1]"];
+dw_op(cmpxchg, #op_context{ret = Ret, var = Var, val1 = Val1, val2 = Val2}) ->
+ ["
+ {
+ ", Ret, " = (", Var, "[0] == ", Val2, "[0] && ", Var, "[1] == ", Val2, "[1]);
+ if (", Ret, ") {
+ ", Var, "[0] = ", Val1, "[0];
+ ", Var, "[1] = ", Val1, "[1];
+ }
+ else {
+ ", Val2, "[0] = ", Var, "[0];
+ ", Val2, "[1] = ", Var, "[1];
+ }
+ }"].
+
+op_head_tail(init) -> {undef, undef};
+op_head_tail(set) -> {store, store};
+op_head_tail(read) -> {load, load};
+op_head_tail(_) -> {load, undef}.
+
+op_barrier_ext(none) -> "";
+op_barrier_ext(Barrier) -> [$_, a2l(Barrier)].
+
+op_call(addr, _DW, Ret, Func, Arg1, _Arg2, _Arg3, _TypeCast) ->
+ [Ret, " ", Func, "(", Arg1, ");"];
+op_call(Op, false, Ret, Func, Arg1, _Arg2, _Arg3, _TypeCast) when Op == read;
+ Op == inc_read;
+ Op == inc_return;
+ Op == dec_read;
+ Op == dec_return ->
+ [Ret, " ", Func, "(", Arg1, ");"];
+op_call(Op, false, _Ret, Func, Arg1, _Arg2, _Arg3, _TypeCast) when Op == inc;
+ Op == dec ->
+ [Func, "(", Arg1, ");"];
+op_call(Op, false, Ret, Func, Arg1, Arg2, _Arg3, TypeCast) when Op == add_return;
+ Op == add_read;
+ Op == read_band;
+ Op == and_retold;
+ Op == read_bor;
+ Op == or_retold;
+ Op == xchg ->
+ [Ret, " ", Func, "(", Arg1, ",", TypeCast, " ", Arg2, ");"];
+op_call(cmpxchg, _DW, Ret, Func, Arg1, Arg2, Arg3, TypeCast) ->
+ [Ret, " ", Func, "(", Arg1, ",", TypeCast, " ", Arg2, ",", TypeCast, " ", Arg3, ");"];
+op_call(_Op, _DW, _Ret, Func, Arg1, Arg2, _Arg3, TypeCast) ->
+ [Func, "(", Arg1, ",", TypeCast, " ", Arg2, ");"]. % set, init, add (!= dw), read (== dw)
+
+native_op_call(#atomic_context{dw = DW,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3,
+ aint_t = AintT,
+ 'NATMC' = NATMC,
+ naint_t = NAintT},
+ Op, B, TypeCasts) ->
+ op_call(Op,
+ DW,
+ [RetVar, " =",
+ case TypeCasts of
+ true -> [" (", AintT, ")"];
+ false -> ""
+ end],
+ ["ETHR_", NATMC, "_FUNC__(", opstr(native(Op)), op_barrier_ext(B), ")"],
+ Arg1,
+ Arg2,
+ Arg3,
+ case TypeCasts of
+ true -> [" (", NAintT, ")"];
+ false -> ""
+ end).
+
+simple_fallback(#atomic_context{arg1 = Arg1,
+ arg2 = Arg2,
+ 'ATMC' = ATMC},
+ init, B) -> %% Also double word
+ [" ETHR_", ATMC, "_FUNC__(set", op_barrier_ext(B),")(", Arg1, ", ", Arg2, ");\n"];
+simple_fallback(#atomic_context{dw = false,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ 'ATMC' = ATMC},
+ set, B) ->
+ [" (void) ETHR_", ATMC, "_FUNC__(xchg", op_barrier_ext(B),")(", Arg1, ", ", Arg2, ");\n"];
+simple_fallback(#atomic_context{dw = false,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ 'ATMC' = ATMC},
+ add, B) ->
+ [" (void) ETHR_", ATMC, "_FUNC__(add_read", op_barrier_ext(B), ")(", Arg1, ", ", Arg2, ");\n"];
+simple_fallback(#atomic_context{dw = false,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ aint_t = AintT,
+ 'ATMC' = ATMC},
+ inc_read, B) ->
+ [" ", RetVar, " = ETHR_", ATMC, "_FUNC__(add_read", op_barrier_ext(B), ")(", Arg1, ", (", AintT,") 1);\n"];
+simple_fallback(#atomic_context{dw = false,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ aint_t = AintT,
+ 'ATMC' = ATMC},
+ dec_read, B) ->
+ [" ", RetVar, " = ETHR_", ATMC, "_FUNC__(add_read", op_barrier_ext(B), ")(", Arg1, ", (", AintT,") -1);\n"];
+simple_fallback(#atomic_context{dw = false,
+ arg1 = Arg1,
+ 'ATMC' = ATMC},
+ inc, B) ->
+ [" (void) ETHR_", ATMC, "_FUNC__(inc_read", op_barrier_ext(B), ")(", Arg1, ");\n"];
+simple_fallback(#atomic_context{dw = false,
+ arg1 = Arg1,
+ 'ATMC' = ATMC},
+ dec, B) ->
+ [" (void) ETHR_", ATMC, "_FUNC__(dec_read", op_barrier_ext(B), ")(", Arg1, ");\n"];
+simple_fallback(#atomic_context{dw = false,
+ unusual_val = UnusualVal,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ aint_t = AintT,
+ 'ATMC' = ATMC},
+ read, B) ->
+ [" ", RetVar, " = ETHR_", ATMC, "_FUNC__(cmpxchg", op_barrier_ext(B), ")(", Arg1, ", (", AintT, ") ", UnusualVal, ", (", AintT,") ", UnusualVal, ");\n"];
+simple_fallback(#atomic_context{dw = true,
+ unusual_val = UnusualVal,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ aint_t = AintT,
+ 'ATMC' = ATMC},
+ read, B) ->
+ [" ", AintT, " tmp;
+ tmp.", ?DW_SINT_FIELD, "[0] = ", UnusualVal, ";
+ tmp.", ?DW_SINT_FIELD, "[1] = ", UnusualVal, ";
+ ", Arg2, "->", ?DW_SINT_FIELD, "[0] = ", UnusualVal, ";
+ ", Arg2, "->", ?DW_SINT_FIELD, "[1] = ", UnusualVal, ";
+ (void) ETHR_", ATMC, "_FUNC__(cmpxchg", op_barrier_ext(B), ")(", Arg1, ", &tmp, ", Arg2, ");
+"
+ ];
+simple_fallback(_AC, _Op, _B) ->
+ [].
+
+func_header(AC, prototype, MacroName, Op, B) ->
+ [func_header(AC, implementation, MacroName, Op, B), ";"];
+func_header(#atomic_context{'ATMC' = ATMC} = AC, inline_implementation, _MacroName, Op, B) ->
+ do_func_header(AC, Op, "static ETHR_INLINE ",
+ ["ETHR_", ATMC, "_FUNC__(", opstr(Op), op_barrier_ext(B), ")"]);
+func_header(#atomic_context{atomic = Atomic} = AC, implementation, false, Op, B) ->
+ do_func_header(AC, Op, "", [Atomic, "_", opstr(Op), op_barrier_ext(B)]);
+func_header(AC, implementation, MacroName, Op, B) ->
+ do_func_header(AC, Op, "", [MacroName, "(", opstr(Op), op_barrier_ext(B), ")"]).
+
+
+do_func_header(#atomic_context{atomic_t = AtomicT,
+ addr_aint_t = AddrAintT,
+ arg1 = Arg1},
+ addr, Inline, Func) ->
+ [Inline, AddrAintT, " *", Func, "(", AtomicT, " *", Arg1, ")"];
+do_func_header(#atomic_context{dw = false,
+ atomic_t = AtomicT,
+ aint_t = AintT,
+ arg1 = Arg1,
+ arg2 = Arg2},
+ Op, Inline, Func) when Op == init;
+ Op == set;
+ Op == add ->
+ [Inline, "void ", Func, "(", AtomicT, " *", Arg1, ", ", AintT, " ", Arg2, ")"];
+do_func_header(#atomic_context{dw = false,
+ atomic_t = AtomicT,
+ arg1 = Arg1},
+ Op, Inline, Func) when Op == inc;
+ Op == dec ->
+ [Inline, "void ", Func, "(", AtomicT, " *", Arg1, ")"];
+do_func_header(#atomic_context{dw = false,
+ atomic_t = AtomicT,
+ aint_t = AintT,
+ arg1 = Arg1},
+ Op, Inline, Func) when Op == read;
+ Op == inc_read;
+ Op == dec_read ->
+ [Inline, AintT, " ", Func, "(", AtomicT, " *", Arg1, ")"];
+do_func_header(#atomic_context{dw = false,
+ atomic_t = AtomicT,
+ aint_t = AintT,
+ arg1 = Arg1,
+ arg2 = Arg2},
+ Op, Inline, Func) when Op == add_read;
+ Op == read_band;
+ Op == read_bor;
+ Op == xchg ->
+ [Inline, AintT, " ", Func, "(", AtomicT, " *", Arg1, ", ", AintT, " ", Arg2, ")"];
+do_func_header(#atomic_context{dw = false,
+ atomic_t = AtomicT,
+ aint_t = AintT,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3},
+ cmpxchg, Inline, Func) ->
+ [Inline, AintT, " ", Func, "(", AtomicT, " *", Arg1, ", ", AintT, " ", Arg2, ", ", AintT, " ", Arg3, ")"];
+do_func_header(#atomic_context{dw = true,
+ atomic_t = AtomicT,
+ aint_t = AintT,
+ arg1 = Arg1,
+ arg2 = Arg2},
+ Op, Inline, Func) when Op == init;
+ Op == set;
+ Op == read ->
+ [Inline, "void ", Func, "(", AtomicT, " *", Arg1, ", ", AintT, " *", Arg2, ")"];
+do_func_header(#atomic_context{dw = true,
+ atomic_t = AtomicT,
+ aint_t = AintT,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3},
+ cmpxchg, Inline, Func) ->
+ [Inline, "int ", Func, "(", AtomicT, " *", Arg1, ", ", AintT, " *", Arg2, ", ", AintT, " *", Arg3, ")"].
+
+xbarriers(_Op, none, _NB) ->
+ {"", ""};
+
+xbarriers(_Op, acqb, NB) when NB == acqb; NB == mb ->
+ {"", ""};
+xbarriers(Op, acqb, NB) ->
+ case {op_head_tail(Op), NB} of
+ {{_, load}, rb} -> {"", "ETHR_MEMBAR(ETHR_LoadStore);"};
+ {{_, load}, _} -> {"", "ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);"};
+ {{_, store}, _} -> {"", "ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);"};
+ {_, rb} -> {"", "ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);"};
+ _ -> {"", "ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);"}
+ end;
+
+xbarriers(_Op, relb, NB) when NB == relb; NB == mb ->
+ {"", ""};
+xbarriers(Op, relb, NB) ->
+ case {op_head_tail(Op), NB} of
+ {{store, _}, wb} -> {"ETHR_MEMBAR(ETHR_LoadStore);", ""};
+ {{store, _}, _} -> {"ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);", ""};
+ {{load, _}, _} -> {"ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);", ""};
+ {_, wb} -> {"ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);", ""};
+ _ -> {"ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);", ""}
+ end;
+
+xbarriers(_Op, wb, NB) when NB == wb; NB == mb ->
+ {"", ""};
+xbarriers(_Op, wb, _NB) ->
+ {"ETHR_MEMBAR(ETHR_StoreStore);", ""};
+
+xbarriers(_Op, rb, NB) when NB == rb; NB == mb ->
+ {"", ""};
+xbarriers(_Op, rb, _NB) ->
+ {"", "ETHR_MEMBAR(ETHR_LoadLoad);"};
+
+xbarriers(_Op, mb, mb) ->
+ {"", ""};
+xbarriers(Op, mb, NB) ->
+ MB = "ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);",
+ {Head, Tail} = op_head_tail(Op),
+ PreOp = case {Head, NB} of
+ {_, relb} -> "";
+ {store, wb} -> "ETHR_MEMBAR(ETHR_LoadStore);";
+ {store, _} -> "ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);";
+ {load, _} -> "ETHR_MEMBAR(ETHR_LoadLoad|ETHR_StoreLoad);";
+ {_, wb} -> "ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad);";
+ _ -> MB
+ end,
+ PostOp = case {Tail, NB} of
+ {_, acqb} -> "";
+ {load, rb} -> "ETHR_MEMBAR(ETHR_LoadStore);";
+ {load, _} -> "ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);";
+ {store, _} -> "ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);";
+ {_, rb} -> "ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore);";
+ _ -> MB
+ end,
+ {PreOp, PostOp}.
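xbarriers/3 computes the explicit ETHR_MEMBAR() calls that have to be placed before and after a native operation when the native variant only provides a weaker barrier (NB) than the one requested (B); the op_head_tail/1 classification lets a leading or trailing barrier be relaxed when the operation itself already begins or ends with the right kind of access. A sketch of the shape of code this produces, assuming a hypothetical native add that only exists in a release-barrier flavour and assuming add is classified as ending with a store:

    /* Sketch only; the macro and function names are illustrative, not the
     * generator's exact output. A full-barrier add composed from a native
     * release-barrier add: relb already orders everything before the
     * operation, so only a trailing barrier needs to be emitted. */
    static ETHR_INLINE void
    ethr_atomic_add_mb(ethr_atomic_t *var, ethr_sint_t incr)
    {
    #if defined(ETHR_HAVE_NATIVE_ATOMIC_ADD_RELB)       /* hypothetical */
        ethr_native_atomic_add_relb(var, incr);
        ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);    /* op ends with a store */
    #endif
    }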
+
+try_barrier_order_first(none) ->
+ [none, rb, wb, acqb, relb];
+try_barrier_order_first(acqb) ->
+ [acqb, rb, none, mb];
+try_barrier_order_first(relb) ->
+ [relb, wb, none, mb];
+try_barrier_order_first(rb) ->
+ [rb, none, mb];
+try_barrier_order_first(wb) ->
+ [wb, none, mb];
+try_barrier_order_first(mb) ->
+ [mb, relb, acqb, wb, rb, none].
+
+try_barrier_order(B) ->
+ First = try_barrier_order_first(B),
+ First ++ (?NATIVE_BARRIERS -- First).
+
+native_barrier_op(#atomic_context{'NATMC' = NATMC} = AC, If, ExtraDecl, Op, B, NB, TypeCasts) ->
+ NOpStr = opstr(native(Op)),
+ CapNOpStr = to_upper(NOpStr),
+ NBExt = op_barrier_ext(NB),
+ CapNBExt = to_upper(NBExt),
+ {PreB, PostB} = xbarriers(Op, B, NB),
+ [If, " defined(ETHR_HAVE_", NATMC, "_", CapNOpStr, CapNBExt, ")\n",
+ ExtraDecl,
+ case PreB of
+ "" -> "";
+ _ -> [" ", PreB, "\n"]
+ end,
+ " ", native_op_call(AC, Op, NB, TypeCasts), "\n",
+ case PostB of
+ "" -> "";
+ _ -> [" ", PostB, "\n"]
+ end].
+
+dw_native_barrier_op(#atomic_context{arg1 = Arg1, arg2 = Arg2, arg3 = Arg3} = AC, If, ExtraDecl, Op, B, NB) ->
+ native_barrier_op(AC#atomic_context{arg1 = ["&", Arg1, "->native"],
+ arg2 = [Arg2, "->", ?DW_SINT_FIELD],
+ arg3 = [Arg3, "->", ?DW_SINT_FIELD]},
+ If, ExtraDecl, Op, B, NB, false).
+
+su_dw_native_barrier_op(#atomic_context{dw = true,
+ naint_t = NAintT,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3,
+ 'NATMC' = NATMC} = AC, If, cmpxchg, B, NB) ->
+ SU = ["->", ?SU_DW_SINT_FIELD],
+ TmpVar = "act",
+ SUArg1 = ["&", Arg1, "->native"],
+ SUArg2 = [Arg2, SU],
+ SUArg3 = [Arg3, SU],
+ ExtraDecl = [" ", NAintT, " ", TmpVar, ";\n"],
+ [native_barrier_op(AC#atomic_context{dw = false,
+ ret_var = TmpVar,
+ arg1 = SUArg1,
+ arg2 = SUArg2,
+ arg3 = SUArg3,
+ 'NATMC' = ["SU_", NATMC]},
+ If, ExtraDecl, cmpxchg, B, NB, false),
+ " ", RetVar, " = (", TmpVar, " == ", SUArg3, ");
+ ", SUArg3, " = ", TmpVar, ";
+"
+ ];
+su_dw_native_barrier_op(#atomic_context{dw = true,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ 'NATMC' = NATMC} = AC, If, Op, B, NB) ->
+ SUArg1 = ["&", Arg1, "->native"],
+ SUArg2 = [Arg2, "->", ?SU_DW_SINT_FIELD],
+ native_barrier_op(AC#atomic_context{dw = false,
+ ret_var = SUArg2,
+ arg1 = SUArg1,
+ arg2 = SUArg2,
+ arg3 = not_used,
+ 'NATMC' = ["SU_", NATMC]}, If, "", Op, B, NB, false).
+
+cmpxchg_fallback_define(#atomic_context{dw = false, aint_t = AintT} = AC) ->
+ do_cmpxchg_fallback_define(AC, true, AintT);
+cmpxchg_fallback_define(#atomic_context{dw = true,
+ 'NATMC' = NATMC,
+ naint_t = NAintT} = AC) ->
+ ["\n\n#if defined(ETHR_HAVE_NATIVE_DW_ATOMIC)\n",
+ do_cmpxchg_fallback_define(AC, false, not_used),
+ "\n\n#elif defined(ETHR_HAVE_NATIVE_SU_DW_ATOMIC)\n",
+ do_cmpxchg_fallback_define(AC#atomic_context{'NATMC' = ["SU_", NATMC],
+ naint_t = NAintT},
+ true,
+ NAintT),
+ "
+
+#else
+# error \"?!?\"
+#endif
+"].
+
+do_cmpxchg_fallback_define(#atomic_context{'NATMC' = NATMC,
+ aint_t = AintT,
+ naint_t = NAintT},
+ SU, SUType) ->
+
+ ReadFunc = fun (IF) ->
+ fun (B) ->
+ BExt = op_barrier_ext(B),
+ CapBExt = to_upper(BExt),
+ [IF, " defined(ETHR_HAVE_", NATMC, "_READ", CapBExt, ")",
+ case SU of
+ true -> ["
+#define ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR) \\
+ ETHR_", NATMC, "_FUNC__(read", BExt, ")(VAR)
+"
+ ];
+ false -> ["
+#define ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR, VAL) \\
+ ETHR_", NATMC, "_FUNC__(read", BExt, ")(VAR, VAL)
+#elif defined(ETHR_HAVE_SU_", NATMC, "_READ", CapBExt, ")
+#define ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR, VAL) \\
+ VAL.", ?SU_DW_SINT_FIELD, " = ETHR_SU_", NATMC, "_FUNC__(read", BExt, ")(VAR)
+"
+ ]
+ end]
+ end
+ end,
+ NotDefCMPXCHG = fun (B) ->
+ CapBExt = to_upper(op_barrier_ext(B)),
+ ["!defined(ETHR_HAVE_", NATMC, "_CMPXCHG", CapBExt, ")"]
+ end,
+ NoneTryBarrierOrder = try_barrier_order(none),
+ %% First a sanity check
+ ["
+#if (", NotDefCMPXCHG(hd(?NATIVE_BARRIERS)) ,
+ lists:map(fun (B) ->
+ [" \\
+ && ", NotDefCMPXCHG(B)]
+ end,
+ tl(?NATIVE_BARRIERS)), ")
+# error \"No native cmpxchg() op available\"
+#endif
+
+
+/*
+ * Read op used together with cmpxchg() fallback when no native op present.
+ */
+",
+
+ %% Read op to use with cmpxchg fallback
+ (ReadFunc("#if"))(hd(NoneTryBarrierOrder)),
+ lists:map(ReadFunc("#elif"), tl(NoneTryBarrierOrder)),
+"#else
+/*
+ * We have no native read() op; guess zero and then use the
+ * atomic's actual value returned from cmpxchg().
+ */",
+ case SU of
+ true -> ["
+#define ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR) \\
+ ((", NAintT, ") 0)"];
+ false -> ["
+#define ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR, VAL) \\
+do { \\
+ VAL.", ?DW_SINT_FIELD, "[0] = (ethr_sint_t) 0; \\
+ VAL.", ?DW_SINT_FIELD, "[1] = (ethr_sint_t) 0; \\
+} while (0)"]
+ end, "
+#endif
+",
+
+ %% The fallback
+ "
+/*
+ * Native cmpxchg() fallback used when no native op present.
+ */
+#define ETHR_", NATMC, "_CMPXCHG_FALLBACK__(CMPXCHG, VAR, AVAL, OPS) \\
+do { \\",
+ case SU of
+ true -> ["
+ ", SUType, " AVAL; \\
+ ", NAintT, " new__, act__, exp__; \\
+ act__ = ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR); \\
+ do { \\
+ exp__ = act__; \\
+ AVAL = (", SUType, ") act__; \\
+ { OPS; } \\
+ new__ = (", NAintT, ") AVAL; \\
+ act__ = CMPXCHG(VAR, new__, exp__); \\
+ } while (__builtin_expect(act__ != exp__, 0)); \\"];
+ false -> ["
+ int res__; \\
+ ", AintT, " AVAL, exp_act__; \\
+ ETHR_", NATMC, "_CMPXCHG_FALLBACK_READ__(VAR, exp_act__); \\
+ do { \\
+ AVAL.", ?DW_SINT_FIELD, "[0] = exp_act__.", ?DW_SINT_FIELD, "[0]; \\
+ AVAL.", ?DW_SINT_FIELD, "[1] = exp_act__.", ?DW_SINT_FIELD, "[1]; \\
+ { OPS; } \\
+ res__ = CMPXCHG(VAR, AVAL.", ?DW_SINT_FIELD, ", exp_act__.", ?DW_SINT_FIELD, "); \\
+ } while (__builtin_expect(res__ == 0, 0)); \\"]
+ end, "
+} while (0)
+"
+ ].
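The macro emitted above retries the whole operation in a cmpxchg() loop: the current value is read (or guessed via the unusual-value trick), the requested operation is applied to a private copy, and the result is installed only if no other thread modified the variable in the meantime. Roughly, the single-word expansion for an add_read looks like the sketch below, reusing the read_via_cmpxchg() and ethr_atomic_cmpxchg__() names assumed earlier; it is an illustration of the loop, not the generator's literal output:

    /* Sketch only: what an add_read fallback boils down to after the
     * ETHR_..._CMPXCHG_FALLBACK__ macro above is applied. */
    static ethr_sint_t
    add_read_via_cmpxchg(ethr_atomic_t *var, ethr_sint_t incr)
    {
        ethr_sint_t aval, new__, act__, exp__;
        act__ = read_via_cmpxchg(var);     /* or a native read, if present */
        do {
            exp__ = act__;                 /* snapshot we compare against */
            aval  = act__ + incr;          /* OPS: the requested operation */
            new__ = aval;
            act__ = ethr_atomic_cmpxchg__(var, new__, exp__);
        } while (__builtin_expect(act__ != exp__, 0));  /* raced: retry */
        return aval;
    }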
+
+cmpxchg_fallbacks(#atomic_context{}, _SUDW, cmpxchg, _B) ->
+ ""; %% No need for a fallback
+cmpxchg_fallbacks(#atomic_context{dw = DW,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3,
+ 'NATMC' = NATMC},
+ SUDW, Op, B) ->
+ Operation = case DW of
+ false ->
+ op(Op, #op_context{ret = RetVar,
+ var = "aval",
+ val1 = Arg2,
+ val2 = Arg3});
+ true ->
+ case SUDW of
+ true ->
+ op(Op, #op_context{ret = [Arg2, "->", ?SU_DW_SINT_FIELD],
+ var = "aval",
+ val1 = [Arg2, "->", ?SU_DW_SINT_FIELD]});
+ false ->
+ dw_op(Op, #op_context{ret = RetVar,
+ var = ["aval.", ?DW_SINT_FIELD],
+ val1 = [Arg2, "->", ?DW_SINT_FIELD]})
+ end
+ end,
+ [lists:map(fun (NB) ->
+ NativeVar = case DW of
+ true -> ["&", Arg1, "->native"];
+ false -> Arg1
+ end,
+ NBExt = op_barrier_ext(NB),
+ CapNBExt = to_upper(NBExt),
+ {PreB, PostB} = xbarriers(cmpxchg, B, NB),
+ ["#elif defined(ETHR_HAVE_", NATMC, "_CMPXCHG", CapNBExt, ")\n",
+ case PreB of
+ "" -> "";
+ _ -> [" ", PreB, "\n"]
+ end,
+ " ETHR_", NATMC, "_CMPXCHG_FALLBACK__(ETHR_", NATMC, "_FUNC__(cmpxchg", NBExt, "), ", NativeVar, ", aval, ", Operation, ");\n",
+ case PostB of
+ "" -> "";
+ _ -> [" ", PostB, "\n"]
+ end]
+ end,
+ try_barrier_order(B))].
+
+translate_have_defs(#atomic_context{dw = DW, 'NATMC' = NATMC}) ->
+ ["
+#if !defined(ETHR_", NATMC, "_BITS__)
+# error \"Missing native atomic implementation\"",
+ lists:map(fun (NBits) ->
+ {HaveInPrefix,
+ HaveOutPrefix,
+ HaveInPrefixExtra,
+ HaveOutPrefixExtra,
+ NativeTypeCheck} = case NBits of
+ "dw" ->
+ {"ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC",
+ ["ETHR_HAVE_", NATMC],
+ "ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC",
+ ["ETHR_HAVE_SU_", NATMC],
+ "\n#elif defined(ETHR_HAVE_NATIVE_DW_ATOMIC) || defined(ETHR_HAVE_NATIVE_SU_DW_ATOMIC)"};
+ _ ->
+ {[?HAVE_NATIVE_ATOMIC, NBits],
+ case DW of
+ true -> ["ETHR_HAVE_SU_", NATMC];
+ false -> ["ETHR_HAVE_", NATMC]
+ end,
+ false,
+ ["ETHR_HAVE_", NATMC],
+ ["\n#elif ETHR_", NATMC, "_BITS__ == ", NBits]}
+ end,
+ [NativeTypeCheck,
+ lists:map(fun (Op) ->
+ NOpStr = opstr(native(Op)),
+ CapNOpStr = to_upper(NOpStr),
+ lists:map(fun (B) ->
+ NBExt = op_barrier_ext(B),
+ CapNBExt = to_upper(NBExt),
+ HaveOutDef = [HaveOutPrefix, "_", CapNOpStr, CapNBExt],
+ HaveOutDefExtra = [HaveOutPrefixExtra, "_", CapNOpStr, CapNBExt],
+ [case DW of
+ true ->
+ ["\n# undef ", HaveOutDefExtra];
+ false ->
+ ""
+ end, "
+# undef ", HaveOutDef,"
+# ifdef ", HaveInPrefix, "_", CapNOpStr, CapNBExt, "
+# define ", HaveOutDef, " 1
+# endif",
+ case HaveInPrefixExtra of
+ false -> "";
+ _ -> ["
+# ifdef ", HaveInPrefixExtra, "_", CapNOpStr, CapNBExt, "
+# define ", HaveOutDefExtra, " 1
+# endif"
+ ]
+ end]
+ end,
+ ?NATIVE_BARRIERS)
+ end,
+ case DW of
+ true -> ?DW_ATOMIC_OPS;
+ false -> ?ATOMIC_OPS
+ end)]
+ end,
+ case DW of
+ true -> ["dw", "64"];
+ false -> ?POTENTIAL_NBITS
+ end),
+ "
+#else
+# error \"Invalid native atomic size\"
+#endif
+"].
+
+
+
+make_prototypes(#atomic_context{dw = DW, 'ATMC' = ATMC} = AC) ->
+ MkProt = fun (MacroName) ->
+ %% addr() is special
+ [func_header(AC, prototype, MacroName, addr, none), "\n",
+ lists:map(fun (Op) ->
+ lists:map(fun (B) ->
+ [func_header(AC, prototype, MacroName, Op, B), "\n"]
+ end,
+ ?BARRIERS)
+ end,
+ case DW of
+ true -> ?DW_ATOMIC_OPS;
+ false -> ?ATOMIC_OPS
+ end)]
+ end,
+ ["
+#ifdef ETHR_NEED_", ATMC, "_PROTOTYPES__
+",
+ MkProt(false),
+ case DW of
+ true -> ["#if defined(", ?DW_RTCHK_MACRO, ")\n",
+ MkProt(?DW_FUNC_MACRO),
+ "#endif\n"];
+ false -> ""
+ end,
+ "#endif /* ETHR_NEED_", ATMC, "_PROTOTYPES__ */\n"].
+
+rtchk_fallback_call(Return, #atomic_context{dw = DW,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3},
+ Op, B) ->
+ op_call(Op, DW, case Return of
+ true -> "return";
+ false -> [RetVar, " ="]
+ end, [?DW_FUNC_MACRO, "(", opstr(Op), op_barrier_ext(B), ")"], Arg1, Arg2, Arg3, "").
+
+non_native_barrier(B) ->
+ lists:member(B, ?NON_NATIVE_BARRIERS).
+
+non_native_barrier_impl(AC, inline_implementation = Type, Op, B) ->
+ ["
+", func_header(AC, Type, false, Op, B), "
+{",
+ case B of
+ ddrb ->
+ ["
+#ifdef ETHR_ORDERED_READ_DEPEND
+ ", func_call(AC, Type, Op, none, true), "
+#else
+ ", func_call(AC, Type, Op, rb, true), "
+#endif
+"
+ ]
+ end,
+ "}
+"
+ ];
+non_native_barrier_impl(#atomic_context{have_native_atomic_ops = HaveNative} = AC,
+ implementation = Type,
+ Op,
+ B) ->
+ ["
+", func_header(AC, Type, false, Op, B), "
+{",
+ case B of
+ ddrb ->
+ ["
+#if defined(", HaveNative, ")
+ ", func_call(AC, Type, Op, B, true), "
+#elif defined(ETHR_ORDERED_READ_DEPEND)
+ ", func_call(AC, symbol_implementation, Op, none, true), "
+#else
+ ", func_call(AC, symbol_implementation, Op, rb, true), "
+#endif
+"
+ ]
+ end,
+ "}
+"
+ ].
+
+func_call(#atomic_context{'ATMC' = ATMC} = AC, inline_implementation, Op, B, RetStatement) ->
+ func_call(AC, Op, ["ETHR_", ATMC, "_FUNC__(", opstr(Op), op_barrier_ext(B), ")"], RetStatement);
+func_call(#atomic_context{atomic = Atomic} = AC, implementation, Op, B, RetStatement) ->
+ func_call(AC, Op, [Atomic, "_", opstr(Op), op_barrier_ext(B), "__"], RetStatement);
+func_call(#atomic_context{atomic = Atomic} = AC, symbol_implementation, Op, B, RetStatement) ->
+ func_call(AC, Op, [Atomic, "_", opstr(Op), op_barrier_ext(B)], RetStatement).
+
+func_call(#atomic_context{dw = DW, arg1 = Arg1, arg2 = Arg2, arg3 = Arg3} = AC, Op, Func, true) ->
+ op_call(Op, DW, case is_return_op(AC, Op) of
+ true -> "return";
+ false -> ""
+ end, Func, Arg1, Arg2, Arg3, "");
+func_call(#atomic_context{dw = DW, arg1 = Arg1, arg2 = Arg2, arg3 = Arg3, ret_var = RetVar} = AC, Op, Func, false) ->
+ op_call(Op, DW, case is_return_op(AC, Op) of
+ true -> [RetVar, " = "];
+ false -> ""
+ end, Func, Arg1, Arg2, Arg3, "").
+
+make_implementations(#atomic_context{dw = DW,
+ ret_type = RetType,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ addr_aint_t = AddrAintT,
+ atomic = Atomic,
+ have_native_atomic_ops = HaveNativeAtomicOps,
+ 'ATMC' = ATMC,
+ 'NATMC' = NATMC} = AC) ->
+ NativeVar = case DW of
+ true -> ["(&", Arg1, "->native)"];
+ false -> Arg1
+ end,
+ RtchkBegin = ["
+#if defined(", ?DW_RTCHK_MACRO, ")
+ if (", ?DW_RTCHK_MACRO, ") {
+#endif
+"],
+ RtchkEnd = fun (Return, Operation, Barrier) ->
+ ["
+#if defined(", ?DW_RTCHK_MACRO, ")
+ } else { ", rtchk_fallback_call(Return, AC, Operation, Barrier), " }
+#endif\n"
+ ]
+ end,
+ ["
+#if (defined(", HaveNativeAtomicOps, ") \\
+ && (defined(ETHR_", ATMC, "_INLINE__) || defined(ETHR_ATOMIC_IMPL__)))
+",
+ translate_have_defs(AC),
+ cmpxchg_fallback_define(AC),
+ %% addr() is special
+ "
+
+
+/* --- addr() --- */
+
+", func_header(AC, inline_implementation, false, addr, none), "
+{", case DW of
+ true -> RtchkBegin;
+ false -> ""
+ end, "
+ return (", AddrAintT, " *) ETHR_", NATMC, "_ADDR_FUNC__(", NativeVar, ");
+",case DW of
+ true -> RtchkEnd(true, addr, none);
+ false -> ""
+ end, "
+}
+",
+ lists:map(fun (Op) ->
+ OpStr = opstr(Op),
+ ["
+
+/* --- ", OpStr, "() --- */
+
+",
+ lists:map(fun (B) ->
+ case non_native_barrier(B) of
+ true ->
+ non_native_barrier_impl(AC, inline_implementation, Op, B);
+ false ->
+ TryBarriers = try_barrier_order(B),
+ ["
+", func_header(AC, inline_implementation, false, Op, B), "
+{
+",
+ case is_return_op(AC, Op) of
+ true ->
+ [" ", RetType, " ", RetVar, ";\n"];
+ _ -> ""
+ end,
+ case DW of
+ true ->
+ [RtchkBegin,
+ "\n",
+ su_dw_native_barrier_op(AC, "#if", Op, B, hd(TryBarriers)),
+ lists:map(fun (NB) ->
+ su_dw_native_barrier_op(AC, "#elif", Op, B, NB)
+ end,
+ tl(TryBarriers)),
+ lists:map(fun (NB) ->
+ dw_native_barrier_op(AC, "#elif", "", Op, B, NB)
+ end,
+ TryBarriers),
+ case simple_fallback(AC, Op, B) of
+ "" ->
+ %% No simple fallback available;
+ %% use cmpxchg() fallbacks...
+ [cmpxchg_fallbacks(AC#atomic_context{'NATMC' = ["SU_", NATMC]}, true, Op, B),
+ cmpxchg_fallbacks(AC, false, Op, B),
+ "#else
+#error \"Missing implementation of ", Atomic, "_", opstr(Op), op_barrier_ext(B), "()!\"
+#endif
+"
+ ];
+ SimpleFallback ->
+ ["#else\n", SimpleFallback, "#endif\n"]
+ end,
+ RtchkEnd(false, Op, B), "\n"];
+ false ->
+ [native_barrier_op(AC, "#if", "", Op, B, hd(TryBarriers), true),
+ lists:map(fun (NB) ->
+ native_barrier_op(AC, "#elif", "", Op, B, NB, true)
+ end,
+ tl(TryBarriers)),
+ case simple_fallback(AC, Op, B) of
+ "" ->
+ %% No simple fallback available;
+ %% use cmpxchg() fallbacks...
+ [cmpxchg_fallbacks(AC, false, Op, B),
+ "#else
+#error \"Missing implementation of ", Atomic, "_", opstr(Op), op_barrier_ext(B), "()!\"
+#endif
+"
+ ];
+ SimpleFallback ->
+ ["#else\n", SimpleFallback, "#endif\n"]
+ end]
+ end,
+ case is_return_op(AC, Op) of
+ true ->
+ [" return ", RetVar, ";\n"];
+ false ->
+ ""
+ end,
+ "}\n"]
+ end
+ end,
+ ?NATIVE_BARRIERS ++ ?NON_NATIVE_BARRIERS)] %% non-native needs to be after native...
+ end,
+ case DW of
+ true -> ?DW_ATOMIC_OPS;
+ false -> ?ATOMIC_OPS
+ end),
+ "
+#endif /* ETHR_", ATMC, "_INLINE__ */
+"
+ ].
+
+atomic_implementation_comment(AtomicSize) ->
+ CSz = case AtomicSize of
+ "dword" -> "Double word size";
+ "word" -> "Word size";
+ _ -> AtomicSize ++ "-bit"
+ end,
+ ["
+
+/* ---------- ", CSz, " atomic implementation ---------- */
+
+"
+ ].
+
+write_h_file(FileName) ->
+ {ok, FD} = file:open(FileName, [write, latin1]),
+ ok = file:write(FD, comments()),
+ ok = file:write(FD, "
+#ifndef ETHR_ATOMICS_H__
+#define ETHR_ATOMICS_H__
+"
+ ),
+ ok = file:write(FD, h_top()),
+ ok = lists:foreach(fun (AtomicSize) ->
+ AC = atomic_context(AtomicSize),
+ ok = file:write(FD,
+ [atomic_implementation_comment(AtomicSize),
+ make_prototypes(AC),
+ make_implementations(AC)])
+ end,
+ ?ATOMIC_SIZES),
+ ok = file:write(FD, "
+#endif /* ETHR_ATOMICS_H__ */
+"
+ ),
+ ok = file:close(FD).
+
+
+make_native_impl_op(#atomic_context{dw = DW,
+ atomic = Atomic,
+ have_native_atomic_ops = HaveNativeAtomicOps,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3}, Op, B) ->
+ ["#if defined(", HaveNativeAtomicOps, ")",
+ case DW of
+ true -> [" && !defined(", ?DW_RTCHK_MACRO, ")"];
+ false -> ""
+ end,
+ "\n",
+ " ", op_call(Op, DW, [RetVar, " = "], [Atomic, "_", opstr(Op), op_barrier_ext(B), "__"], Arg1, Arg2, Arg3, ""),
+ "\n"].
+
+amc_op_dw_arg(#atomic_context{dw = false}) ->
+ "0";
+amc_op_dw_arg(#atomic_context{dw = true}) ->
+ "1".
+
+amc_op_arg_prefix(#atomic_context{dw = false}) ->
+ "&";
+amc_op_arg_prefix(#atomic_context{dw = true}) ->
+ "".
+
+amc_sint_arg(#atomic_context{dw = DW, arg2 = Arg}, arg2) ->
+ amc_sint_arg(DW, Arg);
+amc_sint_arg(#atomic_context{dw = DW, arg3 = Arg}, arg3) ->
+ amc_sint_arg(DW, Arg);
+amc_sint_arg(#atomic_context{dw = DW, ret_var = Arg}, ret_var) ->
+ amc_sint_arg(DW, Arg);
+amc_sint_arg(true, Arg) ->
+ [Arg, "->" ?DW_SINT_FIELD];
+amc_sint_arg(false, Arg) ->
+ ["&", Arg].
+
+amc_op_call(#atomic_context{arg1 = Arg1} = AC, init) ->
+ [" amc_init(&", Arg1, "->amc, ", amc_op_dw_arg(AC), ", ", amc_op_arg_prefix(AC), Arg1, "->sint, ", amc_sint_arg(AC, arg2), ");\n"];
+amc_op_call(#atomic_context{arg1 = Arg1} = AC, set) ->
+ [" amc_set(&", Arg1, "->amc, ", amc_op_dw_arg(AC), ", ", amc_op_arg_prefix(AC), Arg1, "->sint, ", amc_sint_arg(AC, arg2), ");\n"];
+amc_op_call(#atomic_context{dw = false, arg1 = Arg1} = AC, read) ->
+ [" amc_read(&", Arg1, "->amc, ", amc_op_dw_arg(AC), ", ", amc_op_arg_prefix(AC), Arg1, "->sint, ", amc_sint_arg(AC, ret_var), ");\n"];
+amc_op_call(#atomic_context{dw = true, arg1 = Arg1} = AC, read) ->
+ [" amc_read(&", Arg1, "->amc, ", amc_op_dw_arg(AC), ", ", amc_op_arg_prefix(AC), Arg1, "->sint, ", amc_sint_arg(AC, arg2), ");\n"];
+amc_op_call(#atomic_context{dw = false, arg1 = Arg1, arg3 = Arg3, ret_var = RetVar} = AC, cmpxchg) ->
+ [" ", RetVar, " = ", Arg3, ";
+ (void) amc_cmpxchg(&", Arg1, "->amc, ", amc_op_dw_arg(AC), ", ", amc_op_arg_prefix(AC), Arg1, "->sint, ", amc_sint_arg(AC, arg2), ", ", amc_sint_arg(AC, ret_var), ");\n"];
+amc_op_call(#atomic_context{dw = true, arg1 = Arg1, ret_var = RetVar} = AC, cmpxchg) ->
+ [" ", RetVar, " = amc_cmpxchg(&", Arg1, "->amc, ", amc_op_dw_arg(AC), ", ", amc_op_arg_prefix(AC), Arg1, "->sint, ", amc_sint_arg(AC, arg2), ", ", amc_sint_arg(AC, arg3), ");\n"];
+amc_op_call(#atomic_context{dw = DW, arg1 = Arg1, arg2 = Arg2, arg3 = Arg3, ret_var = RetVar}, Op) ->
+ OpCtxt = #op_context{ret = RetVar, var = [Arg1,"->sint"], val1 = Arg2, val2 = Arg3},
+ OpStr = case DW of
+ true -> dw_op(Op, OpCtxt);
+ false -> op(Op, OpCtxt)
+ end,
+ [" ETHR_AMC_MODIFICATION_OPS__(&", Arg1, "->amc, ", OpStr, ");\n"].
+
+make_amc_fallback_op(#atomic_context{amc_fallback = false}, _Op, _B) ->
+ "";
+make_amc_fallback_op(#atomic_context{amc_fallback = true} = AC, Op, B) ->
+ NB = case Op of
+ read -> rb;
+ _ -> none
+ end,
+ {PreB, PostB} = xbarriers(Op, B, NB),
+ ["#elif defined(ETHR_AMC_FALLBACK__)\n",
+ case PreB of
+ "" -> "";
+ _ -> [" ", PreB, "\n"]
+ end,
+ amc_op_call(AC, Op),
+ case PostB of
+ "" -> "";
+ _ -> [" ", PostB, "\n"]
+ end].
+
+make_locked_fallback_op(#atomic_context{dw = DW,
+ ret_var = RetVar,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3}, Op, B) ->
+ OpStr = case DW of
+ true ->
+ dw_op(Op, #op_context{ret = RetVar,
+ var = [Arg1, "->" ?DW_SINT_FIELD],
+ val1 = [Arg2, "->" ?DW_SINT_FIELD],
+ val2 = [Arg3, "->" ?DW_SINT_FIELD]});
+ false ->
+ op(Op, #op_context{ret = RetVar,
+ var = ["*", Arg1],
+ val1 = Arg2,
+ val2 = Arg3})
+ end,
+ {PreB, PostB} = xbarriers(Op, B, none),
+ ["#else\n",
+ case PreB of
+ "" -> "";
+ _ -> [" ", PreB, "\n"]
+ end,
+ [" ETHR_ATOMIC_OP_FALLBACK_IMPL__(", Arg1, ", ", OpStr, ");\n"],
+ case PostB of
+ "" -> "";
+ _ -> [" ", PostB, "\n"]
+ end,
+ "#endif\n"].
+
+make_symbol_to_fallback_impl(#atomic_context{dw = true,
+ atomic = Atomic,
+ arg1 = Arg1,
+ arg2 = Arg2,
+ arg3 = Arg3} = AC,
+ Op, B) ->
+ ["
+#ifdef ", ?DW_RTCHK_MACRO, "
+", func_header(AC, implementation, false, Op, B), "
+{",
+ case Op of
+ init -> "";
+ _ -> ["\n ETHR_ASSERT(!ethr_not_inited__);"]
+ end, "
+ ETHR_ASSERT(", Arg1, ");
+ ", op_call(Op, true, "return", [Atomic, "_", opstr(Op), op_barrier_ext(B), "__"], Arg1, Arg2, Arg3, ""), "
+}
+#endif
+"
+ ];
+make_symbol_to_fallback_impl(_, _, _) ->
+ "".
+
+make_symbol_implementations(#atomic_context{dw = DW,
+ amc_fallback = AMC,
+ ret_type = RetType,
+ addr_aint_t = AddrAintT,
+ ret_var = RetVar,
+ arg1 = Arg1} = AC) ->
+ FallbackVar = case DW of
+ true -> ["(&", Arg1, "->fallback)"];
+ false -> Arg1
+ end,
+ ["
+",
+ case DW of
+ true -> ["
+/*
+ * Double word atomics need runtime test.
+ */
+
+int ethr_have_native_dw_atomic(void)
+{
+ return ethr_have_native_dw_atomic__();
+}
+ "];
+ false -> ""
+ end, "
+
+/* --- addr() --- */
+
+", func_header(AC, implementation,
+ case DW of
+ true -> ?DW_FUNC_MACRO;
+ false -> false
+ end, addr, none), "
+{
+ ", AddrAintT, " *", RetVar, ";
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(", Arg1, ");
+", make_native_impl_op(AC, addr, none),
+ case AMC of
+ true -> ["#elif defined(ETHR_AMC_FALLBACK__)
+ ", RetVar ," = (", AddrAintT, " *) (", FallbackVar, ")->sint;"];
+ false -> ""
+ end, "
+#else
+ ", RetVar, " = (", AddrAintT, " *) ", FallbackVar, ";
+#endif
+ return ", RetVar, ";
+}
+",
+ make_symbol_to_fallback_impl(AC, addr, none),
+ lists:map(fun (Op) ->
+ ["
+
+/* -- ", opstr(Op), "() -- */
+
+",
+ lists:map(fun (B) ->
+ Macro = case DW of
+ true -> ?DW_FUNC_MACRO;
+ false -> false
+ end,
+ case non_native_barrier(B) of
+ true ->
+ non_native_barrier_impl(AC, implementation, Op, B);
+ false ->
+ ["\n",
+ func_header(AC, implementation, Macro, Op, B),
+ "\n{\n",
+ case is_return_op(AC, Op) of
+ true -> [" ", RetType, " ", RetVar, ";\n"];
+ false -> ""
+ end,
+ case Op of
+ init -> "";
+ _ -> [" ETHR_ASSERT(!ethr_not_inited__);\n"]
+ end,
+ [" ETHR_ASSERT(", Arg1, ");\n"],
+ make_native_impl_op(AC, Op, B),
+ make_amc_fallback_op(AC#atomic_context{arg1 = FallbackVar}, Op, B),
+ make_locked_fallback_op(AC#atomic_context{arg1 = FallbackVar}, Op, B),
+ case is_return_op(AC, Op) of
+ true -> [" return ", RetVar, ";"
+ ];
+ false ->
+ ""
+ end,
+ "\n}\n",
+ make_symbol_to_fallback_impl(AC, Op, B)]
+ end
+ end,
+ ?BARRIERS)]
+ end,
+ case DW of
+ true -> ?DW_ATOMIC_OPS;
+ false -> ?ATOMIC_OPS
+ end)].
+
+make_info_functions() ->
+ ["
+
+
+/* --------- Info functions --------- */
+
+#if defined(", ?DW_RTCHK_MACRO, ")
+char *zero_ops[] = {NULL};
+#endif
+",
+ [lists:map(fun (NBits) ->
+ {DW, Bits} = case NBits of
+ "su_dw" -> {"su_dw_", ""};
+ "dw" -> {"dw_", ""};
+ _ -> {"", NBits}
+ end,
+ ["
+
+static char *native_", DW, "atomic", Bits, "_ops[] = {",
+ lists:map(fun (Op) ->
+ NOpStr = opstr(native(Op)),
+ CapNOpStr = to_upper(NOpStr),
+ lists:map(fun (B) ->
+ HaveNative = case NBits of
+ "dw" ->
+ "ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC";
+ "su_dw" ->
+ "ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC";
+ _ ->
+ [?HAVE_NATIVE_ATOMIC, NBits]
+ end,
+ NBExt = op_barrier_ext(B),
+ CapNBExt = to_upper(NBExt),
+ ["
+#ifdef ", HaveNative, "_", CapNOpStr, CapNBExt, "
+ \"", NOpStr, NBExt, "\",
+#endif"
+ ]
+ end,
+ ?NATIVE_BARRIERS)
+ end,
+ case NBits of
+ "dw" -> ?DW_ATOMIC_OPS;
+ "su_dw" -> ?DW_ATOMIC_OPS;
+ _ -> ?ATOMIC_OPS
+ end), "
+ NULL
+};
+
+char **
+ethr_native_", DW, "atomic", Bits, "_ops(void)
+{
+",
+ case DW of
+ "" -> "";
+ _ -> ["
+#if defined(", ?DW_RTCHK_MACRO, ")
+ if (!", ?DW_RTCHK_MACRO, ")
+ return &zero_ops[0];
+#endif"
+ ]
+ end, "
+ return &native_", DW, "atomic", Bits, "_ops[0];
+}
+"
+ ]
+ end, ["su_dw", "dw" | ?POTENTIAL_NBITS])]].
+
+write_c_file(FileName) ->
+ {ok, FD} = file:open(FileName, [write, latin1]),
+ ok = file:write(FD, comments()),
+ ok = file:write(FD, c_top()),
+ lists:foreach(fun (AtomicSize) ->
+ ok = file:write(FD,
+ [atomic_implementation_comment(AtomicSize),
+ make_symbol_implementations(atomic_context(AtomicSize))])
+ end,
+ ?ATOMIC_SIZES),
+ ok = file:write(FD, make_info_functions()).
+
+
+main([]) ->
+ case os:getenv("ERL_TOP") of
+ false ->
+ io:format("$ERL_TOP not set!~n", []),
+ halt(1);
+ ErlTop ->
+ HFile = filename:join(ErlTop, ?H_FILE),
+ WHFile = fun () ->
+ write_h_file(HFile)
+ end,
+ CFile = filename:join(ErlTop, ?C_FILE),
+ WCFile = fun () ->
+ write_c_file(CFile)
+ end,
+ case erlang:system_info(schedulers_online) of
+ 1 ->
+ WHFile(),
+ WCFile();
+ _ ->
+ {HPid, HMon} = spawn_monitor(WHFile),
+ {CPid, CMon} = spawn_monitor(WCFile),
+ receive
+ {'DOWN', HMon, process, HPid, HReason} ->
+ normal = HReason
+ end,
+ receive
+ {'DOWN', CMon, process, CPid, CReason} ->
+ normal = CReason
+ end
+ end,
+ io:format("Wrote: ~s~n", [HFile]),
+ io:format("Wrote: ~s~n", [CFile]),
+ init:stop()
+ end.
+
+a2l(A) ->
+ atom_to_list(A).
+
+opstr(A) ->
+ a2l(A).
+
+to_upper([]) ->
+ [];
+to_upper([C|Cs]) when is_list(C) ->
+ [to_upper(C)|to_upper(Cs)];
+to_upper([C|Cs]) when is_integer(C), 97 =< C, C =< 122 ->
+ [C-32|to_upper(Cs)];
+to_upper([C|Cs]) ->
+ [C|to_upper(Cs)].
+
+
+comments() ->
+ Years = case erlang:date() of
+ {2011, _, _} -> "2011";
+ {Y, _, _} -> "2011-"++integer_to_list(Y)
+ end,
+ ["/*
+ * --------------- DO NOT EDIT THIS FILE! ---------------
+ * This file was automatically generated by the
+ * \$ERL_TOP/erts/lib_src/utils/make_atomics_api script.
+ * If you need to make changes, edit the script and
+ * regenerate this file.
+ * --------------- DO NOT EDIT THIS FILE! ---------------
+ */
+
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB ", Years, ". All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the \"License\"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an \"AS IS\"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomics API
+ * Author: Rickard Green
+ */
+
+/*
+ * This file maps native atomic implementations to ethread
+ * API atomics. If no native atomic implementation
+ * is available, a less efficient fallback is used instead.
+ * The API consists of 32-bit size, word size (pointer size),
+ * and double word size atomics.
+ *
+ * The following atomic operations are implemented for
+ * 32-bit size, and word size atomics:
+",
+ lists:map(fun (Op) ->
+ [" * - ", opstr(Op), "\n"]
+ end,
+ ?ATOMIC_OPS),
+ " *
+ * The following atomic operations are implemented for
+ * double word size atomics:
+",
+ lists:map(fun (Op) ->
+ [" * - ", opstr(Op), "\n"]
+ end,
+ ?DW_ATOMIC_OPS),
+ " *
+ * Apart from a function implementing the atomic operation
+ * with unspecified memory barrier semantics, there are
+ * functions implementing each operation with the following
+ * implied memory barrier semantics:",
+ lists:map(fun (none) ->
+ "";
+ (mb) ->
+ ["
+ * - mb - Full memory barrier. Orders both loads, and
+ * stores before, and after the atomic operation.
+ * No load or store is allowed to be reordered
+ * over the atomic operation."];
+ (acqb) ->
+ ["
+ * - acqb - Acquire barrier. Orders both loads, and stores
+ * appearing *after* the atomic operation. These
+ * are not allowed to be reordered over the
+ * atomic operation."];
+ (relb) ->
+ ["
+ * - relb - Release barrier. Orders both loads, and
+ * stores appearing *before* the atomic
+ * operation. These are not allowed to be
+ * reordered over the atomic operation."];
+ (rb) ->
+ ["
+ * - rb - Read barrier. Orders *only* loads. These are
+ * not allowed to be reordered over the barrier.
+ * Load in atomic operation is ordered *before*
+ * the barrier. "];
+ (ddrb) ->
+ ["
+ * - ddrb - Data dependency read barrier. Orders *only*
+ * loads according to data dependency across the
+ * barrier. Load in atomic operation is ordered
+ * before the barrier."];
+ (wb) ->
+ ["
+ * - wb - Write barrier. Orders *only* stores. These are
+ * not allowed to be reordered over the barrier.
+ * Store in atomic operation is ordered *after*
+ * the barrier."];
+ (B) ->
+ [" * - ", a2l(B), "\n"]
+ end,
+ lists:reverse(?BARRIERS)),
+ "
+ *
+ * We implement all of these operation/barrier
+ * combinations, regardless of whether they are useful
+ * or not (some of them are useless).
+ *
+ * Double word size atomic functions are of the following
+ * form:
+ * ethr_dw_atomic_<OP>[_<BARRIER>]
+ *
+ * Word size atomic functions are of the following
+ * form:
+ * ethr_atomic_<OP>[_<BARRIER>]
+ *
+ * 32-bit size atomic functions are of the following
+ * form:
+ * ethr_atomic32_<OP>[_<BARRIER>]
+ *
+ * Apart from the operation/barrier functions
+ * described above also 'addr' functions are implemented
+ * which return the actual memory address used of the
+ * atomic variable. The 'addr' functions have no barrier
+ * versions.
+ *
+ * The native atomic implementation does not need to
+ * implement all operation/barrier combinations.
+ * Functions that have no native implementation will be
+ * constructed from existing native functionality. These
+ * functions will perform the wanted operation and will
+ * produce sufficient memory barriers, but may
+ * in some cases be less efficient than pure native
+ * versions.
+ *
+ * When we create ethread API operation/barrier functions by
+ * adding barriers before and after native operations it is
+ * assumed that:
+ * - A native read operation begins, and ends with a load.
+ * - A native set operation begins, and ends with a store.
+ * - An init operation begins with either a load, or a store,
+ * and ends with either a load, or a store.
+ * - All other operations begin with a load, and end with
+ * either a load, or a store.
+ *
+ * This is the minimum functionality that a native
+ * implementation needs to provide:
+ *
+ * - Functions that need to be implemented:
+ *
+ * - ethr_native_[dw_|su_dw_]atomic[BITS]_addr
+ * - ethr_native_[dw_|su_dw_]atomic[BITS]_cmpxchg[_<BARRIER>]
+ * (at least one cmpxchg variant, with any barrier)
+ *
+ * - Macros that need to be defined:
+ *
+ * A macro informing about the presence of the native
+ * implementation:
+ *
+ * - ETHR_HAVE_NATIVE_[DW_|SU_DW_]ATOMIC[BITS]
+ *
+ * A macro naming (a string constant) the implementation:
+ *
+ * - ETHR_NATIVE_[DW_]ATOMIC[BITS]_IMPL
+ *
+ * Each implemented native atomic function has to
+ * be accompanied by a defined macro of the following
+ * form informing about its presence:
+ *
+ * - ETHR_HAVE_ETHR_NATIVE_[DW_|SU_DW_]ATOMIC[BITS]_<OP>[_<BARRIER>]
+ *
+ * A (sparc-v9 style) membar macro:
+ *
+ * - ETHR_MEMBAR(B)
+ *
+ * Which takes a combination of the following macros
+ * OR'ed (using |) together:
+ *
+ * - ETHR_LoadLoad
+ * - ETHR_LoadStore
+ * - ETHR_StoreLoad
+ * - ETHR_StoreStore
+ *
+ */
+"
+ ].
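Taken together, the comment block above defines the caller-visible naming scheme of the generated API. A usage sketch with function names derived from the <size>_<op>[_<barrier>] pattern and the op lists; the individual names are assumptions based on that pattern, not verified against the generated header:

    #include "ethread.h"

    static ethr_atomic_t counter;

    /* Sketch only: exercising a few word-size operations. */
    static void example(void)
    {
        ethr_sint_t n, cur;
        ethr_atomic_init(&counter, 0);              /* no barrier variant */
        n   = ethr_atomic_inc_read_mb(&counter);    /* full barrier */
        cur = ethr_atomic_read_rb(&counter);        /* read barrier */
        (void) n; (void) cur;
    }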
+
+h_top() ->
+ ["
+#undef ETHR_AMC_FALLBACK__
+#undef ETHR_AMC_NO_ATMCS__
+#undef ETHR_AMC_ATMC_T__
+#undef ETHR_AMC_ATMC_FUNC__
+
+/* -- 32-bit atomics -- */
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+#undef ETHR_NATMC32_ADDR_FUNC__
+#undef ETHR_NATMC32_BITS__
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+# define ETHR_NEED_NATMC32_ADDR
+# define ETHR_NATMC32_ADDR_FUNC__ ethr_native_atomic32_addr
+typedef ethr_native_atomic32_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint32_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
+# define ETHR_NATMC32_BITS__ 32
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NEED_NATMC64_ADDR
+#ifdef ETHR_BIGENDIAN
+# define ETHR_NATMC32_ADDR_FUNC__(VAR) \\
+ (((ethr_sint32_t *) ethr_native_atomic64_addr((VAR))) + 1)
+#else
+# define ETHR_NATMC32_ADDR_FUNC__(VAR) \\
+ ((ethr_sint32_t *) ethr_native_atomic64_addr((VAR)))
+#endif
+typedef ethr_native_atomic64_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint64_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC32_BITS__ 64
+#else
+/*
+ * No native atomics usable for 32-bit atomics :(
+ * Use fallback...
+ */
+typedef ethr_sint32_t ethr_atomic32_t;
+#endif
+
+#undef ETHR_ATMC32_INLINE__
+#ifdef ETHR_NATMC32_BITS__
+# ifdef ETHR_TRY_INLINE_FUNCS
+# define ETHR_ATMC32_INLINE__
+# endif
+# define ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS
+#endif
+
+#if !defined(ETHR_ATMC32_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATMC32_PROTOTYPES__
+#endif
+
+#ifndef ETHR_INLINE_ATMC32_FUNC_NAME_
+# define ETHR_INLINE_ATMC32_FUNC_NAME_(X) X
+#endif
+
+#undef ETHR_ATMC32_FUNC__
+#define ETHR_ATMC32_FUNC__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+
+
+/* -- Word size atomics -- */
+
+#undef ETHR_NEED_NATMC32_ADDR
+#undef ETHR_NEED_NATMC64_ADDR
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+#undef ETHR_NATMC_BITS__
+#if ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC64_ADDR
+# endif
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC_BITS__ 64
+#elif ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC32)
+# ifndef ETHR_NEED_NATMC32_ADDR
+# define ETHR_NEED_NATMC32_ADDR
+# endif
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic32_addr
+typedef ethr_native_atomic32_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# define ETHR_NATMC_BITS__ 32
+#elif ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC64_ADDR
+# endif
+#ifdef ETHR_BIGENDIAN
+# define ETHR_NATMC_ADDR_FUNC__(VAR) \\
+ (((ethr_sint32_t *) ethr_native_atomic64_addr((VAR))) + 1)
+#else
+# define ETHR_NATMC_ADDR_FUNC__(VAR) \\
+ ((ethr_sint32_t *) ethr_native_atomic64_addr((VAR)))
+#endif
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NATMC_T__ ethr_native_atomic64_t
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_NATMC_BITS__ 64
+#else
+/*
+ * No native atomics usable for pointer size atomics :(
+ * Use fallback...
+ */
+
+# if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+# define ETHR_AMC_FALLBACK__
+# define ETHR_AMC_NO_ATMCS__ 2
+# define ETHR_AMC_SINT_T__ ethr_sint32_t
+# define ETHR_AMC_ATMC_T__ ethr_atomic32_t
+# define ETHR_AMC_ATMC_FUNC__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+typedef struct {
+ ETHR_AMC_ATMC_T__ atomic[ETHR_AMC_NO_ATMCS__];
+} ethr_amc_t;
+typedef struct {
+ ethr_amc_t amc;
+ ethr_sint_t sint;
+} ethr_atomic_t;
+# else /* locked fallback */
+typedef ethr_sint_t ethr_atomic_t;
+# endif
+#endif
+
+#undef ETHR_ATMC_INLINE__
+#ifdef ETHR_NATMC_BITS__
+# ifdef ETHR_TRY_INLINE_FUNCS
+# define ETHR_ATMC_INLINE__
+# endif
+# define ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS
+#endif
+
+#if !defined(ETHR_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATMC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_INLINE_ATMC_FUNC_NAME_
+# define ETHR_INLINE_ATMC_FUNC_NAME_(X) X
+#endif
+
+#undef ETHR_ATMC_FUNC__
+#define ETHR_ATMC_FUNC__(X) ETHR_INLINE_ATMC_FUNC_NAME_(ethr_atomic_ ## X)
+
+/* -- Double word atomics -- */
+
+#undef ETHR_SU_DW_NAINT_T__
+#undef ETHR_SU_DW_NATMC_FUNC__
+#undef ETHR_SU_DW_NATMC_ADDR_FUNC__
+#undef ETHR_DW_NATMC_FUNC__
+#undef ETHR_DW_NATMC_ADDR_FUNC__
+#undef ETHR_DW_NATMC_BITS__
+#if defined(ETHR_HAVE_NATIVE_DW_ATOMIC) || defined(ETHR_HAVE_NATIVE_SU_DW_ATOMIC)
+# define ETHR_NEED_DW_NATMC_ADDR
+# define ETHR_DW_NATMC_ADDR_FUNC__ ethr_native_dw_atomic_addr
+# define ETHR_NATIVE_DW_ATOMIC_T__ ethr_native_dw_atomic_t
+# define ETHR_DW_NATMC_FUNC__(X) ethr_native_dw_atomic_ ## X
+# define ETHR_SU_DW_NATMC_FUNC__(X) ethr_native_su_dw_atomic_ ## X
+# if ETHR_SIZEOF_PTR == 8
+# define ETHR_DW_NATMC_BITS__ 128
+# elif ETHR_SIZEOF_PTR == 4
+# define ETHR_DW_NATMC_BITS__ 64
+# else
+# error \"Word size not supported\"
+# endif
+# ifdef ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_SU_DW_NAINT_T__ ETHR_NATIVE_SU_DW_SINT_T
+# endif
+#elif ETHR_SIZEOF_PTR == 4 && defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_HAVE_NATIVE_SU_DW_ATOMIC
+# ifndef ETHR_NEED_NATMC64_ADDR
+# define ETHR_NEED_NATMC64_ADDR
+# endif
+# define ETHR_DW_NATMC_ADDR_FUNC__(VAR) \\
+ ((ethr_dw_sint_t *) ethr_native_atomic64_addr((VAR)))
+# define ETHR_NATIVE_DW_ATOMIC_T__ ethr_native_atomic64_t
+# define ETHR_SU_DW_NAINT_T__ ethr_sint64_t
+# define ETHR_SU_DW_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# define ETHR_DW_NATMC_BITS__ 64
+#endif
+
+#if defined(", ?DW_RTCHK_MACRO, ")
+#define ", ?DW_FUNC_MACRO, "(X) ethr_dw_atomic_ ## X ## _fallback__
+#else
+#define ", ?DW_FUNC_MACRO, "(X) ethr_dw_atomic_ ## X
+#endif
+
+#if !defined(ETHR_DW_NATMC_BITS__) || defined(", ?DW_RTCHK_MACRO, ")
+# define ETHR_NEED_DW_FALLBACK__
+#endif
+
+#if defined(ETHR_NEED_DW_FALLBACK__)
+/*
+ * No native atomics usable for double word atomics :(
+ * Use fallback...
+ */
+
+# ifndef ETHR_AMC_FALLBACK__
+# if ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS)
+# define ETHR_AMC_FALLBACK__
+# define ETHR_AMC_NO_ATMCS__ 1
+# define ETHR_AMC_SINT_T__ ethr_sint_t
+# define ETHR_AMC_ATMC_T__ ethr_atomic_t
+# define ETHR_AMC_ATMC_FUNC__(X) ETHR_INLINE_ATMC_FUNC_NAME_(ethr_atomic_ ## X)
+# elif defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
+# define ETHR_AMC_FALLBACK__
+# define ETHR_AMC_NO_ATMCS__ 2
+# define ETHR_AMC_SINT_T__ ethr_sint32_t
+# define ETHR_AMC_ATMC_T__ ethr_atomic32_t
+# define ETHR_AMC_ATMC_FUNC__(X) ETHR_INLINE_ATMC32_FUNC_NAME_(ethr_atomic32_ ## X)
+# endif
+# ifdef ETHR_AMC_FALLBACK__
+typedef struct {
+ ETHR_AMC_ATMC_T__ atomic[ETHR_AMC_NO_ATMCS__];
+} ethr_amc_t;
+# endif
+# endif
+
+typedef struct {
+#ifdef ETHR_AMC_FALLBACK__
+ ethr_amc_t amc;
+#endif
+ ethr_sint_t sint[2];
+} ethr_dw_atomic_fallback_t;
+
+#endif
+
+typedef union {
+#ifdef ETHR_NATIVE_DW_ATOMIC_T__
+ ETHR_NATIVE_DW_ATOMIC_T__ native;
+#endif
+#ifdef ETHR_NEED_DW_FALLBACK__
+ ethr_dw_atomic_fallback_t fallback;
+#endif
+ ethr_sint_t sint[2];
+} ethr_dw_atomic_t;
+
+typedef union {
+#ifdef ETHR_SU_DW_NAINT_T__
+ ETHR_SU_DW_NAINT_T__ ", ?SU_DW_SINT_FIELD, ";
+#endif
+ ethr_sint_t ", ?DW_SINT_FIELD, "[2];
+} ethr_dw_sint_t;
+
+#ifdef ETHR_BIGENDIAN
+# define ETHR_DW_SINT_LOW_WORD 1
+# define ETHR_DW_SINT_HIGH_WORD 0
+#else
+# define ETHR_DW_SINT_LOW_WORD 0
+# define ETHR_DW_SINT_HIGH_WORD 1
+#endif
+
+#undef ETHR_DW_ATMC_INLINE__
+#ifdef ETHR_DW_NATMC_BITS__
+# ifdef ETHR_TRY_INLINE_FUNCS
+# define ETHR_DW_ATMC_INLINE__
+# endif
+# define ETHR_HAVE_DOUBLE_WORD_SZ_NATIVE_ATOMIC_OPS
+#endif
+
+#if !defined(ETHR_DW_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_DW_ATMC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_INLINE_DW_ATMC_FUNC_NAME_
+# define ETHR_INLINE_DW_ATMC_FUNC_NAME_(X) X
+#endif
+
+#undef ETHR_DW_ATMC_FUNC__
+#define ETHR_DW_ATMC_FUNC__(X) ETHR_INLINE_DW_ATMC_FUNC_NAME_(ethr_dw_atomic_ ## X)
+
+#if defined(ETHR_NEED_DW_ATMC_PROTOTYPES__)
+int ethr_have_native_dw_atomic(void);
+#endif
+#if defined(ETHR_DW_ATMC_INLINE__) || defined(ETHR_ATOMIC_IMPL__)
+static ETHR_INLINE int
+ETHR_INLINE_DW_ATMC_FUNC_NAME_(ethr_have_native_dw_atomic)(void)
+{
+#if defined(", ?DW_RTCHK_MACRO, ")
+ return ", ?DW_RTCHK_MACRO, ";
+#elif defined(ETHR_DW_NATMC_BITS__)
+ return 1;
+#else
+ return 0;
+#endif
+}
+#endif
+
+/* -- Misc -- */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+/*
+ * Unusual values are used by read() fallbacks implemented via cmpxchg().
+ * We want to use an unusual value in the hope that it is more
+ * efficient not to match the value in memory.
+ *
+ * - Negative integer values are probably more unusual.
+ * - Very large absolute integer values are probably more unusual.
+ * - Odd pointers are probably more unusual (only char pointers can be odd).
+ */
+# define ETHR_UNUSUAL_SINT32_VAL__ ((ethr_sint32_t) 0x81818181)
+# if ETHR_SIZEOF_PTR == 4
+# define ETHR_UNUSUAL_SINT_VAL__ ((ethr_sint_t) ETHR_UNUSUAL_SINT32_VAL__)
+# elif ETHR_SIZEOF_PTR == 8
+# define ETHR_UNUSUAL_SINT_VAL__ ((ethr_sint_t) 0x8181818181818181L)
+# else
+# error \"Word size not supported\"
+# endif
+# if defined(ETHR_NEED_DW_NATMC_ADDR) && !defined(ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR)
+# error \"No ethr_native_dw_atomic_addr() available\"
+# endif
+# if defined(ETHR_NEED_NATMC32_ADDR) && !defined(ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADDR)
+# error \"No ethr_native_atomic32_addr() available\"
+# endif
+# if defined(ETHR_NEED_NATMC64_ADDR) && !defined(ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADDR)
+# error \"No ethr_native_atomic64_addr() available\"
+# endif
+#endif
+
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__(\"\" : : : \"memory\")
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#endif
+
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
+
+int ethr_init_atomics(void);
+
+/* info */
+char **ethr_native_atomic32_ops(void);
+char **ethr_native_atomic64_ops(void);
+char **ethr_native_dw_atomic_ops(void);
+char **ethr_native_su_dw_atomic_ops(void);
+
+#if !defined(ETHR_DW_NATMC_BITS__) && !defined(ETHR_NATMC_BITS__) && !defined(ETHR_NATMC32_BITS__)
+/*
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e., when no native atomic implementation exists and only our
+ * lock-based atomic fallback is used, a noop is sufficient.
+ */
+# undef ETHR_MEMORY_BARRIER
+# undef ETHR_WRITE_MEMORY_BARRIER
+# undef ETHR_READ_MEMORY_BARRIER
+# undef ETHR_READ_DEPEND_MEMORY_BARRIER
+# undef ETHR_MEMBAR
+# define ETHR_MEMBAR(B) do { } while (0)
+#endif
+
+#ifndef ETHR_MEMBAR
+# error \"No ETHR_MEMBAR defined\"
+#endif
+
+#define ETHR_MEMORY_BARRIER ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreLoad|ETHR_StoreStore)
+#define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMBAR(ETHR_StoreStore)
+#define ETHR_READ_MEMORY_BARRIER ETHR_MEMBAR(ETHR_LoadLoad)
+#ifdef ETHR_READ_DEPEND_MEMORY_BARRIER
+# undef ETHR_ORDERED_READ_DEPEND
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_ORDERED_READ_DEPEND
+#endif
+"].
+
+c_top() ->
+ ["
+
+#ifdef HAVE_CONFIG_H
+#include \"config.h\"
+#endif
+
+#define ETHR_TRY_INLINE_FUNCS
+#define ETHR_INLINE_DW_ATMC_FUNC_NAME_(X) X ## __
+#define ETHR_INLINE_ATMC_FUNC_NAME_(X) X ## __
+#define ETHR_INLINE_ATMC32_FUNC_NAME_(X) X ## __
+#define ETHR_ATOMIC_IMPL__
+
+#include \"ethread.h\"
+#include \"ethr_internal.h\"
+
+#if (!defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS) \\
+ || !defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS))
+/*
+ * Spinlock based fallback for atomics used in absence of a native
+ * implementation.
+ */
+
+#define ETHR_ATMC_FLLBK_ADDR_BITS ", ?ETHR_ATMC_FLLBK_ADDR_BITS, "
+#define ETHR_ATMC_FLLBK_ADDR_SHIFT ", ?ETHR_ATMC_FLLBK_ADDR_SHIFT, "
+
+typedef struct {
+ union {
+ ethr_spinlock_t lck;
+ char buf[ETHR_CACHE_LINE_ALIGN_SIZE(sizeof(ethr_spinlock_t))];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATMC_FLLBK_ADDR_BITS];
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \\
+(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATMC_FLLBK_ADDR_SHIFT) \\
+ & ((1 << ETHR_ATMC_FLLBK_ADDR_BITS) - 1))].u.lck)
+
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \\
+do { \\
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \\
+ ethr_spin_lock(slp__); \\
+ { EXPS; } \\
+ ethr_spin_unlock(slp__); \\
+} while (0)
+
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATMC_FLLBK_ADDR_BITS];
+
+#endif
+
+", make_amc_fallback(), "
+
+int
+ethr_init_atomics(void)
+{
+#if (!defined(ETHR_HAVE_WORD_SZ_NATIVE_ATOMIC_OPS) \\
+ || !defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS))
+ int i;
+ for (i = 0; i < (1 << ETHR_ATMC_FLLBK_ADDR_BITS); i++) {
+ int res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
+ }
+#endif
+ return 0;
+}
+"].
+
+make_amc_fallback() ->
+ ["
+#if defined(ETHR_AMC_FALLBACK__)
+
+/*
+ * Fallback for large sized (word and/or double word size) atomics using
+ * an \"Atomic Modification Counter\" based on smaller sized native atomics.
+ *
+ * We use a 63-bit modification counter and a one bit exclusive flag.
+ * If 32-bit native atomics are used, we need two 32-bit native atomics.
+ * The exclusive flag is the least significant bit, or if multiple atomics
+ * are used, the least significant bit of the least significant atomic.
+ *
+ * When using the AMC fallback the following is true:
+ * - Reads of the same atomic variable can be done in parallel.
+ * - Uncontended reads don't cause any cache line invalidations,
+ * since no modifications are done.
+ * - Assuming that the AMC atomic(s) and the integer(s) containing the
+ * value of the implemented atomic reside in the same cache line,
+ * modifications will only cause invalidations of one cache line.
+ *
+ * When using the spinlock based fallback none of the above is true,
+ * however, the spinlock based fallback consumes less memory.
+ */
+
+# if ETHR_AMC_NO_ATMCS__ != 1 && ETHR_AMC_NO_ATMCS__ != 2
+# error \"Not supported\"
+# endif
+# define ETHR_AMC_MAX_TRY_READ__ 10
+# ifdef ETHR_DEBUG
+# define ETHR_DBG_CHK_EXCL_STATE(ASP, S) \\
+do { \\
+ ETHR_AMC_SINT_T__ act = ETHR_AMC_ATMC_FUNC__(read)(&(ASP)->atomic[0]); \\
+ ETHR_ASSERT(act == (S) + 1); \\
+ ETHR_ASSERT(act & 1); \\
+} while (0)
+# else
+# define ETHR_DBG_CHK_EXCL_STATE(ASP, S)
+# endif
+
+static ETHR_INLINE void
+amc_init(ethr_amc_t *amc, int dw, ethr_sint_t *avar, ethr_sint_t *val)
+{
+ avar[0] = val[0];
+ if (dw)
+ avar[1] = val[1];
+#if ETHR_AMC_NO_ATMCS__ == 2
+ ETHR_AMC_ATMC_FUNC__(init)(&amc->atomic[1], 0);
+#endif
+ ETHR_AMC_ATMC_FUNC__(init_wb)(&amc->atomic[0], 0);
+}
+
+static ETHR_INLINE ETHR_AMC_SINT_T__
+amc_set_excl(ethr_amc_t *amc, ETHR_AMC_SINT_T__ prev_state0)
+{
+ ETHR_AMC_SINT_T__ state0 = prev_state0;
+ /* Set exclusive flag. */
+ while (1) {
+ ETHR_AMC_SINT_T__ act_state0, new_state0;
+ while (state0 & 1) { /* Wait until exclusive bit has been cleared */
+ ETHR_SPIN_BODY;
+ state0 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+ }
+ /* Try to set exclusive bit */
+ new_state0 = state0 + 1;
+ act_state0 = ETHR_AMC_ATMC_FUNC__(cmpxchg_acqb)(&amc->atomic[0],
+ new_state0,
+ state0);
+ if (state0 == act_state0)
+ return state0; /* old state0 */
+ state0 = act_state0;
+ }
+}
+
+static ETHR_INLINE void
+amc_inc_mc_unset_excl(ethr_amc_t *amc, ETHR_AMC_SINT_T__ old_state0)
+{
+ ETHR_AMC_SINT_T__ state0 = old_state0;
+
+ /* Increment modification counter and reset exclusive flag. */
+
+ ETHR_DBG_CHK_EXCL_STATE(amc, state0);
+
+ state0 += 2;
+
+ ETHR_ASSERT((state0 & 1) == 0);
+
+#if ETHR_AMC_NO_ATMCS__ == 2
+ if (state0 == 0) {
+ /*
+ * state0 wrapped, so we need to increment state1. There is no need
+ * for atomic inc op, since this is always done while having exclusive
+ * flag.
+ */
+ ETHR_AMC_SINT_T__ state1 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[1]);
+ state1++;
+ ETHR_AMC_ATMC_FUNC__(set)(&amc->atomic[1], state1);
+ }
+#endif
+ ETHR_AMC_ATMC_FUNC__(set_relb)(&amc->atomic[0], state0);
+}
+
+static ETHR_INLINE void
+amc_unset_excl(ethr_amc_t *amc, ETHR_AMC_SINT_T__ old_state0)
+{
+ ETHR_DBG_CHK_EXCL_STATE(amc, old_state0);
+ /*
+ * Reset exclusive flag, but leave modification counter unchanged,
+ * i.e., restore state to what it was before setting exclusive
+ * flag.
+ */
+ ETHR_AMC_ATMC_FUNC__(set_relb)(&amc->atomic[0], old_state0);
+}
+
+static ETHR_INLINE void
+amc_set(ethr_amc_t *amc, int dw, ethr_sint_t *avar, ethr_sint_t *val)
+{
+ ETHR_AMC_SINT_T__ state0 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+
+ state0 = amc_set_excl(amc, state0);
+
+ avar[0] = val[0];
+ if (dw)
+ avar[1] = val[1];
+
+ amc_inc_mc_unset_excl(amc, state0);
+}
+
+static ETHR_INLINE int
+amc_try_read(ethr_amc_t *amc, int dw, ethr_sint_t *avar,
+ ethr_sint_t *val, ETHR_AMC_SINT_T__ *state0p)
+{
+ /* *state0p should contain last read value if aborting */
+ ETHR_AMC_SINT_T__ old_state0;
+#if ETHR_AMC_NO_ATMCS__ == 2
+ ETHR_AMC_SINT_T__ state1;
+ int abrt;
+#endif
+
+ *state0p = ETHR_AMC_ATMC_FUNC__(read_rb)(&amc->atomic[0]);
+ if ((*state0p) & 1)
+ return 0; /* exclusive flag set; abort */
+#if ETHR_AMC_NO_ATMCS__ == 2
+ state1 = ETHR_AMC_ATMC_FUNC__(read_rb)(&amc->atomic[1]);
+#else
+ ETHR_COMPILER_BARRIER;
+#endif
+
+ val[0] = avar[0];
+ if (dw)
+ val[1] = avar[1];
+
+ ETHR_READ_MEMORY_BARRIER;
+
+ /*
+ * Abort if state has changed (i.e, either the exclusive
+ * flag is set, or modification counter changed).
+ */
+ old_state0 = *state0p;
+#if ETHR_AMC_NO_ATMCS__ == 2
+ *state0p = ETHR_AMC_ATMC_FUNC__(read_rb)(&amc->atomic[0]);
+ abrt = (old_state0 != *state0p);
+ abrt |= (state1 != ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[1]));
+ return abrt == 0;
+#else
+ *state0p = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+ return old_state0 == *state0p;
+#endif
+}
+
+static ETHR_INLINE void
+amc_read(ethr_amc_t *amc, int dw, ethr_sint_t *avar, ethr_sint_t *val)
+{
+ ETHR_AMC_SINT_T__ state0;
+ int i;
+
+#if ETHR_AMC_MAX_TRY_READ__ == 0
+ state0 = ETHR_AMC_ATMC_FUNC__(read)(&amc->atomic[0]);
+#else
+ for (i = 0; i < ETHR_AMC_MAX_TRY_READ__; i++) {
+ if (amc_try_read(amc, dw, avar, val, &state0))
+ return; /* read success */
+ ETHR_SPIN_BODY;
+ }
+#endif
+
+ state0 = amc_set_excl(amc, state0);
+
+ val[0] = avar[0];
+ if (dw)
+ val[1] = avar[1];
+
+ amc_unset_excl(amc, state0);
+}
+
+static ETHR_INLINE int
+amc_cmpxchg(ethr_amc_t *amc, int dw, ethr_sint_t *avar,
+ ethr_sint_t *new, ethr_sint_t *xchg)
+{
+ ethr_sint_t val[2];
+ ETHR_AMC_SINT_T__ state0;
+
+ if (amc_try_read(amc, dw, avar, val, &state0)) {
+ if (val[0] != xchg[0] || (dw && val[1] != xchg[1])) {
+ xchg[0] = val[0];
+ if (dw)
+ xchg[1] = val[1];
+ return 0; /* failed */
+ }
+ /* Operation will succeed if not interrupted */
+ }
+
+ state0 = amc_set_excl(amc, state0);
+
+ if (xchg[0] != avar[0] || (dw && xchg[1] != avar[1])) {
+ xchg[0] = avar[0];
+ if (dw)
+ xchg[1] = avar[1];
+
+ ETHR_DBG_CHK_EXCL_STATE(amc, state0);
+
+ amc_unset_excl(amc, state0);
+ return 0; /* failed */
+ }
+
+ avar[0] = new[0];
+ if (dw)
+ avar[1] = new[1];
+
+ amc_inc_mc_unset_excl(amc, state0);
+ return 1;
+}
+
+
+#define ETHR_AMC_MODIFICATION_OPS__(AMC, OPS) \\
+do { \\
+ ETHR_AMC_SINT_T__ state0__; \\
+ state0__ = ETHR_AMC_ATMC_FUNC__(read)(&(AMC)->atomic[0]); \\
+ state0__ = amc_set_excl((AMC), state0__); \\
+ { OPS; } \\
+ amc_inc_mc_unset_excl((AMC), state0__); \\
+} while (0)
+
+#endif /* amc fallback */
+"].
+
diff --git a/erts/lib_src/win/ethr_event.c b/erts/lib_src/win/ethr_event.c
index 68f093f49c..bc2f635c26 100644
--- a/erts/lib_src/win/ethr_event.c
+++ b/erts/lib_src/win/ethr_event.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -28,13 +28,10 @@
/* --- Windows implementation of thread events ------------------------------ */
-#pragma intrinsic(_InterlockedExchangeAdd)
-#pragma intrinsic(_InterlockedCompareExchange)
-
int
ethr_event_init(ethr_event *e)
{
- e->state = ETHR_EVENT_OFF__;
+ ethr_atomic32_init(&e->state, ETHR_EVENT_OFF__);
e->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
if (e->handle == INVALID_HANDLE_VALUE)
return ethr_win_get_errno__();
@@ -63,7 +60,6 @@ ethr_event_reset(ethr_event *e)
static ETHR_INLINE int
wait(ethr_event *e, int spincount)
{
- LONG state;
DWORD code;
int sc, res, until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -73,13 +69,9 @@ wait(ethr_event *e, int spincount)
sc = spincount;
while (1) {
- long on;
+ ethr_sint32_t state;
while (1) {
-#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
- state = e->state;
-#else
- state = _InterlockedExchangeAdd(&e->state, (LONG) 0);
-#endif
+ state = ethr_atomic32_read(&e->state);
if (state == ETHR_EVENT_ON__)
return 0;
if (sc == 0)
@@ -95,9 +87,9 @@ wait(ethr_event *e, int spincount)
}
if (state != ETHR_EVENT_OFF_WAITER__) {
- state = _InterlockedCompareExchange(&e->state,
- ETHR_EVENT_OFF_WAITER__,
- ETHR_EVENT_OFF__);
+ state = ethr_atomic32_cmpxchg(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
if (state == ETHR_EVENT_ON__)
return 0;
ETHR_ASSERT(state == ETHR_EVENT_OFF__);
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
index 789a360b11..3abda6de4c 100644
--- a/erts/lib_src/win/ethread.c
+++ b/erts/lib_src/win/ethread.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,6 +35,7 @@
#include <winerror.h>
#include <stdio.h>
#include <limits.h>
+#include <intrin.h>
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHREAD_IMPL__
@@ -158,6 +159,25 @@ ethr_abort__(void)
#endif
}
+#if defined(ETHR_X86_RUNTIME_CONF__)
+
+#pragma intrinsic(__cpuid)
+
+void
+ethr_x86_cpuid__(int *eax, int *ebx, int *ecx, int *edx)
+{
+ int CPUInfo[4];
+
+ __cpuid(CPUInfo, *eax);
+
+ *eax = CPUInfo[0];
+ *ebx = CPUInfo[1];
+ *ecx = CPUInfo[2];
+ *edx = CPUInfo[3];
+}
+
+#endif /* ETHR_X86_RUNTIME_CONF__ */
+
/*
* ----------------------------------------------------------------------------
* Exported functions
diff --git a/erts/ntbuild.erl b/erts/ntbuild.erl
deleted file mode 100644
index e48be58c17..0000000000
--- a/erts/ntbuild.erl
+++ /dev/null
@@ -1,332 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%% To be used from makefiles on the unix side executing things on the NT-side
--module(ntbuild).
-
--export([nmake/1, omake/1, waitnode/1, restart/1,
- setdir/1, run_tests/1, run_command/1]).
--export([serv_nmake/2, serv_omake/2, serv_restart/0, serv_run_tests/2,
- serv_run_command/1]).
-
-waitnode([NtNode]) ->
- % First, wait for node to disappear.
- case wait_disappear(NtNode, 0) of
- ok ->
- case wait_appear(NtNode, 0) of
- ok ->
- halt(0);
- fail ->
- halt(1)
- end;
- fail ->
- halt(1)
- end.
-
-% Wait for nt node to appear within 5 minutes.
-wait_appear(_NtNode, 300) ->
- fail;
-wait_appear(NtNode, N) ->
- receive after 1000 -> ok end,
- case nt_node_alive(NtNode, quiet) of
- no ->
- wait_appear(NtNode, N+1);
- yes ->
- ok
- end.
-
-
-
-% Waits for nt node to disappear within 3 minutes.
-wait_disappear(NtNode, 300) ->
- fail;
-wait_disappear(NtNode, N) ->
- receive after 1000 -> ok end,
- case nt_node_alive(NtNode, quiet) of
- yes ->
- wait_disappear(NtNode, N+1);
- no ->
- ok
- end.
-
-restart([NtNode]) ->
- case nt_node_alive(NtNode) of
- yes ->
- case rpc:call(NtNode, ntbuild, serv_restart, []) of
- ok ->
- io:format("halt(0)~n"),
- halt();
- Error ->
- io:format("halt(1)~n"),
- halt(1)
- end;
- no ->
- halt(1)
- end.
-
-
-setdir([NtNode, Dir0]) ->
- Dir = atom_to_list(Dir0),
- case nt_node_alive(NtNode) of
- yes ->
- case rpc:call(NtNode, file, set_cwd, [Dir]) of
- ok ->
- io:format("halt(0)~n"),
- halt();
- Error ->
- io:format("halt(1) (Error: ~p) (~p not found) ~n", [Error, Dir]),
- halt(1)
- end;
- no ->
- halt(1)
- end.
-
-run_tests([NtNode, Vsn0, Logdir]) ->
- Vsn = atom_to_list(Vsn0),
- case nt_node_alive(NtNode) of
- yes ->
- case rpc:call(NtNode, ntbuild, serv_run_tests, [Vsn, Logdir]) of
- ok ->
- io:format("halt(0)~n"),
- halt();
- Error ->
- io:format("RPC To Windows Node Failed: ~p~n", [Error]),
- io:format("halt(1)~n"),
- halt(1)
- end;
- no ->
- halt(1)
- end.
-
-run_command([NtNode, Cmd]) ->
- case nt_node_alive(NtNode) of
- yes ->
- case rpc:call(NtNode, ntbuild, serv_run_command, [Cmd]) of
- ok ->
- io:format("halt(0)~n"),
- halt();
- Error ->
- io:format("RPC To Windows Node Failed: ~p~n", [Error]),
- io:format("halt(1)~n"),
- halt(1)
- end;
- no ->
- halt(1)
- end.
-
-nmake([NtNode, Path, Options]) ->
-% io:format("nmake2(~w,~w)~n",[Path, Options]),
- Dir=atom_to_list(Path),
- Opt=atom_to_list(Options),
- case nt_node_alive(NtNode) of
- yes ->
- case rpc:call(NtNode, ntbuild, serv_nmake, [Dir, Opt]) of
- ok ->
- io:format("halt(0)~n"),
- halt();
- Error ->
- io:format("Error: ~n", [Error]),
- halt(1)
- end;
- no ->
- halt(1)
- end.
-
-omake([NtNode, Path, Options]) ->
- Dir=atom_to_list(Path),
- Opt=atom_to_list(Options),
- case nt_node_alive(NtNode) of
- yes ->
- case rpc:call(NtNode, ntbuild, serv_omake, [Dir, Opt]) of
- ok ->
- io:format("halt(0)~n"),
- halt();
- Error ->
- io:format("RPC To Windows Node Failed: ~p~n", [Error]),
- io:format("~p ~p~n", [Dir, Opt]),
- io:format("halt(1)~n"),
- halt(1)
- end;
- no ->
- halt(1)
- end.
-
-
-
-
-
-nt_node_alive(NtNode) ->
- case net:ping(NtNode) of
- pong ->
- yes;
- pang ->
- io:format("The NT node (~p) is not up. ~n",[NtNode]),
- no
- end.
-
-nt_node_alive(NtNode, quiet) ->
- case net:ping(NtNode) of
- pong ->
- yes;
- pang ->
- no
- end.
-
-
-
-%%%
-%%% The 'serv_' functions. These are the routines run on the WinNT node.
-%%%
-
-%%-----------------------
-%% serv_run_tests()
-%% Runs the tests.
-serv_run_tests(Vsn, Logdir) ->
- {ok, Cwd}=file:get_cwd(),
- io:format("serv_run_tests ~p ~p ~n", [Vsn, Logdir]),
- Cmd0= "set central_log_dir=" ++ Logdir,
- Erl = "C:/progra~1/erl"++Vsn++"/bin/erl",
- Cmd1 = Erl++" -sname a -setcookie a -noshell -noinput -s ts install -s ts run -s ts save -s erlang halt",
-%% Dir = "C:/temp/test_suite/test_server",
- Cmd= Cmd0 ++ "/r/n" ++ Cmd1,
- Dir = "C:/temp/test_server/p7a/test_server",
- file:set_cwd(Dir),
- Res=run_make_bat(Dir, Cmd),
- file:set_cwd(Cwd),
- Res.
-
-%%-----------------------
-%% serv_run_command()
-%% Runs a command.
-serv_run_command(Cmd) ->
- {ok, Cwd}=file:get_cwd(),
- Res=run_make_bat("", Cmd),
- file:set_cwd(Cwd),
- Res.
-
-%%-----------------------
-%% serv_restart()
-%% Reboots the NT machine.
-serv_restart() ->
- Exe="\\erts\\install_nt\\reboot.exe",
- open_port({spawn, Exe}, [stream, eof, in]),
- ok.
-
-
-%%-----------------------
-%% serv_nmake(Path, Options)
-%% Runs `nmake' in the given directory.
-%% Result: ok | error
-serv_nmake(Path, Options) ->
- {ok, Cwd}=file:get_cwd(),
- Command="nmake -e -f Makefile.win32 " ++ Options ++ " 2>&1",
- Res=run_make_bat(Path, Command),
- file:set_cwd(Cwd),
- Res.
-
-%%-----------------------
-%% serv_omake(Path, Options)
-%% Runs `omake' in the given directory.
-%% Result: ok | error
-serv_omake(Path, Options) ->
- {ok, Cwd}=file:get_cwd(),
- Command="omake -W -E -EN -f Makefile.win32 " ++ Options ++ " 2>&1",
- Res=run_make_bat(Path, Command),
- file:set_cwd(Cwd),
- Res.
-
-
-read_output(Port, SoFar) ->
-% io:format("(read_output)~n"),
- case get_data_from_port(Port) of
- eof ->
- io:format("*** eof ***~n"),
- io:format("Never reached a real message"),
- halt(1);
- {ok, Data} ->
- case print_line([SoFar|Data]) of
- {ok, Rest} ->
- read_output(Port, Rest);
- {done, Res} ->
- Res
- end
- end.
-
-print_line(Data) ->
- print_line(Data, []).
-
-print_line([], Acc) ->
- {ok, lists:reverse(Acc)};
-print_line([$*,$o,$k,$*|Rest], _Acc) ->
- io:format("*ok*~n"),
- {done, ok};
-print_line([$*,$e,$r,$r,$o,$r|Rest], _Acc) ->
- io:format("*error*~n"),
- {done, error};
-print_line([$\r,$\n|Rest], Acc) ->
- io:format("~s~n", [lists:reverse(Acc)]),
- print_line(Rest, []);
-print_line([Chr|Rest], Acc) ->
- print_line(Rest, [Chr|Acc]).
-
-get_data_from_port(Port) ->
- receive
- {Port, {data, Bytes}} ->
- {ok, Bytes};
- {Port, eof} ->
- unlink(Port),
- exit(Port, die),
- eof;
- Other ->
- io:format("Strange message received: ~p~n", [Other]),
- get_data_from_port(Port)
- end.
-
-
-run_make_bat(Dir, Make) ->
- {Name, Exe, Script}=create_make_script(Dir, Make),
- io:format("Exe:~p Cwd:~p Script:~p ~n",[Exe, Dir, Script]),
- case file:write_file(Name, Script) of
- ok ->
- case catch open_port({spawn, Exe}, [stderr_to_stdout, stream, hide,
- eof, in]) of
- Port when port(Port) ->
- read_output(Port, []);
- Other ->
- io:format("Error, open_port failed: ~p~n", [Other]),
- {open_port, Other, Exe}
- end;
- Error ->
- {write_file, Error, Name}
- end.
-
-create_make_script(Dir, Make) when atom(Make) ->
- create_make_script(Dir, atom_to_list(Make));
-create_make_script(Dir, Make) ->
- {"run_make_bs.bat",
- "run_make_bs 2>&1",
- ["@echo off\r\n",
- "@cd ", Dir, "\r\n",
- Make++"\r\n",
- "if errorlevel 1 echo *run_make_bs error*\r\n",
- "if not errorlevel 1 echo *ok*\r\n"]}.
-
-
-
-
-
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index dccb92a5af..df1831f340 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index b537546071..b78747bc84 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 901def5eba..cc170b86b2 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam
index 62ebbb9ffe..45a409738c 100644
--- a/erts/preloaded/ebin/otp_ring0.beam
+++ b/erts/preloaded/ebin/otp_ring0.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 66fcb9e6a9..6400cda2b5 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index 9894050cba..b2f3ab6c5b 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam
index 9ee70d59ec..411fc8d524 100644
--- a/erts/preloaded/ebin/prim_zip.beam
+++ b/erts/preloaded/ebin/prim_zip.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index 542e266f88..7a1f896d36 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl
index 024b20eadb..14a7a2bf20 100644
--- a/erts/preloaded/src/erl_prim_loader.erl
+++ b/erts/preloaded/src/erl_prim_loader.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -102,8 +102,14 @@ debug(#prim_state{debug = Deb}, Term) ->
%%% Interface Functions.
%%% --------------------------------------------------------
--spec start(term(), atom() | string(), host() | [host()]) ->
- {'ok', pid()} | {'error', term()}.
+-spec start(Id, Loader, Hosts) ->
+ {'ok', Pid} | {'error', What} when
+ Id :: term(),
+ Loader :: atom() | string(),
+ Hosts :: Host | [Host],
+ Host :: host(),
+ Pid :: pid(),
+ What :: term().
start(Id, Pgm, Hosts) when is_atom(Hosts) ->
start(Id, Pgm, [Hosts]);
start(Id, Pgm0, Hosts) ->
@@ -123,19 +129,7 @@ start(Id, Pgm0, Hosts) ->
{error,Reason}
end.
-start_it("ose_inet"=Cmd, Id, Pid, Hosts) ->
- %% Setup reserved port for ose_inet driver (only OSE)
- case catch erlang:open_port({spawn,Cmd}, [binary]) of
- {'EXIT',Why} ->
- ?dbg(ose_inet_port_open_fail, Why),
- Why;
- OseInetPort ->
- ?dbg(ose_inet_port, OseInetPort),
- OseInetPort
- end,
- start_it("inet", Id, Pid, Hosts);
-
-%% Hosts must be a list on form ['1.2.3.4' ...]
+%% Hosts must be a list of form ['1.2.3.4' ...]
start_it("inet", Id, Pid, Hosts) ->
process_flag(trap_exit, true),
?dbg(inet, {Id,Pid,Hosts}),
@@ -174,15 +168,20 @@ init_ack(Pid) ->
Pid ! {self(),ok},
ok.
--spec set_path([string()]) -> 'ok'.
+-spec set_path(Path) -> 'ok' when
+ Path :: [Dir :: string()].
set_path(Paths) when is_list(Paths) ->
request({set_path,Paths}).
--spec get_path() -> {'ok', [string()]}.
+-spec get_path() -> {'ok', Path} when
+ Path :: [Dir :: string()].
get_path() ->
request({get_path,[]}).
--spec get_file(atom() | string()) -> {'ok', binary(), string()} | 'error'.
+-spec get_file(Filename) -> {'ok', Bin, FullName} | 'error' when
+ Filename :: atom() | string(),
+ Bin :: binary(),
+ FullName :: string().
get_file(File) when is_atom(File) ->
get_file(atom_to_list(File));
get_file(File) ->
@@ -202,13 +201,15 @@ get_files(ModFiles, Fun) ->
ok
end.
--spec list_dir(string()) -> {'ok', [string()]} | 'error'.
+-spec list_dir(Dir) -> {'ok', Filenames} | 'error' when
+ Dir :: string(),
+ Filenames :: [Filename :: string()].
list_dir(Dir) ->
check_file_result(list_dir, Dir, request({list_dir,Dir})).
-%% -> {ok,Info} | error
--spec read_file_info(string()) -> {'ok', tuple()} | 'error'.
-
+-spec read_file_info(Filename) -> {'ok', FileInfo} | 'error' when
+ Filename :: string(),
+ FileInfo :: file:file_info().
read_file_info(File) ->
check_file_result(read_file_info, File, request({read_file_info,File})).
@@ -395,7 +396,7 @@ handle_timeout(State = #state{loader = inet}, Parent) ->
inet_timeout_handler(State, Parent).
%%% --------------------------------------------------------
-%%% Functions which handles efile as prim_loader (default).
+%%% Functions which handle efile as prim_loader (default).
%%% --------------------------------------------------------
%%% Reading many files in parallel is an optimization.
@@ -469,7 +470,7 @@ efile_get_file_from_port2(#state{prim_state = PS} = State, File) ->
end.
efile_get_file_from_port3(State, File, [P | Paths]) ->
- case efile_get_file_from_port2(State, concat([P,"/",File])) of
+ case efile_get_file_from_port2(State, join(P, File)) of
{{error,Reason},State1} when Reason =/= emfile ->
case Paths of
[] -> % return last error
@@ -522,7 +523,7 @@ efile_timeout_handler(#state{n_timeouts = N} = State, _Parent) ->
end.
%%% --------------------------------------------------------
-%%% Functions which handles inet prim_loader
+%%% Functions which handle inet prim_loader
%%% --------------------------------------------------------
%%
@@ -643,7 +644,7 @@ inet_get_file_from_port(State, File, Paths) ->
end.
inet_get_file_from_port1(File, [P | Paths], State) ->
- File1 = concat([P,"/",File]),
+ File1 = join(P, File),
case inet_send_and_rcv({get,File1}, File1, State) of
{{error,Reason},State1} ->
case Paths of
@@ -728,7 +729,7 @@ udp_options() ->
%% INET version IPv4 addresses
%%
ll_tcp_connect(LocalPort, IP, RemotePort) ->
- case ll_open_set_bind(tcp, ?INET_FAMILY, tcp_options(),
+ case ll_open_set_bind(tcp, ?INET_FAMILY, stream, tcp_options(),
?INET_ADDRESS, LocalPort) of
{ok,S} ->
case prim_inet:connect(S, IP, RemotePort, tcp_timeout()) of
@@ -742,11 +743,11 @@ ll_tcp_connect(LocalPort, IP, RemotePort) ->
%% Open and initialize an udp port for broadcast
%%
ll_udp_open(P) ->
- ll_open_set_bind(udp, ?INET_FAMILY, udp_options(), ?INET_ADDRESS, P).
+ ll_open_set_bind(udp, ?INET_FAMILY, dgram, udp_options(), ?INET_ADDRESS, P).
-ll_open_set_bind(Protocol, Family, SOpts, IP, Port) ->
- case prim_inet:open(Protocol, Family) of
+ll_open_set_bind(Protocol, Family, Type, SOpts, IP, Port) ->
+ case prim_inet:open(Protocol, Family, Type) of
{ok, S} ->
case prim_inet:setopts(S, SOpts) of
ok ->
@@ -1151,14 +1152,8 @@ send_all(U, [IP | AL], Cmd) ->
send_all(U, AL, Cmd);
send_all(_U, [], _) -> ok.
-%%concat([A|T]) when is_atom(A) -> %Atom
-%% atom_to_list(A) ++ concat(T);
-concat([C|T]) when C >= 0, C =< 255 ->
- [C|concat(T)];
-concat([S|T]) -> %String
- S ++ concat(T);
-concat([]) ->
- [].
+join(P, F) ->
+ P ++ "/" ++ F.
member(X, [X|_]) -> true;
member(X, [_|Y]) -> member(X, Y);
@@ -1360,9 +1355,7 @@ pathtype(Name) when is_list(Name) ->
absolute;
_ ->
relative
- end;
- {ose,_} ->
- unix_pathtype(Name)
+ end
end.
unix_pathtype(Name) ->
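Two things stand out in the erl_prim_loader changes above: the specs are rewritten in the named-argument style (`-spec f(Arg) -> Res when Arg :: ...`) used throughout this patch, and the inet loader now opens its sockets through prim_inet:open/3, passing the socket type (stream or dgram) explicitly instead of letting the driver infer it from the protocol. A rough sketch of the UDP open sequence ll_open_set_bind/6 performs; the option list and wildcard address below are illustrative assumptions, not the loader's exact values:

    open_boot_udp(LocalPort) ->
        %% open an IPv4 UDP socket via the inet driver, type given explicitly
        {ok, S} = prim_inet:open(udp, inet, dgram),
        %% apply whatever socket options the caller needs (assumed here)
        ok = prim_inet:setopts(S, [{mode, binary}, {active, true}]),
        %% bind to the wildcard address and the requested local port
        case prim_inet:bind(S, {0,0,0,0}, LocalPort) of
            {ok, _AssignedPort} -> {ok, S};
            {error, _} = Error  -> close_and_return(S, Error)
        end.

    close_and_return(S, Error) ->
        prim_inet:close(S),
        Error.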
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 4679a916c7..1cbce5b80b 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -35,14 +35,18 @@
-export([delay_trap/2]).
-export([set_cookie/2, get_cookie/0]).
-export([nodes/0]).
--export([concat_binary/1]).
+
-export([list_to_integer/2,integer_to_list/2]).
-export([flush_monitor_message/2]).
-export([set_cpu_topology/1, format_cpu_topology/1]).
-export([await_proc_exit/3]).
+-export([memory/0, memory/1]).
+-export([alloc_info/1, alloc_sizes/1]).
+
+-export([gather_sched_wall_time_result/1,
+ await_sched_wall_time_modifications/2]).
-deprecated([hash/2]).
--deprecated([concat_binary/1]).
% Get rid of autoimports of spawn to avoid clashes with ourselves.
-compile({no_auto_import,[spawn/1]}).
@@ -53,22 +57,31 @@
-compile({no_auto_import,[spawn_opt/4]}).
-compile({no_auto_import,[spawn_opt/5]}).
-%%--------------------------------------------------------------------------
+-export_type([timestamp/0]).
--type date() :: {pos_integer(), pos_integer(), pos_integer()}.
--type time() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}.
--type date_time() :: {date(), time()}.
+-type timestamp() :: {MegaSecs :: non_neg_integer(),
+ Secs :: non_neg_integer(),
+ MicroSecs :: non_neg_integer()}.
%%--------------------------------------------------------------------------
+-spec apply(Fun, Args) -> term() when
+ Fun :: function(),
+ Args :: [term()].
apply(Fun, Args) ->
erlang:apply(Fun, Args).
+-spec apply(Module, Function, Args) -> term() when
+ Module :: module(),
+ Function :: atom(),
+ Args :: [term()].
apply(Mod, Name, Args) ->
erlang:apply(Mod, Name, Args).
%% Spawns with a fun
+-spec spawn(Fun) -> pid() when
+ Fun :: function().
spawn(F) when is_function(F) ->
spawn(erlang, apply, [F, []]);
spawn({M,F}=MF) when is_atom(M), is_atom(F) ->
@@ -76,6 +89,9 @@ spawn({M,F}=MF) when is_atom(M), is_atom(F) ->
spawn(F) ->
erlang:error(badarg, [F]).
+-spec spawn(Node, Fun) -> pid() when
+ Node :: node(),
+ Fun :: function().
spawn(N, F) when N =:= node() ->
spawn(F);
spawn(N, F) when is_function(F) ->
@@ -85,6 +101,8 @@ spawn(N, {M,F}=MF) when is_atom(M), is_atom(F) ->
spawn(N, F) ->
erlang:error(badarg, [N, F]).
+-spec spawn_link(Fun) -> pid() when
+ Fun :: function().
spawn_link(F) when is_function(F) ->
spawn_link(erlang, apply, [F, []]);
spawn_link({M,F}=MF) when is_atom(M), is_atom(F) ->
@@ -92,6 +110,9 @@ spawn_link({M,F}=MF) when is_atom(M), is_atom(F) ->
spawn_link(F) ->
erlang:error(badarg, [F]).
+-spec spawn_link(Node, Fun) -> pid() when
+ Node :: node(),
+ Fun :: function().
spawn_link(N, F) when N =:= node() ->
spawn_link(F);
spawn_link(N, F) when is_function(F) ->
@@ -103,16 +124,30 @@ spawn_link(N, F) ->
%% Spawn and atomically set up a monitor.
+-spec spawn_monitor(Fun) -> {pid(), reference()} when
+ Fun :: function().
spawn_monitor(F) when is_function(F, 0) ->
erlang:spawn_opt({erlang,apply,[F,[]],[monitor]});
spawn_monitor(F) ->
erlang:error(badarg, [F]).
+-spec spawn_monitor(Module, Function, Args) -> {pid(), reference()} when
+ Module :: module(),
+ Function :: atom(),
+ Args :: [term()].
spawn_monitor(M, F, A) when is_atom(M), is_atom(F), is_list(A) ->
erlang:spawn_opt({M,F,A,[monitor]});
spawn_monitor(M, F, A) ->
erlang:error(badarg, [M,F,A]).
+-spec spawn_opt(Fun, Options) -> pid() | {pid(), reference()} when
+ Fun :: function(),
+ Options :: [Option],
+ Option :: link | monitor | {priority, Level}
+ | {fullsweep_after, Number :: non_neg_integer()}
+ | {min_heap_size, Size :: non_neg_integer()}
+ | {min_bin_vheap_size, VSize :: non_neg_integer()},
+ Level :: low | normal | high.
spawn_opt(F, O) when is_function(F) ->
spawn_opt(erlang, apply, [F, []], O);
spawn_opt({M,F}=MF, O) when is_atom(M), is_atom(F) ->
@@ -122,6 +157,15 @@ spawn_opt({M,F,A}, O) -> % For (undocumented) backward compatibility
spawn_opt(F, O) ->
erlang:error(badarg, [F, O]).
+-spec spawn_opt(Node, Fun, Options) -> pid() | {pid(), reference()} when
+ Node :: node(),
+ Fun :: function(),
+ Options :: [Option],
+ Option :: link | monitor | {priority, Level}
+ | {fullsweep_after, Number :: non_neg_integer()}
+ | {min_heap_size, Size :: non_neg_integer()}
+ | {min_bin_vheap_size, VSize :: non_neg_integer()},
+ Level :: low | normal | high.
spawn_opt(N, F, O) when N =:= node() ->
spawn_opt(F, O);
spawn_opt(N, F, O) when is_function(F) ->
@@ -133,6 +177,11 @@ spawn_opt(N, F, O) ->
%% Spawns with MFA
+-spec spawn(Node, Module, Function, Args) -> pid() when
+ Node :: node(),
+ Module :: module(),
+ Function :: atom(),
+ Args :: [term()].
spawn(N,M,F,A) when N =:= node(), is_atom(M), is_atom(F), is_list(A) ->
spawn(M,F,A);
spawn(N,M,F,A) when is_atom(N), is_atom(M), is_atom(F) ->
@@ -158,6 +207,11 @@ spawn(N,M,F,A) when is_atom(N), is_atom(M), is_atom(F) ->
spawn(N,M,F,A) ->
erlang:error(badarg, [N, M, F, A]).
+-spec spawn_link(Node, Module, Function, Args) -> pid() when
+ Node :: node(),
+ Module :: module(),
+ Function :: atom(),
+ Args :: [term()].
spawn_link(N,M,F,A) when N =:= node(), is_atom(M), is_atom(F), is_list(A) ->
spawn_link(M,F,A);
spawn_link(N,M,F,A) when is_atom(N), is_atom(M), is_atom(F) ->
@@ -183,6 +237,17 @@ spawn_link(N,M,F,A) when is_atom(N), is_atom(M), is_atom(F) ->
spawn_link(N,M,F,A) ->
erlang:error(badarg, [N, M, F, A]).
+-spec spawn_opt(Module, Function, Args, Options) ->
+ pid() | {pid(), reference()} when
+ Module :: module(),
+ Function :: atom(),
+ Args :: [term()],
+ Options :: [Option],
+ Option :: link | monitor | {priority, Level}
+ | {fullsweep_after, Number :: non_neg_integer()}
+ | {min_heap_size, Size :: non_neg_integer()}
+ | {min_bin_vheap_size, VSize :: non_neg_integer()},
+ Level :: low | normal | high.
spawn_opt(M, F, A, Opts) ->
case catch erlang:spawn_opt({M,F,A,Opts}) of
{'EXIT',{Reason,_}} ->
@@ -191,6 +256,18 @@ spawn_opt(M, F, A, Opts) ->
Res
end.
+-spec spawn_opt(Node, Module, Function, Args, Options) ->
+ pid() | {pid(), reference()} when
+ Node :: node(),
+ Module :: module(),
+ Function :: atom(),
+ Args :: [term()],
+ Options :: [Option],
+ Option :: link | monitor | {priority, Level}
+ | {fullsweep_after, Number :: non_neg_integer()}
+ | {min_heap_size, Size :: non_neg_integer()}
+ | {min_bin_vheap_size, VSize :: non_neg_integer()},
+ Level :: low | normal | high.
spawn_opt(N, M, F, A, O) when N =:= node(),
is_atom(M), is_atom(F), is_list(A),
is_list(O) ->
@@ -260,18 +337,25 @@ crasher(Node,Mod,Fun,Args,Opts,Reason) ->
[Mod,Fun,Args,Opts,Node]),
exit(Reason).
--spec yield() -> 'true'.
+-spec erlang:yield() -> 'true'.
yield() ->
erlang:yield().
--spec nodes() -> [node()].
+-spec nodes() -> Nodes when
+ Nodes :: [node()].
nodes() ->
erlang:nodes(visible).
--spec disconnect_node(node()) -> boolean().
+-spec disconnect_node(Node) -> boolean() | ignored when
+ Node :: node().
disconnect_node(Node) ->
net_kernel:disconnect(Node).
+-spec erlang:fun_info(Fun) -> [{Item, Info}] when
+ Fun :: function(),
+ Item :: arity | env | index | name
+ | module | new_index | new_uniq | pid | type | uniq,
+ Info :: term().
fun_info(Fun) when is_function(Fun) ->
Keys = [type,env,arity,name,uniq,index,new_uniq,new_index,module,pid],
fun_info_1(Keys, Fun, []).
@@ -283,24 +367,37 @@ fun_info_1([K|Ks], Fun, A) ->
end;
fun_info_1([], _, A) -> A.
--type dst() :: pid() | port() | atom() | {atom(), node()}.
+-type dst() :: pid()
+ | port()
+ | (RegName :: atom())
+ | {RegName :: atom(), Node :: node()}.
--spec send_nosuspend(dst(), term()) -> boolean().
+-spec erlang:send_nosuspend(Dest, Msg) -> boolean() when
+ Dest :: dst(),
+ Msg :: term().
send_nosuspend(Pid, Msg) ->
send_nosuspend(Pid, Msg, []).
--spec send_nosuspend(dst(), term(), ['noconnect' | 'nosuspend']) -> boolean().
+-spec erlang:send_nosuspend(Dest, Msg, Options) -> boolean() when
+ Dest :: dst(),
+ Msg :: term(),
+ Options :: [noconnect].
send_nosuspend(Pid, Msg, Opts) ->
case erlang:send(Pid, Msg, [nosuspend|Opts]) of
ok -> true;
_ -> false
end.
--spec localtime_to_universaltime(date_time()) -> date_time().
+-spec erlang:localtime_to_universaltime({Date1, Time1}) -> {Date2, Time2} when
+ Date1 :: calendar:date(),
+ Date2 :: calendar:date(),
+ Time1 :: calendar:time(),
+ Time2 :: calendar:time().
localtime_to_universaltime(Localtime) ->
erlang:localtime_to_universaltime(Localtime, undefined).
--spec suspend_process(pid()) -> 'true'.
+-spec erlang:suspend_process(Suspendee) -> 'true' when
+ Suspendee :: pid().
suspend_process(P) ->
case catch erlang:suspend_process(P, []) of
{'EXIT', {Reason, _}} -> erlang:error(Reason, [P]);
@@ -426,6 +523,9 @@ delay_trap(Result, Timeout) -> receive after Timeout -> Result end.
%% Messages to us use our cookie. IF we change our cookie, other nodes
%% have to reflect that, which we cannot forsee.
%%
+-spec erlang:set_cookie(Node, Cookie) -> true when
+ Node :: node(),
+ Cookie :: atom().
set_cookie(Node, C) when Node =/= nonode@nohost, is_atom(Node) ->
case is_atom(C) of
true ->
@@ -434,14 +534,14 @@ set_cookie(Node, C) when Node =/= nonode@nohost, is_atom(Node) ->
error(badarg)
end.
--spec get_cookie() -> atom().
+-spec erlang:get_cookie() -> Cookie | nocookie when
+ Cookie :: atom().
get_cookie() ->
auth:get_cookie().
-concat_binary(List) ->
- list_to_binary(List).
-
--spec integer_to_list(integer(), 1..255) -> string().
+-spec integer_to_list(Integer, Base) -> string() when
+ Integer :: integer(),
+ Base :: 2..36.
integer_to_list(I, 10) ->
erlang:integer_to_list(I);
integer_to_list(I, Base)
@@ -469,6 +569,9 @@ integer_to_list(I0, Base, R0) ->
end.
+-spec list_to_integer(String, Base) -> integer() when
+ String :: string(),
+ Base :: 2..36.
list_to_integer(L, 10) ->
erlang:list_to_integer(L);
list_to_integer(L, Base)
@@ -689,10 +792,447 @@ await_proc_exit(Proc, Op, Data) ->
end
end.
--spec min(term(), term()) -> term().
+-spec min(Term1, Term2) -> Minimum when
+ Term1 :: term(),
+ Term2 :: term(),
+ Minimum :: term().
min(A, B) when A > B -> B;
min(A, _) -> A.
--spec max(term(), term()) -> term().
+-spec max(Term1, Term2) -> Maximum when
+ Term1 :: term(),
+ Term2 :: term(),
+ Maximum :: term().
max(A, B) when A < B -> B;
max(A, _) -> A.
+
+
+%%
+%% erlang:memory/[0,1]
+%%
+%% NOTE! When updating these functions, make sure to also update
+%% erts_memory() in $ERL_TOP/erts/emulator/beam/erl_alloc.c
+%%
+
+-type memory_type() :: 'total' | 'processes' | 'processes_used' | 'system' | 'atom' | 'atom_used' | 'binary' | 'code' | 'ets' | 'low' | 'maximum'.
+
+-define(CARRIER_ALLOCS, [mseg_alloc, sbmbc_alloc, sbmbc_low_alloc]).
+-define(LOW_ALLOCS, [sbmbc_low_alloc, ll_low_alloc, std_low_alloc]).
+-define(ALL_NEEDED_ALLOCS, (erlang:system_info(alloc_util_allocators)
+ -- ?CARRIER_ALLOCS)).
+
+-record(memory, {total = 0,
+ processes = 0,
+ processes_used = 0,
+ system = 0,
+ atom = 0,
+ atom_used = 0,
+ binary = 0,
+ code = 0,
+ ets = 0,
+ low = 0,
+ maximum = 0}).
+
+-spec memory() -> [{memory_type(), non_neg_integer()}].
+memory() ->
+ case aa_mem_data(au_mem_data(?ALL_NEEDED_ALLOCS)) of
+ notsup ->
+ erlang:error(notsup);
+ Mem ->
+ InstrTail = case Mem#memory.maximum of
+ 0 -> [];
+ _ -> [{maximum, Mem#memory.maximum}]
+ end,
+ Tail = case Mem#memory.low of
+ 0 -> InstrTail;
+ _ -> [{low, Mem#memory.low} | InstrTail]
+ end,
+ [{total, Mem#memory.total},
+ {processes, Mem#memory.processes},
+ {processes_used, Mem#memory.processes_used},
+ {system, Mem#memory.system},
+ {atom, Mem#memory.atom},
+ {atom_used, Mem#memory.atom_used},
+ {binary, Mem#memory.binary},
+ {code, Mem#memory.code},
+ {ets, Mem#memory.ets} | Tail]
+ end.
+
+-spec memory(memory_type()|[memory_type()]) -> non_neg_integer() | [{memory_type(), non_neg_integer()}].
+memory(Type) when is_atom(Type) ->
+ {AA, ALCU, ChkSup, BadArgZero} = need_mem_info(Type),
+ case get_mem_data(ChkSup, ALCU, AA) of
+ notsup ->
+ erlang:error(notsup, [Type]);
+ Mem ->
+ Value = get_memval(Type, Mem),
+ case {BadArgZero, Value} of
+ {true, 0} -> erlang:error(badarg, [Type]);
+ _ -> Value
+ end
+ end;
+memory(Types) when is_list(Types) ->
+ {AA, ALCU, ChkSup, BadArgZeroList} = need_mem_info_list(Types),
+ case get_mem_data(ChkSup, ALCU, AA) of
+ notsup ->
+ erlang:error(notsup, [Types]);
+ Mem ->
+ case memory_result_list(Types, BadArgZeroList, Mem) of
+ badarg -> erlang:error(badarg, [Types]);
+ Result -> Result
+ end
+ end.
+
+memory_result_list([], [], _Mem) ->
+ [];
+memory_result_list([T|Ts], [BAZ|BAZs], Mem) ->
+ case memory_result_list(Ts, BAZs, Mem) of
+ badarg -> badarg;
+ TVs ->
+ V = get_memval(T, Mem),
+ case {BAZ, V} of
+ {true, 0} -> badarg;
+ _ -> [{T, V}| TVs]
+ end
+ end.
+
+get_mem_data(true, AlcUAllocs, NeedAllocatedAreas) ->
+ case memory_is_supported() of
+ false -> notsup;
+ true -> get_mem_data(false, AlcUAllocs, NeedAllocatedAreas)
+ end;
+get_mem_data(false, AlcUAllocs, NeedAllocatedAreas) ->
+ AlcUMem = case AlcUAllocs of
+ [] -> #memory{};
+ _ ->
+ au_mem_data(AlcUAllocs)
+ end,
+ case NeedAllocatedAreas of
+ true -> aa_mem_data(AlcUMem);
+ false -> AlcUMem
+ end.
+
+need_mem_info_list([]) ->
+ {false, [], false, []};
+need_mem_info_list([T|Ts]) ->
+ {MAA, MALCU, MChkSup, MBadArgZero} = need_mem_info_list(Ts),
+ {AA, ALCU, ChkSup, BadArgZero} = need_mem_info(T),
+ {case AA of
+ true -> true;
+ _ -> MAA
+ end,
+ ALCU ++ (MALCU -- ALCU),
+ case ChkSup of
+ true -> true;
+ _ -> MChkSup
+ end,
+ [BadArgZero|MBadArgZero]}.
+
+need_mem_info(Type) when Type == total;
+ Type == system ->
+ {true, ?ALL_NEEDED_ALLOCS, false, false};
+need_mem_info(Type) when Type == processes;
+ Type == processes_used ->
+ {true, [eheap_alloc, fix_alloc], true, false};
+need_mem_info(Type) when Type == atom;
+ Type == atom_used;
+ Type == code ->
+ {true, [], true, false};
+need_mem_info(binary) ->
+ {false, [binary_alloc], true, false};
+need_mem_info(ets) ->
+ {true, [ets_alloc], true, false};
+need_mem_info(low) ->
+ LowAllocs = ?LOW_ALLOCS -- ?CARRIER_ALLOCS,
+ {_, _, FeatureList, _} = erlang:system_info(allocator),
+ AlcUAllocs = case LowAllocs -- FeatureList of
+ [] -> LowAllocs;
+ _ -> []
+ end,
+ {false, AlcUAllocs, true, true};
+need_mem_info(maximum) ->
+ {true, [], true, true};
+need_mem_info(_) ->
+ {false, [], false, true}.
+
+get_memval(total, #memory{total = V}) -> V;
+get_memval(processes, #memory{processes = V}) -> V;
+get_memval(processes_used, #memory{processes_used = V}) -> V;
+get_memval(system, #memory{system = V}) -> V;
+get_memval(atom, #memory{atom = V}) -> V;
+get_memval(atom_used, #memory{atom_used = V}) -> V;
+get_memval(binary, #memory{binary = V}) -> V;
+get_memval(code, #memory{code = V}) -> V;
+get_memval(ets, #memory{ets = V}) -> V;
+get_memval(low, #memory{low = V}) -> V;
+get_memval(maximum, #memory{maximum = V}) -> V;
+get_memval(_, #memory{}) -> 0.
+
+memory_is_supported() ->
+ {_, _, FeatureList, _} = erlang:system_info(allocator),
+ case ((erlang:system_info(alloc_util_allocators)
+ -- ?CARRIER_ALLOCS)
+ -- FeatureList) of
+ [] -> true;
+ _ -> false
+ end.
+
+get_blocks_size([{blocks_size, Sz, _, _} | Rest], Acc) ->
+ get_blocks_size(Rest, Acc+Sz);
+get_blocks_size([{_, _, _, _} | Rest], Acc) ->
+ get_blocks_size(Rest, Acc);
+get_blocks_size([], Acc) ->
+ Acc.
+
+blocks_size([{Carriers, SizeList} | Rest], Acc) when Carriers == mbcs;
+ Carriers == sbcs;
+ Carriers == sbmbcs ->
+ blocks_size(Rest, get_blocks_size(SizeList, Acc));
+blocks_size([_ | Rest], Acc) ->
+ blocks_size(Rest, Acc);
+blocks_size([], Acc) ->
+ Acc.
+
+get_fix_proc([{ProcType, A1, U1}| Rest], {A0, U0}) when ProcType == proc;
+ ProcType == monitor_sh;
+ ProcType == nlink_sh;
+ ProcType == msg_ref ->
+ get_fix_proc(Rest, {A0+A1, U0+U1});
+get_fix_proc([_|Rest], Acc) ->
+ get_fix_proc(Rest, Acc);
+get_fix_proc([], Acc) ->
+ Acc.
+
+fix_proc([{fix_types, SizeList} | _Rest], Acc) ->
+ get_fix_proc(SizeList, Acc);
+fix_proc([_ | Rest], Acc) ->
+ fix_proc(Rest, Acc);
+fix_proc([], Acc) ->
+ Acc.
+
+is_low_alloc(_A, []) ->
+ false;
+is_low_alloc(A, [A|_As]) ->
+ true;
+is_low_alloc(A, [_A|As]) ->
+ is_low_alloc(A, As).
+
+is_low_alloc(A) ->
+ is_low_alloc(A, ?LOW_ALLOCS).
+
+au_mem_data(notsup, _) ->
+ notsup;
+au_mem_data(_, [{_, false} | _]) ->
+ notsup;
+au_mem_data(#memory{total = Tot,
+ processes = Proc,
+ processes_used = ProcU} = Mem,
+ [{eheap_alloc, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ processes = Proc+Sz,
+ processes_used = ProcU+Sz},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ system = Sys,
+ ets = Ets} = Mem,
+ [{ets_alloc, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ system = Sys+Sz,
+ ets = Ets+Sz},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ system = Sys,
+ binary = Bin} = Mem,
+ [{binary_alloc, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ system = Sys+Sz,
+ binary = Bin+Sz},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ processes = Proc,
+ processes_used = ProcU,
+ system = Sys} = Mem,
+ [{fix_alloc, _, Data} | Rest]) ->
+ {A, U} = fix_proc(Data, {0, 0}),
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ processes = Proc+A,
+ processes_used = ProcU+U,
+ system = Sys+Sz-A},
+ Rest);
+au_mem_data(#memory{total = Tot,
+ system = Sys,
+ low = Low} = Mem,
+ [{A, _, Data} | Rest]) ->
+ Sz = blocks_size(Data, 0),
+ au_mem_data(Mem#memory{total = Tot+Sz,
+ system = Sys+Sz,
+ low = case is_low_alloc(A) of
+ true -> Low+Sz;
+ false -> Low
+ end},
+ Rest);
+au_mem_data(EMD, []) ->
+ EMD.
+
+au_mem_data(Allocs) ->
+ Ref = make_ref(),
+ erlang:system_info({allocator_sizes, Ref, Allocs}),
+ receive_emd(Ref).
+
+receive_emd(_Ref, EMD, 0) ->
+ EMD;
+receive_emd(Ref, EMD, N) ->
+ receive
+ {Ref, _, Data} ->
+ receive_emd(Ref, au_mem_data(EMD, Data), N-1)
+ end.
+
+receive_emd(Ref) ->
+ receive_emd(Ref, #memory{}, erlang:system_info(schedulers)).
+
+aa_mem_data(#memory{} = Mem,
+ [{maximum, Max} | Rest]) ->
+ aa_mem_data(Mem#memory{maximum = Max},
+ Rest);
+aa_mem_data(#memory{} = Mem,
+ [{total, Tot} | Rest]) ->
+ aa_mem_data(Mem#memory{total = Tot,
+ system = 0}, % system will be adjusted later
+ Rest);
+aa_mem_data(#memory{atom = Atom,
+ atom_used = AtomU} = Mem,
+ [{atom_space, Alloced, Used} | Rest]) ->
+ aa_mem_data(Mem#memory{atom = Atom+Alloced,
+ atom_used = AtomU+Used},
+ Rest);
+aa_mem_data(#memory{atom = Atom,
+ atom_used = AtomU} = Mem,
+ [{atom_table, Sz} | Rest]) ->
+ aa_mem_data(Mem#memory{atom = Atom+Sz,
+ atom_used = AtomU+Sz},
+ Rest);
+aa_mem_data(#memory{ets = Ets} = Mem,
+ [{ets_misc, Sz} | Rest]) ->
+ aa_mem_data(Mem#memory{ets = Ets+Sz},
+ Rest);
+aa_mem_data(#memory{processes = Proc,
+ processes_used = ProcU,
+ system = Sys} = Mem,
+ [{ProcData, Sz} | Rest]) when ProcData == bif_timer;
+ ProcData == link_lh;
+ ProcData == process_table ->
+ aa_mem_data(Mem#memory{processes = Proc+Sz,
+ processes_used = ProcU+Sz,
+ system = Sys-Sz},
+ Rest);
+aa_mem_data(#memory{code = Code} = Mem,
+ [{CodeData, Sz} | Rest]) when CodeData == module_table;
+ CodeData == export_table;
+ CodeData == export_list;
+ CodeData == fun_table;
+ CodeData == module_refs;
+ CodeData == loaded_code ->
+ aa_mem_data(Mem#memory{code = Code+Sz},
+ Rest);
+aa_mem_data(EMD, [{_, _} | Rest]) ->
+ aa_mem_data(EMD, Rest);
+aa_mem_data(#memory{total = Tot,
+ processes = Proc,
+ system = Sys} = Mem,
+ []) when Sys =< 0 ->
+ %% Instrumented runtime system -> Sys = Tot - Proc
+ Mem#memory{system = Tot - Proc};
+aa_mem_data(EMD, []) ->
+ EMD.
+
+aa_mem_data(notsup) ->
+ notsup;
+aa_mem_data(EMD) ->
+ aa_mem_data(EMD, erlang:system_info(allocated_areas)).
+
+%%
+%% alloc_info/1 and alloc_sizes/1 are for internal use only (used by
+%% erlang:system_info({allocator|allocator_sizes, _})).
+%%
+
+alloc_info(Allocs) ->
+ get_alloc_info(allocator, Allocs).
+
+alloc_sizes(Allocs) ->
+ get_alloc_info(allocator_sizes, Allocs).
+
+get_alloc_info(Type, AAtom) when is_atom(AAtom) ->
+ [{AAtom, Result}] = get_alloc_info(Type, [AAtom]),
+ Result;
+get_alloc_info(Type, AList) when is_list(AList) ->
+ Ref = make_ref(),
+ erlang:system_info({Type, Ref, AList}),
+ receive_allocator(Ref,
+ erlang:system_info(schedulers),
+ mk_res_list(AList)).
+
+mk_res_list([]) ->
+ [];
+mk_res_list([Alloc | Rest]) ->
+ [{Alloc, []} | mk_res_list(Rest)].
+
+insert_instance(I, N, []) ->
+ [{instance, N, I}];
+insert_instance(I, N, [{instance, M, _}|_] = Rest) when N < M ->
+ [{instance, N, I} | Rest];
+insert_instance(I, N, [Prev|Rest]) ->
+ [Prev | insert_instance(I, N, Rest)].
+
+insert_info([], Ys) ->
+ Ys;
+insert_info([{A, false}|Xs], [{A, _IList}|Ys]) ->
+ insert_info(Xs, [{A, false}|Ys]);
+insert_info([{A, N, I}|Xs], [{A, IList}|Ys]) ->
+ insert_info(Xs, [{A, insert_instance(I, N, IList)}|Ys]);
+insert_info([{A1, _}|_] = Xs, [{A2, _} = Y | Ys]) when A1 /= A2 ->
+ [Y | insert_info(Xs, Ys)];
+insert_info([{A1, _, _}|_] = Xs, [{A2, _} = Y | Ys]) when A1 /= A2 ->
+ [Y | insert_info(Xs, Ys)].
+
+receive_allocator(_Ref, 0, Acc) ->
+ Acc;
+receive_allocator(Ref, N, Acc) ->
+ receive
+ {Ref, _, InfoList} ->
+ receive_allocator(Ref, N-1, insert_info(InfoList, Acc))
+ end.
+
+-spec await_sched_wall_time_modifications(Ref, Result) -> boolean() when
+ Ref :: reference(),
+ Result :: boolean().
+
+await_sched_wall_time_modifications(Ref, Result) ->
+ sched_wall_time(Ref, erlang:system_info(schedulers)),
+ Result.
+
+-spec gather_sched_wall_time_result(Ref) -> [{pos_integer(),
+ non_neg_integer(),
+ non_neg_integer()}] when
+ Ref :: reference().
+
+gather_sched_wall_time_result(Ref) when is_reference(Ref) ->
+ sched_wall_time(Ref, erlang:system_info(schedulers), []).
+
+sched_wall_time(_Ref, 0) ->
+ ok;
+sched_wall_time(Ref, N) ->
+ receive Ref -> sched_wall_time(Ref, N-1) end.
+
+sched_wall_time(_Ref, 0, Acc) ->
+ Acc;
+sched_wall_time(Ref, N, undefined) ->
+ receive {Ref, _} -> sched_wall_time(Ref, N-1, undefined) end;
+sched_wall_time(Ref, N, Acc) ->
+ receive
+ {Ref, undefined} -> sched_wall_time(Ref, N-1, undefined);
+ {Ref, SWT} -> sched_wall_time(Ref, N-1, [SWT|Acc])
+ end.
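The Erlang side of erlang:memory/0,1 added above relies on a fan-out/collect pattern: erlang:system_info({allocator_sizes, Ref, Allocs}) makes every scheduler report the sizes of its allocator instances in a message tagged with Ref, and receive_emd/3 then collects exactly erlang:system_info(schedulers) replies before the totals are summed. gather_sched_wall_time_result/1 uses the same pattern for scheduler wall-time samples. A minimal sketch of the pattern; the {Ref, _, Data} message shape matches the patch, everything else is illustrative:

    collect_alloc_sizes(Allocs) ->
        Ref = make_ref(),
        %% fan the request out; each scheduler replies asynchronously
        erlang:system_info({allocator_sizes, Ref, Allocs}),
        collect(Ref, erlang:system_info(schedulers), []).

    collect(_Ref, 0, Acc) ->
        Acc;
    collect(Ref, N, Acc) ->
        receive
            {Ref, _SchedulerId, Data} ->
                collect(Ref, N - 1, [Data | Acc])
        end.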
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 24430a3d40..c9c434dea0 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -28,10 +28,10 @@
%% : $Var in the boot script is expanded to
%% Value.
%% -loader LoaderMethod
-%% : efile, inet, ose_inet
+%% : efile, inet
%% (Optional - default efile)
%% -hosts [Node] : List of hosts from which we can boot.
-%% (Mandatory if -loader inet or ose_inet)
+%% (Mandatory if -loader inet)
%% -mode embedded : Load all modules at startup, no automatic loading
%% -mode interactive : Auto load modules (default system behaviour).
%% -path : Override path in bootfile.
@@ -79,19 +79,24 @@
debug(false, _) -> ok;
debug(_, T) -> erlang:display(T).
--spec get_arguments() -> [{atom(), [string()]}].
+-spec get_arguments() -> Flags when
+ Flags :: [{Flag :: atom(), Values :: [string()]}].
get_arguments() ->
request(get_arguments).
--spec get_plain_arguments() -> [string()].
+-spec get_plain_arguments() -> [Arg] when
+ Arg :: string().
get_plain_arguments() ->
bs2ss(request(get_plain_arguments)).
--spec get_argument(atom()) -> 'error' | {'ok', [[string()]]}.
+-spec get_argument(Flag) -> {'ok', Arg} | 'error' when
+ Flag :: atom(),
+ Arg :: [Values :: [string()]].
get_argument(Arg) ->
request({get_argument, Arg}).
--spec script_id() -> term().
+-spec script_id() -> Id when
+ Id :: term().
script_id() ->
request(script_id).
@@ -105,7 +110,9 @@ bs2ss(L0) when is_list(L0) ->
bs2ss(L) ->
L.
--spec get_status() -> {internal_status(), term()}.
+-spec get_status() -> {InternalStatus, ProvidedStatus} when
+ InternalStatus :: internal_status(),
+ ProvidedStatus :: term().
get_status() ->
request(get_status).
@@ -150,10 +157,12 @@ reboot() -> init ! {stop,reboot}, ok.
-spec stop() -> 'ok'.
stop() -> init ! {stop,stop}, ok.
--spec stop(non_neg_integer() | string()) -> 'ok'.
+-spec stop(Status) -> 'ok' when
+ Status :: non_neg_integer() | string().
stop(Status) -> init ! {stop,{stop,Status}}, ok.
--spec boot([binary()]) -> no_return().
+-spec boot(BootArgs) -> no_return() when
+ BootArgs :: [binary()].
boot(BootArgs) ->
register(init, self()),
process_flag(trap_exit, true),
@@ -336,7 +345,7 @@ notify(Pids) ->
lists:foreach(fun(Pid) -> Pid ! {init,started} end, Pids).
%% Garbage collect all info about initially loaded modules.
-%% This information is temporary stored until the code_server
+%% This information is temporarily stored until the code_server
%% is started.
%% We force the garbage collection as the init process holds
%% this information during the initialisation of the system and
@@ -1024,7 +1033,7 @@ start_it({eval,Bin}) ->
TsR -> reverse([{dot,1} | TsR])
end,
{ok,Expr} = erl_parse:parse_exprs(Ts1),
- erl_eval:exprs(Expr, []),
+ erl_eval:exprs(Expr, erl_eval:new_bindings()),
ok;
start_it([_|_]=MFA) ->
Ref = make_ref(),
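The change to start_it({eval,Bin}) means an -eval string is now evaluated against a properly constructed empty binding structure rather than a bare list. A sketch of the scan/parse/eval pipeline involved, assuming the expression string does not already end in a full stop:

    eval_string(Str) ->
        %% tokenize; a trailing dot is appended so parse_exprs sees a full form
        {ok, Tokens, _EndLine} = erl_scan:string(Str ++ "."),
        {ok, Exprs} = erl_parse:parse_exprs(Tokens),
        %% evaluate with fresh bindings, as init now does
        {value, Value, _Bindings} = erl_eval:exprs(Exprs, erl_eval:new_bindings()),
        Value.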
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index 8e0790e74a..0fc56c8c8e 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -26,7 +26,8 @@
%% Generic file contents operations
-export([open/2, close/1, datasync/1, sync/1, advise/4, position/2, truncate/1,
- write/2, pwrite/2, pwrite/3, read/2, read_line/1, pread/2, pread/3, copy/3]).
+ write/2, pwrite/2, pwrite/3, read/2, read_line/1, pread/2, pread/3,
+ copy/3, sendfile/10]).
%% Specialized file operations
-export([open/1, open/3]).
@@ -44,13 +45,13 @@
rename/2, rename/3,
make_dir/1, make_dir/2,
del_dir/1, del_dir/2,
- read_file_info/1, read_file_info/2,
+ read_file_info/1, read_file_info/2, read_file_info/3,
altname/1, altname/2,
- write_file_info/2, write_file_info/3,
+ write_file_info/2, write_file_info/3, write_file_info/4,
make_link/2, make_link/3,
make_symlink/2, make_symlink/3,
read_link/1, read_link/2,
- read_link_info/1, read_link_info/2,
+ read_link_info/1, read_link_info/2, read_link_info/3,
list_dir/1, list_dir/2]).
%% How to start and stop the ?DRV port.
-export([start/0, stop/1]).
@@ -63,7 +64,7 @@
-include("file.hrl").
--define(DRV, efile).
+-define(DRV, "efile").
-define(FD_DRV, "efile").
-define(LARGEFILESIZE, (1 bsl 63)).
@@ -98,6 +99,7 @@
-define(FILE_READ_LINE, 29).
-define(FILE_FDATASYNC, 30).
-define(FILE_ADVISE, 31).
+-define(FILE_SENDFILE, 32).
%% Driver responses
-define(FILE_RESP_OK, 0).
@@ -111,6 +113,7 @@
-define(FILE_RESP_EOF, 8).
-define(FILE_RESP_FNAME, 9).
-define(FILE_RESP_ALL_DATA, 10).
+-define(FILE_RESP_LFNAME, 11).
%% Open modes for the driver's open function.
-define(EFILE_MODE_READ, 1).
@@ -265,7 +268,7 @@ advise(#file_descriptor{module = ?MODULE, data = {Port, _}},
%% Returns {error, Reason} | ok.
write(#file_descriptor{module = ?MODULE, data = {Port, _}}, Bytes) ->
- case drv_command(Port, [?FILE_WRITE,Bytes]) of
+ case drv_command_nt(Port, [?FILE_WRITE,erlang:dt_prepend_vm_tag_data(Bytes)],undefined) of
{ok, _Size} ->
ok;
Error ->
@@ -280,8 +283,8 @@ pwrite(#file_descriptor{module = ?MODULE, data = {Port, _}}, L)
pwrite_int(_, [], 0, [], []) ->
ok;
pwrite_int(Port, [], N, Spec, Data) ->
- Header = list_to_binary([<<?FILE_PWRITEV, N:32>> | reverse(Spec)]),
- case drv_command_raw(Port, [Header | reverse(Data)]) of
+ Header = list_to_binary([?FILE_PWRITEV, erlang:dt_prepend_vm_tag_data(<<N:32>>) | reverse(Spec)]),
+ case drv_command_nt(Port, [Header | reverse(Data)], undefined) of
{ok, _Size} ->
ok;
Error ->
@@ -374,7 +377,7 @@ read(#file_descriptor{module = ?MODULE, data = {Port, _}}, Size)
{ok, Data};
{error, enomem} ->
%% Garbage collecting here might help if
- %% the current processes has some old binaries left.
+ %% the current processes have some old binaries left.
erlang:garbage_collect(),
case drv_command(Port, <<?FILE_READ, Size:64>>) of
{ok, {0, _Data}} when Size =/= 0 ->
@@ -399,7 +402,7 @@ pread(#file_descriptor{module = ?MODULE, data = {Port, _}}, L)
pread_int(_, [], 0, []) ->
{ok, []};
pread_int(Port, [], N, Spec) ->
- drv_command(Port, [<<?FILE_PREADV, 0:32, N:32>> | reverse(Spec)]);
+ drv_command_nt(Port, [?FILE_PREADV, erlang:dt_prepend_vm_tag_data(<<0:32, N:32>>) | reverse(Spec)],undefined);
pread_int(Port, [{Offs, Size} | T], N, Spec)
when is_integer(Offs), is_integer(Size), 0 =< Size ->
if
@@ -420,9 +423,9 @@ pread(#file_descriptor{module = ?MODULE, data = {Port, _}}, Offs, Size)
if
-(?LARGEFILESIZE) =< Offs, Offs < ?LARGEFILESIZE,
Size < ?LARGEFILESIZE ->
- case drv_command(Port,
- <<?FILE_PREADV, 0:32, 1:32,
- Offs:64/signed, Size:64>>) of
+ case drv_command_nt(Port,
+ [?FILE_PREADV, erlang:dt_prepend_vm_tag_data(<<0:32, 1:32,
+ Offs:64/signed, Size:64>>)], undefined) of
{ok, [eof]} ->
eof;
{ok, [Data]} ->
@@ -536,7 +539,31 @@ write_file(File, Bin) when (is_list(File) orelse is_binary(File)) ->
end;
write_file(_, _) ->
{error, badarg}.
-
+
+
+%% Returns {error, Reason} | {ok, BytesCopied}
+%sendfile(_,_,_,_,_,_,_,_,_,_) ->
+% {error, enotsup};
+sendfile(#file_descriptor{module = ?MODULE, data = {Port, _}},
+ Dest, Offset, Bytes, _ChunkSize, Headers, Trailers,
+ _Nodiskio, _MNowait, _Sync) ->
+ case erlang:port_get_data(Dest) of
+ Data when Data == inet_tcp; Data == inet6_tcp ->
+ ok = inet:lock_socket(Dest,true),
+ {ok, DestFD} = prim_inet:getfd(Dest),
+ try drv_command(Port, [<<?FILE_SENDFILE, DestFD:32,
+ 0:8,
+ Offset:64/unsigned,
+ Bytes:64/unsigned,
+ (iolist_size(Headers)):32/unsigned,
+ (iolist_size(Trailers)):32/unsigned>>,
+ Headers,Trailers])
+ after
+ ok = inet:lock_socket(Dest,false)
+ end;
+ _Else ->
+ {error,badarg}
+ end.
%%%-----------------------------------------------------------------
@@ -549,7 +576,7 @@ write_file(_, _) ->
%% Returns {ok, Port}, the Port should be used as first argument in all
%% the following functions. Returns {error, Reason} upon failure.
start() ->
- try erlang:open_port({spawn, atom_to_list(?DRV)}, [binary]) of
+ try erlang:open_port({spawn, ?DRV}, [binary]) of
Port ->
{ok, Port}
catch
@@ -698,16 +725,33 @@ del_dir_int(Port, Dir) ->
-%% read_file_info/{1,2}
+%% read_file_info/{1,2,3}
read_file_info(File) ->
- read_file_info_int({?DRV, [binary]}, File).
+ read_file_info_int({?DRV, [binary]}, File, local).
read_file_info(Port, File) when is_port(Port) ->
- read_file_info_int(Port, File).
+ read_file_info_int(Port, File, local);
+read_file_info(File, Opts) ->
+ read_file_info_int({?DRV, [binary]}, File, plgv(time, Opts, local)).
+
+read_file_info(Port, File, Opts) when is_port(Port) ->
+ read_file_info_int(Port, File, plgv(time, Opts, local)).
+
+read_file_info_int(Port, File, TimeType) ->
+ try
+ case drv_command(Port, [?FILE_FSTAT, pathname(File)]) of
+ {ok, FI} -> {ok, FI#file_info{
+ ctime = from_seconds(FI#file_info.ctime, TimeType),
+ mtime = from_seconds(FI#file_info.mtime, TimeType),
+ atime = from_seconds(FI#file_info.atime, TimeType)
+ }};
+ Error -> Error
+ end
+ catch
+ error:_ -> {error, badarg}
+ end.
-read_file_info_int(Port, File) ->
- drv_command(Port, [?FILE_FSTAT, pathname(File)]).
%% altname/{1,2}
@@ -720,38 +764,61 @@ altname(Port, File) when is_port(Port) ->
altname_int(Port, File) ->
drv_command(Port, [?FILE_ALTNAME, pathname(File)]).
-%% write_file_info/{2,3}
+%% write_file_info/{2,3,4}
write_file_info(File, Info) ->
- write_file_info_int({?DRV, [binary]}, File, Info).
+ write_file_info_int({?DRV, [binary]}, File, Info, local).
write_file_info(Port, File, Info) when is_port(Port) ->
- write_file_info_int(Port, File, Info).
+ write_file_info_int(Port, File, Info, local);
+write_file_info(File, Info, Opts) ->
+ write_file_info_int({?DRV, [binary]}, File, Info, plgv(time, Opts, local)).
+
+write_file_info(Port, File, Info, Opts) when is_port(Port) ->
+ write_file_info_int(Port, File, Info, plgv(time, Opts, local)).
-write_file_info_int(Port,
- File,
+write_file_info_int(Port, File,
#file_info{mode=Mode,
uid=Uid,
gid=Gid,
atime=Atime0,
mtime=Mtime0,
- ctime=Ctime}) ->
- {Atime, Mtime} =
- case {Atime0, Mtime0} of
- {undefined, Mtime0} -> {erlang:localtime(), Mtime0};
- {Atime0, undefined} -> {Atime0, Atime0};
- Complete -> Complete
- end,
- drv_command(Port, [?FILE_WRITE_INFO,
- int_to_bytes(Mode),
- int_to_bytes(Uid),
- int_to_bytes(Gid),
- date_to_bytes(Atime),
- date_to_bytes(Mtime),
- date_to_bytes(Ctime),
- pathname(File)]).
+ ctime=Ctime0},
+ TimeType) ->
+
+ % Atime and/or Mtime might be undefined
+ % - use localtime() for atime, if atime is undefined
+ % - use atime as mtime if mtime is undefined
+ % - use mtime as ctime if ctime is undefined
+
+ try
+ Atime = file_info_validate_atime(Atime0, TimeType),
+ Mtime = file_info_validate_mtime(Mtime0, Atime),
+ Ctime = file_info_validate_ctime(Ctime0, Mtime),
+
+ drv_command(Port, [?FILE_WRITE_INFO,
+ int_to_int32bytes(Mode),
+ int_to_int32bytes(Uid),
+ int_to_int32bytes(Gid),
+ int_to_int64bytes(to_seconds(Atime, TimeType)),
+ int_to_int64bytes(to_seconds(Mtime, TimeType)),
+ int_to_int64bytes(to_seconds(Ctime, TimeType)),
+ pathname(File)])
+ catch
+ error:_ -> {error, badarg}
+ end.
+file_info_validate_atime(Atime, _) when Atime =/= undefined -> Atime;
+file_info_validate_atime(undefined, local) -> erlang:localtime();
+file_info_validate_atime(undefined, universal) -> erlang:universaltime();
+file_info_validate_atime(undefined, posix) -> erlang:universaltime_to_posixtime(erlang:universaltime()).
+
+file_info_validate_mtime(undefined, Atime) -> Atime;
+file_info_validate_mtime(Mtime, _) -> Mtime.
+
+file_info_validate_ctime(undefined, Mtime) -> Mtime;
+file_info_validate_ctime(Ctime, _) -> Ctime.
%% make_link/{2,3}
@@ -795,15 +862,31 @@ read_link_int(Port, Link) ->
%% read_link_info/{2,3}
read_link_info(Link) ->
- read_link_info_int({?DRV, [binary]}, Link).
+ read_link_info_int({?DRV, [binary]}, Link, local).
read_link_info(Port, Link) when is_port(Port) ->
- read_link_info_int(Port, Link).
+ read_link_info_int(Port, Link, local);
+
+read_link_info(Link, Opts) ->
+ read_link_info_int({?DRV, [binary]}, Link, plgv(time, Opts, local)).
-read_link_info_int(Port, Link) ->
- drv_command(Port, [?FILE_LSTAT, pathname(Link)]).
+read_link_info(Port, Link, Opts) when is_port(Port) ->
+ read_link_info_int(Port, Link, plgv(time, Opts, local)).
+read_link_info_int(Port, Link, TimeType) ->
+ try
+ case drv_command(Port, [?FILE_LSTAT, pathname(Link)]) of
+ {ok, FI} -> {ok, FI#file_info{
+ ctime = from_seconds(FI#file_info.ctime, TimeType),
+ mtime = from_seconds(FI#file_info.mtime, TimeType),
+ atime = from_seconds(FI#file_info.atime, TimeType)
+ }};
+ Error -> Error
+ end
+ catch
+ error:_ -> {error, badarg}
+ end.
%% list_dir/{1,2}
@@ -824,7 +907,7 @@ list_dir_int(Port, Dir) ->
%% Opens a driver port and converts any problems into {error, emfile}.
-%% Returns {ok, Port} when succesful.
+%% Returns {ok, Port} when successful.
drv_open(Driver, Portopts) ->
try erlang:open_port({spawn, Driver}, Portopts) of
@@ -840,12 +923,17 @@ drv_open(Driver, Portopts) ->
%% Closes a port in a safe way. Returns ok.
drv_close(Port) ->
- try erlang:port_close(Port) catch error:_ -> ok end,
- receive %% Ugly workaround in case the caller==owner traps exits
- {'EXIT', Port, _Reason} ->
- ok
- after 0 ->
- ok
+ Save = erlang:dt_spread_tag(false),
+ try
+ try erlang:port_close(Port) catch error:_ -> ok end,
+ receive %% Ugly workaround in case the caller==owner traps exits
+ {'EXIT', Port, _Reason} ->
+ ok
+ after 0 ->
+ ok
+ end
+ after
+ erlang:dt_restore_tag(Save)
end.
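drv_close/1 above, and drv_command/4 and drv_command_nt/3 below, now bracket their port operations with erlang:dt_spread_tag(false) / erlang:dt_restore_tag(Save) so dynamic-trace tags do not leak onto the efile driver's internal messages. The idiom as a stand-alone sketch; the port command itself is just an example:

    with_dt_tag_suppressed(Port, Data) ->
        Save = erlang:dt_spread_tag(false),
        try
            erlang:port_command(Port, Data)
        after
            %% always restore the previous tag state, even on errors
            erlang:dt_restore_tag(Save)
        end.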
@@ -855,9 +943,6 @@ drv_close(Port) ->
%% then closed after the result has been received.
%% Returns {ok, Result} or {error, Reason}.
-drv_command_raw(Port, Command) ->
- drv_command(Port, Command, false, undefined).
-
drv_command(Port, Command) ->
drv_command(Port, Command, undefined).
@@ -873,7 +958,8 @@ drv_command(Port, Command, R) ->
end.
drv_command(Port, Command, Validated, R) when is_port(Port) ->
- try erlang:port_command(Port, Command) of
+ Save = erlang:dt_spread_tag(false),
+ try erlang:port_command(Port, erlang:dt_append_vm_tag_data(Command)) of
true ->
drv_get_response(Port, R)
catch
@@ -892,6 +978,8 @@ drv_command(Port, Command, Validated, R) when is_port(Port) ->
end;
error:Reason ->
{error, Reason}
+ after
+ erlang:dt_restore_tag(Save)
end;
drv_command({Driver, Portopts}, Command, Validated, R) ->
case drv_open(Driver, Portopts) of
@@ -902,6 +990,25 @@ drv_command({Driver, Portopts}, Command, Validated, R) ->
Error ->
Error
end.
+drv_command_nt(Port, Command, R) when is_port(Port) ->
+ Save = erlang:dt_spread_tag(false),
+ try erlang:port_command(Port, Command) of
+ true ->
+ drv_get_response(Port, R)
+ catch
+ error:badarg ->
+ try erlang:iolist_size(Command) of
+ _ -> % Valid
+ {error, einval}
+ catch
+ error:_ ->
+ {error, badarg}
+ end;
+ error:Reason ->
+ {error, Reason}
+ after
+ erlang:dt_restore_tag(Save)
+ end.
@@ -914,6 +1021,8 @@ drv_get_response(Port, R) when is_list(R) ->
{ok, R};
{ok, Name} ->
drv_get_response(Port, [Name|R]);
+ {append, Names} ->
+ drv_get_response(Port, append(Names, R));
Error ->
Error
end;
@@ -938,9 +1047,11 @@ drv_get_response(Port) ->
%%%-----------------------------------------------------------------
%%% Utility functions.
+append([I | Is], R) when is_list(R) -> append(Is, [I | R]);
+append([], R) -> R.
-%% Converts a list of mode atoms into an mode word for the driver.
+%% Converts a list of mode atoms into a mode word for the driver.
%% Returns {Mode, Portopts, Setopts} where Portopts is a list of
%% options for erlang:open_port/2 and Setopts is a list of
%% setopt commands to send to the port, or error Reason upon failure.
@@ -1044,7 +1155,7 @@ translate_response(?FILE_RESP_DATA, List) ->
{_N, _Data} = ND = get_uint64(List),
{ok, ND};
translate_response(?FILE_RESP_INFO, List) when is_list(List) ->
- {ok, transform_info_ints(get_uint32s(List))};
+ {ok, transform_info(List)};
translate_response(?FILE_RESP_NUMERR, L0) ->
{N, L1} = get_uint64(L0),
{error, {N, list_to_atom(L1)}};
@@ -1067,7 +1178,7 @@ translate_response(?FILE_RESP_N2DATA,
{ok, {Size, Offset, D}};
translate_response(?FILE_RESP_N2DATA = X, L0) when is_list(L0) ->
{Offset, L1} = get_uint64(L0),
- {ReadSize, L2} = get_uint64(L1),
+ {ReadSize, L2} = get_uint64(L1),
{Size, L3} = get_uint64(L2),
case {ReadSize, L3} of
{0, []} ->
@@ -1087,32 +1198,48 @@ translate_response(?FILE_RESP_FNAME, Data) when is_binary(Data) ->
{ok, prim_file:internal_native2name(Data)};
translate_response(?FILE_RESP_FNAME, Data) ->
{ok, Data};
+translate_response(?FILE_RESP_LFNAME, []) ->
+ ok;
+translate_response(?FILE_RESP_LFNAME, Data) when is_binary(Data) ->
+ {append, transform_lfname(Data)};
+translate_response(?FILE_RESP_LFNAME, Data) ->
+ {append, transform_lfname(Data)};
translate_response(?FILE_RESP_ALL_DATA, Data) ->
{ok, Data};
translate_response(X, Data) ->
{error, {bad_response_from_port, [X | Data]}}.
-transform_info_ints(Ints) ->
- [HighSize, LowSize, Type|Tail0] = Ints,
- Size = HighSize * 16#100000000 + LowSize,
- [Ay, Am, Ad, Ah, Ami, As|Tail1] = Tail0,
- [My, Mm, Md, Mh, Mmi, Ms|Tail2] = Tail1,
- [Cy, Cm, Cd, Ch, Cmi, Cs|Tail3] = Tail2,
- [Mode, Links, Major, Minor, Inode, Uid, Gid, Access] = Tail3,
+transform_info([
+ Hsize1, Hsize2, Hsize3, Hsize4,
+ Lsize1, Lsize2, Lsize3, Lsize4,
+ Type1, Type2, Type3, Type4,
+ Atime1, Atime2, Atime3, Atime4, Atime5, Atime6, Atime7, Atime8,
+ Mtime1, Mtime2, Mtime3, Mtime4, Mtime5, Mtime6, Mtime7, Mtime8,
+ Ctime1, Ctime2, Ctime3, Ctime4, Ctime5, Ctime6, Ctime7, Ctime8,
+ Mode1, Mode2, Mode3, Mode4,
+ Links1, Links2, Links3, Links4,
+ Major1, Major2, Major3, Major4,
+ Minor1, Minor2, Minor3, Minor4,
+ Inode1, Inode2, Inode3, Inode4,
+ Uid1, Uid2, Uid3, Uid4,
+ Gid1, Gid2, Gid3, Gid4,
+ Access1,Access2,Access3,Access4]) ->
#file_info {
- size = Size,
- type = file_type(Type),
- access = file_access(Access),
- atime = {{Ay, Am, Ad}, {Ah, Ami, As}},
- mtime = {{My, Mm, Md}, {Mh, Mmi, Ms}},
- ctime = {{Cy, Cm, Cd}, {Ch, Cmi, Cs}},
- mode = Mode,
- links = Links,
- major_device = Major,
- minor_device = Minor,
- inode = Inode,
- uid = Uid,
- gid = Gid}.
+ size = uint32(Hsize1,Hsize2,Hsize3,Hsize4)*16#100000000 + uint32(Lsize1,Lsize2,Lsize3,Lsize4),
+ type = file_type(uint32(Type1,Type2,Type3,Type4)),
+ access = file_access(uint32(Access1,Access2,Access3,Access4)),
+ atime = sint64(Atime1, Atime2, Atime3, Atime4, Atime5, Atime6, Atime7, Atime8),
+ mtime = sint64(Mtime1, Mtime2, Mtime3, Mtime4, Mtime5, Mtime6, Mtime7, Mtime8),
+ ctime = sint64(Ctime1, Ctime2, Ctime3, Ctime4, Ctime5, Ctime6, Ctime7, Ctime8),
+ mode = uint32(Mode1,Mode2,Mode3,Mode4),
+ links = uint32(Links1,Links2,Links3,Links4),
+ major_device = uint32(Major1,Major2,Major3,Major4),
+ minor_device = uint32(Minor1,Minor2,Minor3,Minor4),
+ inode = uint32(Inode1,Inode2,Inode3,Inode4),
+ uid = uint32(Uid1,Uid2,Uid3,Uid4),
+ gid = uint32(Gid1,Gid2,Gid3,Gid4)
+ }.
+
file_type(1) -> device;
file_type(2) -> directory;
@@ -1125,24 +1252,22 @@ file_access(1) -> write;
file_access(2) -> read;
file_access(3) -> read_write.
-int_to_bytes(Int) when is_integer(Int) ->
+int_to_int32bytes(Int) when is_integer(Int) ->
<<Int:32>>;
-int_to_bytes(undefined) ->
+int_to_int32bytes(undefined) ->
<<-1:32>>.
-date_to_bytes(undefined) ->
- <<-1:32, -1:32, -1:32, -1:32, -1:32, -1:32>>;
-date_to_bytes({{Y, Mon, D}, {H, Min, S}}) ->
- <<Y:32, Mon:32, D:32, H:32, Min:32, S:32>>.
+int_to_int64bytes(Int) when is_integer(Int) ->
+ <<Int:64/signed>>.
-%% uint64([[X1, X2, X3, X4] = Y1 | [X5, X6, X7, X8] = Y2]) ->
-%% (uint32(Y1) bsl 32) bor uint32(Y2).
-%% uint64(X1, X2, X3, X4, X5, X6, X7, X8) ->
-%% (uint32(X1, X2, X3, X4) bsl 32) bor uint32(X5, X6, X7, X8).
+sint64(I1,I2,I3,I4,I5,I6,I7,I8) when I1 > 127 ->
+ ((I1 bsl 56) bor (I2 bsl 48) bor (I3 bsl 40) bor (I4 bsl 32) bor
+ (I5 bsl 24) bor (I6 bsl 16) bor (I7 bsl 8) bor I8) - (1 bsl 64);
+sint64(I1,I2,I3,I4,I5,I6,I7,I8) ->
+ ((I1 bsl 56) bor (I2 bsl 48) bor (I3 bsl 40) bor (I4 bsl 32) bor
+ (I5 bsl 24) bor (I6 bsl 16) bor (I7 bsl 8) bor I8).
-%% uint32([X1,X2,X3,X4]) ->
-%% (X1 bsl 24) bor (X2 bsl 16) bor (X3 bsl 8) bor X4.
uint32(X1,X2,X3,X4) ->
(X1 bsl 24) bor (X2 bsl 16) bor (X3 bsl 8) bor X4.
@@ -1155,11 +1280,6 @@ get_uint64(L0) ->
get_uint32([X1,X2,X3,X4|List]) ->
{(((((X1 bsl 8) bor X2) bsl 8) bor X3) bsl 8) bor X4, List}.
-get_uint32s([X1,X2,X3,X4|Tail]) ->
- [uint32(X1,X2,X3,X4) | get_uint32s(Tail)];
-get_uint32s([]) -> [].
-
-
%% Binary mode
transform_ldata(<<0:32, 0:32>>) ->
@@ -1199,6 +1319,14 @@ transform_ldata(0, List, [Size | Sizes], R) ->
{Front, Rear} = lists_split(List, Size),
transform_ldata(0, Rear, Sizes, [Front | R]).
+transform_lfname(<<>>) -> [];
+transform_lfname(<<L:16, Name:L/binary, Names/binary>>) ->
+ [ prim_file:internal_native2name(Name) | transform_lfname(Names)];
+transform_lfname([]) -> [];
+transform_lfname([L1,L2|Names]) ->
+ L = (L1 bsl 8) bor L2,
+ {Name, Rest} = lists_split(Names, L),
+ [Name | transform_lfname(Rest)].
lists_split(List, 0) when is_list(List) ->
@@ -1230,3 +1358,28 @@ reverse(L, T) -> lists:reverse(L, T).
% in list_to_binary, which is caught and generates the {error,badarg} return
pathname(File) ->
(catch prim_file:internal_name2native(File)).
+
+
+%% proplist:get_value/3
+plgv(K, [{K, V}|_], _) -> V;
+plgv(K, [_|KVs], D) -> plgv(K, KVs, D);
+plgv(_, [], D) -> D.
+
+%%
+%% We don't actually want this here.
+%% We want to use posix time in all prim modules, but erl_prim_loader makes that tricky.
+%% The whole erl_prim_loader probably needs to be redone.
+
+from_seconds(Seconds, posix) when is_integer(Seconds) ->
+ Seconds;
+from_seconds(Seconds, universal) when is_integer(Seconds) ->
+ erlang:posixtime_to_universaltime(Seconds);
+from_seconds(Seconds, local) when is_integer(Seconds) ->
+ erlang:universaltime_to_localtime(erlang:posixtime_to_universaltime(Seconds)).
+
+to_seconds(Seconds, posix) when is_integer(Seconds) ->
+ Seconds;
+to_seconds({_,_} = Datetime, universal) ->
+ erlang:universaltime_to_posixtime(Datetime);
+to_seconds({_,_} = Datetime, local) ->
+ erlang:universaltime_to_posixtime(erlang:localtime_to_universaltime(Datetime)).
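
As an illustration of the decoding helpers above, a minimal sketch (not part of the patch; the values are made up and the calls only make sense inside this module):

    %% sint64/8 treats eight bytes as a signed big-endian 64-bit integer,
    %% so a value with the top bit set comes out negative:
    -1 = sint64(255,255,255,255,255,255,255,255),
    %% uint32/4 stays unsigned:
    16#01020304 = uint32(1,2,3,4),
    %% file times are now POSIX seconds; from_seconds/2 converts on request:
    {{1970,1,1},{0,0,0}} = from_seconds(0, universal).
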
diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl
index 8f2e845b4f..846ae97ed2 100644
--- a/erts/preloaded/src/prim_inet.erl
+++ b/erts/preloaded/src/prim_inet.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -25,8 +25,8 @@
%% Primitive inet_drv interface
--export([open/1, open/2, fdopen/2, fdopen/3, close/1]).
--export([bind/3, listen/1, listen/2]).
+-export([open/3, fdopen/4, close/1]).
+-export([bind/3, listen/1, listen/2, peeloff/2]).
-export([connect/3, connect/4, async_connect/4]).
-export([accept/1, accept/2, async_accept/2]).
-export([shutdown/2]).
@@ -36,7 +36,8 @@
-export([recvfrom/2, recvfrom/3]).
-export([setopt/3, setopts/2, getopt/2, getopts/2, is_sockopt_val/2]).
-export([chgopt/3, chgopts/2]).
--export([getstat/2, getfd/1, getindex/1, getstatus/1, gettype/1,
+-export([getstat/2, getfd/1, ignorefd/2,
+ getindex/1, getstatus/1, gettype/1,
getifaddrs/1, getiflist/1, ifget/3, ifset/3,
gethostname/1]).
-export([getservbyname/3, getservbyport/3]).
@@ -56,58 +57,48 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
-%% OPEN(tcp | udp | sctp, inet | inet6) ->
+%% OPEN(tcp | udp | sctp, inet | inet6, stream | dgram | seqpacket) ->
%% {ok, insock()} |
%% {error, Reason}
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-open(Protocol) -> open1(Protocol, ?INET_AF_INET).
+open(Protocol, Family, Type) ->
+ open(Protocol, Family, Type, ?INET_REQ_OPEN, []).
-open(Protocol, inet) -> open1(Protocol, ?INET_AF_INET);
-open(Protocol, inet6) -> open1(Protocol, ?INET_AF_INET6);
-open(_, _) -> {error, einval}.
+fdopen(Protocol, Family, Type, Fd) when is_integer(Fd) ->
+ open(Protocol, Family, Type, ?INET_REQ_FDOPEN, ?int32(Fd)).
-fdopen(Protocol, Fd) -> fdopen1(Protocol, ?INET_AF_INET, Fd).
-
-fdopen(Protocol, Fd, inet) -> fdopen1(Protocol, ?INET_AF_INET, Fd);
-fdopen(Protocol, Fd, inet6) -> fdopen1(Protocol, ?INET_AF_INET6, Fd);
-fdopen(_, _, _) -> {error, einval}.
-
-open1(Protocol, Family) ->
- case open0(Protocol) of
- {ok, S} ->
- case ctl_cmd(S, ?INET_REQ_OPEN, [Family]) of
- {ok, _} ->
- {ok,S};
- Error ->
- close(S), Error
- end;
- Error -> Error
+open(Protocol, Family, Type, Req, Data) ->
+ Drv = protocol2drv(Protocol),
+ AF = enc_family(Family),
+ T = enc_type(Type),
+ try erlang:open_port({spawn_driver,Drv}, [binary]) of
+ S ->
+ case ctl_cmd(S, Req, [AF,T,Data]) of
+ {ok,_} -> {ok,S};
+ {error,_}=Error ->
+ close(S),
+ Error
+ end
+ catch
+ %% The only (?) way to get here is to try to open
+ %% the sctp driver when it does not exist (badarg)
+ error:badarg -> {error, eprotonosupport};
+ %% system_limit if out of port slots
+ error:system_limit -> {error, system_limit}
end.
-fdopen1(Protocol, Family, Fd) when is_integer(Fd) ->
- case open0(Protocol) of
- {ok, S} ->
- case ctl_cmd(S,?INET_REQ_FDOPEN,[Family,?int32(Fd)]) of
- {ok, _} -> {ok,S};
- Error -> close(S), Error
- end;
- Error -> Error
- end.
+enc_family(inet) -> ?INET_AF_INET;
+enc_family(inet6) -> ?INET_AF_INET6.
-open0(Protocol) ->
- try erlang:open_port({spawn_driver,protocol2drv(Protocol)}, [binary]) of
- Port -> {ok,Port}
- catch
- error:Reason -> {error,Reason}
- end.
+enc_type(stream) -> ?INET_TYPE_STREAM;
+enc_type(dgram) -> ?INET_TYPE_DGRAM;
+enc_type(seqpacket) -> ?INET_TYPE_SEQPACKET.
protocol2drv(tcp) -> "tcp_inet";
protocol2drv(udp) -> "udp_inet";
-protocol2drv(sctp) -> "sctp_inet";
-protocol2drv(_) ->
- erlang:error(eprotonosupport).
+protocol2drv(sctp) -> "sctp_inet".
drv2protocol("tcp_inet") -> tcp;
drv2protocol("udp_inet") -> udp;
@@ -139,7 +130,7 @@ shutdown_1(S, How) ->
shutdown_2(S, How) ->
case ctl_cmd(S, ?TCP_REQ_SHUTDOWN, [How]) of
{ok, []} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
shutdown_pend_loop(S, N0) ->
@@ -195,7 +186,7 @@ close_pend_loop(S, N) ->
bind(S,IP,Port) when is_port(S), is_integer(Port), Port >= 0, Port =< 65535 ->
case ctl_cmd(S,?INET_REQ_BIND,[?int16(Port),ip_to_bytes(IP)]) of
{ok, [P1,P0]} -> {ok, ?u16(P1, P0)};
- Error -> Error
+ {error,_}=Error -> Error
end;
%% Multi-homed "bind": sctp_bindx(). The Op is 'add' or 'remove'.
@@ -222,7 +213,7 @@ bindx(S, AddFlag, Addrs) ->
{IP, Port} <- Addrs]],
case ctl_cmd(S, ?SCTP_REQ_BINDX, Args) of
{ok,_} -> {ok, S};
- Error -> Error
+ {error,_}=Error -> Error
end;
_ -> {error, einval}
end.
@@ -265,7 +256,7 @@ async_connect(S, IP, Port, Time) ->
case ctl_cmd(S, ?INET_REQ_CONNECT,
[enc_time(Time),?int16(Port),ip_to_bytes(IP)]) of
{ok, [R1,R0]} -> {ok, S, ?u16(R1,R0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -318,9 +309,9 @@ accept_opts(L, S) ->
end.
async_accept(L, Time) ->
- case ctl_cmd(L,?TCP_REQ_ACCEPT, [enc_time(Time)]) of
+ case ctl_cmd(L,?INET_REQ_ACCEPT, [enc_time(Time)]) of
{ok, [R1,R0]} -> {ok, ?u16(R1,R0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -334,16 +325,30 @@ async_accept(L, Time) ->
%% listening) is also accepted:
listen(S) -> listen(S, ?LISTEN_BACKLOG).
-
+
+listen(S, true) -> listen(S, ?LISTEN_BACKLOG);
+listen(S, false) -> listen(S, 0);
listen(S, BackLog) when is_port(S), is_integer(BackLog) ->
- case ctl_cmd(S, ?TCP_REQ_LISTEN, [?int16(BackLog)]) of
+ case ctl_cmd(S, ?INET_REQ_LISTEN, [?int16(BackLog)]) of
{ok, _} -> ok;
- Error -> Error
- end;
-listen(S, Flag) when is_port(S), is_boolean(Flag) ->
- case ctl_cmd(S, ?SCTP_REQ_LISTEN, enc_value(set, bool8, Flag)) of
- {ok,_} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% PEELOFF(insock(), AssocId) -> {ok,outsock()} | {error, Reason}
+%%
+%% SCTP: Peel off one association into a type stream socket
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+peeloff(S, AssocId) ->
+ case ctl_cmd(S, ?SCTP_REQ_PEELOFF, [?int32(AssocId)]) of
+ inet_reply ->
+ receive
+ {inet_reply,S,Res} -> Res
+ end;
+ {error,_}=Error -> Error
end.
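
A sketch of how peeloff/2 might be called (S and AssocId are assumed to be an existing one-to-many SCTP socket and a received association id; not part of the patch):

    {ok, NewSock} = prim_inet:peeloff(S, AssocId),
    %% NewSock is an ordinary one-to-one (stream-style) socket for that association.
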
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -395,12 +400,12 @@ sendto(S, IP, Port, Data) when is_port(S), Port >= 0, Port =< 65535 ->
true ->
receive
{inet_reply,S,Reply} ->
- ?DBG_FORMAT("prim_inet:send() -> ~p~n", [Reply]),
+ ?DBG_FORMAT("prim_inet:sendto() -> ~p~n", [Reply]),
Reply
end
catch
error:_ ->
- ?DBG_FORMAT("prim_inet:send() -> {error,einval}~n", []),
+ ?DBG_FORMAT("prim_inet:sendto() -> {error,einval}~n", []),
{error,einval}
end.
@@ -455,7 +460,7 @@ recv0(S, Length, Time) when is_port(S), is_integer(Length), Length >= 0 ->
async_recv(S, Length, Time) ->
case ctl_cmd(S, ?TCP_REQ_RECV, [enc_time(Time), ?int32(Length)]) of
{ok,[R1,R0]} -> {ok, ?u16(R1,R0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -501,7 +506,7 @@ recvfrom0(S, Length, Time)
{inet_async, S, Ref, Error={error, _}} ->
Error
end;
- Error ->
+ {error,_}=Error ->
Error % Front-end error
end;
recvfrom0(_, _, _) -> {error,einval}.
@@ -517,18 +522,18 @@ peername(S) when is_port(S) ->
{ok, [F, P1,P0 | Addr]} ->
{IP, _} = get_ip(F, Addr),
{ok, { IP, ?u16(P1, P0) }};
- Error -> Error
+ {error,_}=Error -> Error
end.
setpeername(S, {IP,Port}) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETPEER, [?int16(Port),ip_to_bytes(IP)]) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
setpeername(S, undefined) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETPEER, []) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -542,18 +547,18 @@ sockname(S) when is_port(S) ->
{ok, [F, P1, P0 | Addr]} ->
{IP, _} = get_ip(F, Addr),
{ok, { IP, ?u16(P1, P0) }};
- Error -> Error
+ {error,_}=Error -> Error
end.
setsockname(S, {IP,Port}) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETNAME, [?int16(Port),ip_to_bytes(IP)]) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
setsockname(S, undefined) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_SETNAME, []) of
{ok,[]} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -573,7 +578,7 @@ setopts(S, Opts) when is_port(S) ->
{ok, Buf} ->
case ctl_cmd(S, ?INET_REQ_SETOPTS, Buf) of
{ok, _} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -599,12 +604,12 @@ getopts(S, Opts) when is_port(S), is_list(Opts) ->
{ok,Rep} ->
%% Non-SCTP: "Rep" contains the encoded option vals:
decode_opt_val(Rep);
- {error,sctp_reply} ->
+ inet_reply ->
%% SCTP: Need to receive the full value:
receive
{inet_reply,S,Res} -> Res
end;
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -733,7 +738,7 @@ getifaddrs_ifget(S, IFs, IF, FlagsVals, Opts) ->
getiflist(S) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_GETIFLIST, []) of
{ok, Data} -> {ok, build_iflist(Data)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -751,7 +756,7 @@ ifget(S, Name, Opts) ->
{ok, Buf2} ->
case ctl_cmd(S, ?INET_REQ_IFGET, [Buf1,Buf2]) of
{ok, Data} -> decode_ifopts(Data,[]);
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end;
@@ -773,7 +778,7 @@ ifset(S, Name, Opts) ->
{ok, Buf2} ->
case ctl_cmd(S, ?INET_REQ_IFSET, [Buf1,Buf2]) of
{ok, _} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end;
@@ -801,7 +806,7 @@ subscribe(S, Sub) when is_port(S), is_list(Sub) ->
{ok, Bytes} ->
case ctl_cmd(S, ?INET_REQ_SUBSCRIBE, Bytes) of
{ok, Data} -> decode_subs(Data);
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -819,7 +824,7 @@ getstat(S, Stats) when is_port(S), is_list(Stats) ->
{ok, Bytes} ->
case ctl_cmd(S, ?INET_REQ_GETSTAT, Bytes) of
{ok, Data} -> decode_stats(Data);
- Error -> Error
+ {error,_}=Error -> Error
end;
Error -> Error
end.
@@ -835,11 +840,26 @@ getstat(S, Stats) when is_port(S), is_list(Stats) ->
getfd(S) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_GETFD, []) of
{ok, [S3,S2,S1,S0]} -> {ok, ?u32(S3,S2,S1,S0)};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
+%% IGNOREFD(insock(),boolean()) -> ok | {error, Reason}
+%%
+%% steal internal file descriptor
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ignorefd(S,Bool) when is_port(S) ->
+ Val = if Bool -> 1; true -> 0 end,
+ case ctl_cmd(S, ?INET_REQ_IGNOREFD, [Val]) of
+ {ok, _} -> ok;
+ Error -> Error
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
%% GETIX(insock()) -> {ok,integer()} | {error, Reason}
%%
%% get internal socket index
@@ -873,7 +893,7 @@ gettype(S) when is_port(S) ->
_ -> undefined
end,
{ok, {Family, Type}};
- Error -> Error
+ {error,_}=Error -> Error
end.
getprotocol(S) when is_port(S) ->
@@ -901,7 +921,7 @@ getstatus(S) when is_port(S) ->
case ctl_cmd(S, ?INET_REQ_GETSTATUS, []) of
{ok, [S3,S2,S1,S0]} ->
{ok, dec_status(?u32(S3,S2,S1,S0))};
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -943,7 +963,7 @@ getservbyname1(S,Name,Proto) ->
case ctl_cmd(S, ?INET_REQ_GETSERVBYNAME, [L1,Name,L2,Proto]) of
{ok, [P1,P0]} ->
{ok, ?u16(P1,P0)};
- Error ->
+ {error,_}=Error ->
Error
end
end.
@@ -971,7 +991,7 @@ getservbyport1(S,Port,Proto) ->
true ->
case ctl_cmd(S, ?INET_REQ_GETSERVBYPORT, [?int16(Port),L,Proto]) of
{ok, Name} -> {ok, Name};
- Error -> Error
+ {error,_}=Error -> Error
end
end.
@@ -985,7 +1005,7 @@ getservbyport1(S,Port,Proto) ->
unrecv(S, Data) ->
case ctl_cmd(S, ?TCP_REQ_UNRECV, Data) of
{ok, _} -> ok;
- Error -> Error
+ {error,_}=Error -> Error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2163,7 +2183,7 @@ ctl_cmd(Port, Cmd, Args) ->
Result =
try erlang:port_control(Port, Cmd, Args) of
[?INET_REP_OK|Reply] -> {ok,Reply};
- [?INET_REP_SCTP] -> {error,sctp_reply};
+ [?INET_REP] -> inet_reply;
[?INET_REP_ERROR|Err] -> {error,list_to_atom(Err)}
catch
error:_ -> {error,einval}
diff --git a/erts/preloaded/src/prim_zip.erl b/erts/preloaded/src/prim_zip.erl
index 6a9856fdad..d29f17ae56 100644
--- a/erts/preloaded/src/prim_zip.erl
+++ b/erts/preloaded/src/prim_zip.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -21,7 +21,7 @@
-module(prim_zip).
-%% unzipping piecemal
+%% unzipping piecemeal
-export([
open/1,
open/3,
@@ -432,7 +432,7 @@ binary_io({file_info, B}, _) ->
is_binary(B) -> {regular, byte_size(B)};
B =:= directory -> {directory, 0}
end,
- Now = calendar:local_time(),
+ Now = erlang:localtime(),
#file_info{size = Size, type = Type, access = read_write,
atime = Now, mtime = Now, ctime = Now,
mode = 0, links = 1, major_device = 0,
diff --git a/erts/preloaded/src/zlib.erl b/erts/preloaded/src/zlib.erl
index 51d6cd0a0b..1faae1c1f4 100644
--- a/erts/preloaded/src/zlib.erl
+++ b/erts/preloaded/src/zlib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -124,7 +124,6 @@
-type zwindowbits() :: -15..-9 | 9..47.
-type zmemlevel() :: 1..9.
-type zstrategy() :: 'default' | 'filtered' | 'huffman_only'.
--type zflush() :: 'none' | 'sync' | 'full' | 'finish'.
%%------------------------------------------------------------------------
@@ -134,7 +133,8 @@ open() ->
open_port({spawn, "zlib_drv"}, [binary]).
%% close and release z_stream
--spec close(zstream()) -> 'ok'.
+-spec close(Z) -> 'ok' when
+ Z :: zstream().
close(Z) ->
try
true = port_close(Z),
@@ -145,16 +145,25 @@ close(Z) ->
catch _:_ -> erlang:error(badarg)
end.
--spec deflateInit(zstream()) -> 'ok'.
+-spec deflateInit(Z) -> 'ok' when
+ Z :: zstream().
deflateInit(Z) ->
call(Z, ?DEFLATE_INIT, <<?Z_DEFAULT_COMPRESSION:32>>).
--spec deflateInit(zstream(), zlevel()) -> 'ok'.
+-spec deflateInit(Z, Level) -> 'ok' when
+ Z :: zstream(),
+ Level :: zlevel().
deflateInit(Z, Level) ->
call(Z, ?DEFLATE_INIT, <<(arg_level(Level)):32>>).
--spec deflateInit(zstream(), zlevel(), zmethod(),
- zwindowbits(), zmemlevel(), zstrategy()) -> 'ok'.
+-spec deflateInit(Z, Level, Method,
+ WindowBits, MemLevel, Strategy) -> 'ok' when
+ Z :: zstream(),
+ Level :: zlevel(),
+ Method :: zmethod(),
+ WindowBits :: zwindowbits(),
+ MemLevel :: zmemlevel(),
+ Strategy :: zstrategy().
deflateInit(Z, Level, Method, WindowBits, MemLevel, Strategy) ->
call(Z, ?DEFLATE_INIT2, <<(arg_level(Level)):32,
(arg_method(Method)):32,
@@ -162,24 +171,38 @@ deflateInit(Z, Level, Method, WindowBits, MemLevel, Strategy) ->
(arg_mem(MemLevel)):32,
(arg_strategy(Strategy)):32>>).
--spec deflateSetDictionary(zstream(), binary()) -> integer().
+-spec deflateSetDictionary(Z, Dictionary) -> Adler32 when
+ Z :: zstream(),
+ Dictionary :: iodata(),
+ Adler32 :: integer().
deflateSetDictionary(Z, Dictionary) ->
call(Z, ?DEFLATE_SETDICT, Dictionary).
--spec deflateReset(zstream()) -> 'ok'.
+-spec deflateReset(Z) -> 'ok' when
+ Z :: zstream().
deflateReset(Z) ->
call(Z, ?DEFLATE_RESET, []).
--spec deflateParams(zstream(), zlevel(), zstrategy()) -> 'ok'.
+-spec deflateParams(Z, Level, Strategy) -> ok when
+ Z :: zstream(),
+ Level :: zlevel(),
+ Strategy :: zstrategy().
deflateParams(Z, Level, Strategy) ->
call(Z, ?DEFLATE_PARAMS, <<(arg_level(Level)):32,
(arg_strategy(Strategy)):32>>).
--spec deflate(zstream(), iodata()) -> iolist().
+-spec deflate(Z, Data) -> Compressed when
+ Z :: zstream(),
+ Data :: iodata(),
+ Compressed :: iolist().
deflate(Z, Data) ->
deflate(Z, Data, none).
--spec deflate(zstream(), iodata(), zflush()) -> iolist().
+-spec deflate(Z, Data, Flush) -> Compressed when
+ Z :: zstream(),
+ Data :: iodata(),
+ Flush :: none | sync | full | finish,
+ Compressed :: iolist().
deflate(Z, Data, Flush) ->
try port_command(Z, Data) of
true ->
@@ -191,19 +214,25 @@ deflate(Z, Data, Flush) ->
erlang:error(badarg)
end.
--spec deflateEnd(zstream()) -> 'ok'.
+-spec deflateEnd(Z) -> 'ok' when
+ Z :: zstream().
deflateEnd(Z) ->
call(Z, ?DEFLATE_END, []).
--spec inflateInit(zstream()) -> 'ok'.
+-spec inflateInit(Z) -> 'ok' when
+ Z :: zstream().
inflateInit(Z) ->
call(Z, ?INFLATE_INIT, []).
--spec inflateInit(zstream(), zwindowbits()) -> 'ok'.
+-spec inflateInit(Z, WindowBits) -> 'ok' when
+ Z :: zstream(),
+ WindowBits :: zwindowbits().
inflateInit(Z, WindowBits) ->
call(Z, ?INFLATE_INIT2, <<(arg_bitsz(WindowBits)):32>>).
--spec inflateSetDictionary(zstream(), binary()) -> 'ok'.
+-spec inflateSetDictionary(Z, Dictionary) -> 'ok' when
+ Z :: zstream(),
+ Dictionary :: iodata().
inflateSetDictionary(Z, Dictionary) ->
call(Z, ?INFLATE_SETDICT, Dictionary).
@@ -211,11 +240,15 @@ inflateSetDictionary(Z, Dictionary) ->
inflateSync(Z) ->
call(Z, ?INFLATE_SYNC, []).
--spec inflateReset(zstream()) -> 'ok'.
+-spec inflateReset(Z) -> 'ok' when
+ Z :: zstream().
inflateReset(Z) ->
call(Z, ?INFLATE_RESET, []).
--spec inflate(zstream(), iodata()) -> iolist().
+-spec inflate(Z, Data) -> Decompressed when
+ Z :: zstream(),
+ Data :: iodata(),
+ Decompressed :: iolist().
inflate(Z, Data) ->
try port_command(Z, Data) of
true ->
@@ -227,50 +260,79 @@ inflate(Z, Data) ->
erlang:error(badarg)
end.
--spec inflateEnd(zstream()) -> 'ok'.
+-spec inflateEnd(Z) -> 'ok' when
+ Z :: zstream().
inflateEnd(Z) ->
call(Z, ?INFLATE_END, []).
--spec setBufSize(zstream(), non_neg_integer()) -> 'ok'.
+-spec setBufSize(Z, Size) -> 'ok' when
+ Z :: zstream(),
+ Size :: non_neg_integer().
setBufSize(Z, Size) ->
call(Z, ?SET_BUFSZ, <<Size:32>>).
--spec getBufSize(zstream()) -> non_neg_integer().
+-spec getBufSize(Z) -> Size when
+ Z :: zstream(),
+ Size :: non_neg_integer().
getBufSize(Z) ->
call(Z, ?GET_BUFSZ, []).
--spec crc32(zstream()) -> integer().
+-spec crc32(Z) -> CRC when
+ Z :: zstream(),
+ CRC :: integer().
crc32(Z) ->
call(Z, ?CRC32_0, []).
--spec crc32(zstream(), binary()) -> integer().
-crc32(Z, Binary) ->
- call(Z, ?CRC32_1, Binary).
-
--spec crc32(zstream(), integer(), binary()) -> integer().
-crc32(Z, CRC, Binary) when is_binary(Binary), is_integer(CRC) ->
- call(Z, ?CRC32_2, <<CRC:32, Binary/binary>>);
-crc32(_Z, _CRC, _Binary) ->
- erlang:error(badarg).
-
--spec adler32(zstream(), binary()) -> integer().
-adler32(Z, Binary) ->
- call(Z, ?ADLER32_1, Binary).
-
--spec adler32(zstream(), integer(), binary()) -> integer().
-adler32(Z, Adler, Binary) when is_binary(Binary), is_integer(Adler) ->
- call(Z, ?ADLER32_2, <<Adler:32, Binary/binary>>);
-adler32(_Z, _Adler, _Binary) ->
+-spec crc32(Z, Data) -> CRC when
+ Z :: zstream(),
+ Data :: iodata(),
+ CRC :: integer().
+crc32(Z, Data) ->
+ call(Z, ?CRC32_1, Data).
+
+-spec crc32(Z, PrevCRC, Data) -> CRC when
+ Z :: zstream(),
+ PrevCRC :: integer(),
+ Data :: iodata(),
+ CRC :: integer().
+crc32(Z, CRC, Data) ->
+ call(Z, ?CRC32_2, [<<CRC:32>>, Data]).
+
+-spec adler32(Z, Data) -> CheckSum when
+ Z :: zstream(),
+ Data :: iodata(),
+ CheckSum :: integer().
+adler32(Z, Data) ->
+ call(Z, ?ADLER32_1, Data).
+
+-spec adler32(Z, PrevAdler, Data) -> CheckSum when
+ Z :: zstream(),
+ PrevAdler :: integer(),
+ Data :: iodata(),
+ CheckSum :: integer().
+adler32(Z, Adler, Data) when is_integer(Adler) ->
+ call(Z, ?ADLER32_2, [<<Adler:32>>, Data]);
+adler32(_Z, _Adler, _Data) ->
erlang:error(badarg).
--spec crc32_combine(zstream(), integer(), integer(), integer()) -> integer().
+-spec crc32_combine(Z, CRC1, CRC2, Size2) -> CRC when
+ Z :: zstream(),
+ CRC :: integer(),
+ CRC1 :: integer(),
+ CRC2 :: integer(),
+ Size2 :: integer().
crc32_combine(Z, CRC1, CRC2, Len2)
when is_integer(CRC1), is_integer(CRC2), is_integer(Len2) ->
call(Z, ?CRC32_COMBINE, <<CRC1:32, CRC2:32, Len2:32>>);
crc32_combine(_Z, _CRC1, _CRC2, _Len2) ->
erlang:error(badarg).
--spec adler32_combine(zstream(), integer(), integer(), integer()) -> integer().
+-spec adler32_combine(Z, Adler1, Adler2, Size2) -> Adler when
+ Z :: zstream(),
+ Adler :: integer(),
+ Adler1 :: integer(),
+ Adler2 :: integer(),
+ Size2 :: integer().
adler32_combine(Z, Adler1, Adler2, Len2)
when is_integer(Adler1), is_integer(Adler2), is_integer(Len2) ->
call(Z, ?ADLER32_COMBINE, <<Adler1:32, Adler2:32, Len2:32>>);
@@ -282,64 +344,107 @@ getQSize(Z) ->
call(Z, ?GET_QSIZE, []).
%% compress/uncompress zlib with header
--spec compress(binary()) -> binary().
-compress(Binary) ->
- Z = open(),
- deflateInit(Z, default),
- Bs = deflate(Z, Binary,finish),
- deflateEnd(Z),
- close(Z),
- list_to_binary(Bs).
-
--spec uncompress(binary()) -> binary().
-uncompress(Binary) when byte_size(Binary) >= 8 ->
+-spec compress(Data) -> Compressed when
+ Data :: iodata(),
+ Compressed :: binary().
+compress(Data) ->
Z = open(),
- inflateInit(Z),
- Bs = inflate(Z, Binary),
- inflateEnd(Z),
- close(Z),
- list_to_binary(Bs);
-uncompress(Binary) when is_binary(Binary) -> erlang:error(data_error);
-uncompress(_) -> erlang:error(badarg).
+ Bs = try
+ deflateInit(Z, default),
+ B = deflate(Z, Data, finish),
+ deflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
+ iolist_to_binary(Bs).
+
+-spec uncompress(Data) -> Decompressed when
+ Data :: iodata(),
+ Decompressed :: binary().
+uncompress(Data) ->
+ try iolist_size(Data) of
+ Size ->
+ if
+ Size >= 8 ->
+ Z = open(),
+ Bs = try
+ inflateInit(Z),
+ B = inflate(Z, Data),
+ inflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
+ iolist_to_binary(Bs);
+ true ->
+ erlang:error(data_error)
+ end
+ catch
+ _:_ ->
+ erlang:error(badarg)
+ end.
%% unzip/zip zlib without header (zip members)
--spec zip(binary()) -> binary().
-zip(Binary) ->
+-spec zip(Data) -> Compressed when
+ Data :: iodata(),
+ Compressed :: binary().
+zip(Data) ->
Z = open(),
- deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default),
- Bs = deflate(Z, Binary, finish),
- deflateEnd(Z),
- close(Z),
- list_to_binary(Bs).
-
--spec unzip(binary()) -> binary().
-unzip(Binary) ->
+ Bs = try
+ deflateInit(Z, default, deflated, -?MAX_WBITS, 8, default),
+ B = deflate(Z, Data, finish),
+ deflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
+ iolist_to_binary(Bs).
+
+-spec unzip(Data) -> Decompressed when
+ Data :: iodata(),
+ Decompressed :: binary().
+unzip(Data) ->
Z = open(),
- inflateInit(Z, -?MAX_WBITS),
- Bs = inflate(Z, Binary),
- inflateEnd(Z),
- close(Z),
- list_to_binary(Bs).
+ Bs = try
+ inflateInit(Z, -?MAX_WBITS),
+ B = inflate(Z, Data),
+ inflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
+ iolist_to_binary(Bs).
--spec gzip(iodata()) -> binary().
-gzip(Data) when is_binary(Data); is_list(Data) ->
+-spec gzip(Data) -> Compressed when
+ Data :: iodata(),
+ Compressed :: binary().
+gzip(Data) ->
Z = open(),
- deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default),
- Bs = deflate(Z, Data, finish),
- deflateEnd(Z),
- close(Z),
- iolist_to_binary(Bs);
-gzip(_) -> erlang:error(badarg).
-
--spec gunzip(iodata()) -> binary().
-gunzip(Data) when is_binary(Data); is_list(Data) ->
+ Bs = try
+ deflateInit(Z, default, deflated, 16+?MAX_WBITS, 8, default),
+ B = deflate(Z, Data, finish),
+ deflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
+ iolist_to_binary(Bs).
+
+-spec gunzip(Data) -> Decompressed when
+ Data :: iodata(),
+ Decompressed :: binary().
+gunzip(Data) ->
Z = open(),
- inflateInit(Z, 16+?MAX_WBITS),
- Bs = inflate(Z, Data),
- inflateEnd(Z),
- close(Z),
- iolist_to_binary(Bs);
-gunzip(_) -> erlang:error(badarg).
+ Bs = try
+ inflateInit(Z, 16+?MAX_WBITS),
+ B = inflate(Z, Data),
+ inflateEnd(Z),
+ B
+ after
+ close(Z)
+ end,
+ iolist_to_binary(Bs).
-spec collect(zstream()) -> iolist().
collect(Z) ->
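
To illustrate the reworked wrappers above, a minimal sketch (not part of the patch): the convenience functions now accept iodata() and release the port even if compression fails midway.

    Bin = zlib:compress(["hello, ", <<"zlib">>]),
    <<"hello, zlib">> = zlib:uncompress(Bin).
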
diff --git a/erts/start_scripts/Makefile b/erts/start_scripts/Makefile
index 4df7568484..d8e39062e3 100644
--- a/erts/start_scripts/Makefile
+++ b/erts/start_scripts/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2010. All Rights Reserved.
+# Copyright Ericsson AB 1997-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -74,7 +74,7 @@ $(SS_ROOT)/start_clean.script \
$(SS_ROOT)/start_clean.boot: $(SS_ROOT)/start_clean.rel
$(INSTALL_DIR) $(SS_TMP)
( cd $(SS_TMP) && \
- $(ERLC) $(SASL_FLAGS) $(SCRIPT_PATH) -o $(SS_ROOT) $< )
+ $(ERLC) $(SASL_FLAGS) $(SCRIPT_PATH) +no_warn_sasl -o $(SS_ROOT) $< )
$(SS_ROOT)/start_sasl.script \
$(SS_ROOT)/start_sasl.boot: $(SS_ROOT)/start_sasl.rel
@@ -128,7 +128,7 @@ $(SS_ROOT)/start_all_example.rel: $(SS_ROOT)/start_all_example.rel.src \
$(ERL_TOP)/bin/start.script:
$(INSTALL_DIR) $(SS_TMP)
( cd $(SS_TMP) && \
- $(ERLC) $(SCRIPT_PATH) +otp_build -o $@ $(SS_ROOT)/start_clean.rel )
+ $(ERLC) $(SCRIPT_PATH) +no_warn_sasl +otp_build -o $@ $(SS_ROOT)/start_clean.rel )
$(ERL_TOP)/bin/start_sasl.script:
$(INSTALL_DIR) $(SS_TMP)
@@ -138,7 +138,7 @@ $(ERL_TOP)/bin/start_sasl.script:
$(ERL_TOP)/bin/start_clean.script:
$(INSTALL_DIR) $(SS_TMP)
( cd $(SS_TMP) && \
- $(ERLC) $(SCRIPT_PATH) +otp_build -o $@ $(SS_ROOT)/start_clean.rel )
+ $(ERLC) $(SCRIPT_PATH) +no_warn_sasl +otp_build -o $@ $(SS_ROOT)/start_clean.rel )
## Special target used from system/build/Makefile for source code release bootstrap.
bootstrap_scripts: $(SS_ROOT)/start_clean.rel
diff --git a/erts/test/autoimport_SUITE.erl b/erts/test/autoimport_SUITE.erl
index 0e4708e046..71ed5204b1 100644
--- a/erts/test/autoimport_SUITE.erl
+++ b/erts/test/autoimport_SUITE.erl
@@ -87,10 +87,21 @@ autoimports(Config) when is_list(Config) ->
xml(XMLFile) ->
{ok,File} = file:open(XMLFile,[read]),
+ xskip_to_funcs(file:read_line(File),File),
DocData = xloop(file:read_line(File),File),
+ true = DocData =/= [],
file:close(File),
analyze(DocData).
+%% Skip lines up to and including the <funcs> tag.
+xskip_to_funcs({ok,Line},File) ->
+ case re:run(Line,"\\<funcs\\>",[{capture,none}]) of
+ nomatch ->
+ xskip_to_funcs(file:read_line(File),File);
+ match ->
+ ok
+ end.
+
xloop({ok,Line},File) ->
case re:run(Line,"\\<name\\>",[{capture,none}]) of
nomatch ->
diff --git a/erts/test/erl_print_SUITE.erl b/erts/test/erl_print_SUITE.erl
index ee1a200530..a49d8f069f 100644
--- a/erts/test/erl_print_SUITE.erl
+++ b/erts/test/erl_print_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -34,7 +34,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
- init_per_testcase/2, fin_per_testcase/2]).
+ init_per_testcase/2, end_per_testcase/2]).
-export([erlang_display/1, integer/1, float/1,
string/1, character/1, snprintf/1, quote/1]).
@@ -247,7 +247,7 @@ init_per_testcase(Case, Config) ->
Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
[{testcase, Case}, {watchdog, Dog} |Config].
-fin_per_testcase(_Case, Config) ->
+end_per_testcase(_Case, Config) ->
Dog = ?config(watchdog, Config),
?t:timetrap_cancel(Dog),
ok.
@@ -303,7 +303,7 @@ read_case_data(Port, TestCase) ->
{Port, {data, {eol, [?PID_MARKER | PidStr]}}} ->
?line ?t:format("Port program pid: ~s~n", [PidStr]),
?line CaseProc = self(),
- ?line list_to_integer(PidStr), % Sanity check
+ ?line _ = list_to_integer(PidStr), % Sanity check
spawn_opt(fun () ->
port_prog_killer(CaseProc, PidStr)
end,
diff --git a/erts/test/erl_print_SUITE_data/Makefile.src b/erts/test/erl_print_SUITE_data/Makefile.src
index 109d55e572..dec5650416 100644
--- a/erts/test/erl_print_SUITE_data/Makefile.src
+++ b/erts/test/erl_print_SUITE_data/Makefile.src
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2005-2009. All Rights Reserved.
+# Copyright Ericsson AB 2005-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -27,7 +27,7 @@ EPTF_CFLAGS = -Wall $(CFLAGS) @DEFS@ -I@erts_lib_include_internal@ -I@erts_lib_i
EPTF_LIBS = $(LIBS) -L@erts_lib_internal_path@ -lerts_internal@type_marker@
EPTT_CFLAGS = -DTHREAD_SAFE $(ETHR_DEFS) $(EPTF_CFLAGS)
-EPTT_LIBS = $(LIBS) -L@erts_lib_internal_path@ -lerts_internal_r@type_marker@ $(ETHR_LIBS)
+EPTT_LIBS = -L@erts_lib_internal_path@ -lerts_internal_r@type_marker@ $(ETHR_LIBS) $(LIBS)
GCC = .@DS@gccifier -CC"$(CC)"
diff --git a/erts/test/erlc_SUITE.erl b/erts/test/erlc_SUITE.erl
index 62e0e6813d..6c0d662126 100644
--- a/erts/test/erlc_SUITE.erl
+++ b/erts/test/erlc_SUITE.erl
@@ -79,7 +79,7 @@ compile_erl(Config) when is_list(Config) ->
?line run(Config, Cmd, FileName, "-Werror",
["compile: warnings being treated as errors\$",
- "Warning: function foo/0 is unused\$",
+ "function foo/0 is unused\$",
"_ERROR_"]),
%% Check a bad file.
@@ -154,7 +154,8 @@ compile_mib(Config) when is_list(Config) ->
?line BadFile = filename:join(SrcDir, "BAD-MIB.mib"),
?line run(Config, Cmd, BadFile, "",
- ["Error: syntax error before: mibs\$", "compilation_failed_ERROR_"]),
+ ["BAD-MIB.mib: 1: syntax error before: mibs\$",
+ "compilation_failed_ERROR_"]),
%% Make sure that no -I option works.
@@ -213,13 +214,40 @@ deep_cwd_1(PrivDir) ->
arg_overflow(Config) when is_list(Config) ->
?line {SrcDir, _OutDir, Cmd} = get_cmd(Config),
?line FileName = filename:join(SrcDir, "erl_test_ok.erl"),
- ?line Args = lists:flatten([ ["-D", integer_to_list(N), "=1 "] ||
- N <- lists:seq(1,10000) ]),
+ %% Each -D option will be expanded to three arguments when
+ %% invoking 'erl'.
+ ?line NumDOptions = num_d_options(),
+ ?line Args = lists:flatten([ ["-D", integer_to_list(N, 36), "=1 "] ||
+ N <- lists:seq(1, NumDOptions) ]),
?line run(Config, Cmd, FileName, Args,
["Warning: function foo/0 is unused\$",
"_OK_"]),
ok.
+num_d_options() ->
+ case {os:type(),os:version()} of
+ {{win32,_},_} ->
+ %% The maximum size of a command line in the command
+ %% shell on Windows is 8191 characters.
+ %% Each -D option is expanded to "@dv NN 1", i.e.
+ %% 8 characters. (Numbers up to 1295 can be expressed
+ %% as two 36-base digits.)
+ 1000;
+ {{unix,linux},Version} when Version < {2,6,23} ->
+ %% On some older 64-bit versions of Linux, the maximum number
+ %% of arguments is 16383.
+ %% See: http://www.in-ulm.de/~mascheck/various/argmax/
+ 5440;
+ {{unix,darwin},{Major,_,_}} when Major >= 11 ->
+ %% "getconf ARG_MAX" still reports 262144 (as in previous
+ %% versions of MacOS X), but the useful space seems to have
+ %% shrunk significantly (or possibly the number of arguments).
+ %% 7673
+ 7500;
+ {_,_} ->
+ 12000
+ end.
+
erlc() ->
case os:find_executable("erlc") of
false ->
diff --git a/erts/test/ethread_SUITE.erl b/erts/test/ethread_SUITE.erl
index 71d8c1c679..24075286a8 100644
--- a/erts/test/ethread_SUITE.erl
+++ b/erts/test/ethread_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -33,7 +33,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
- init_per_testcase/2, fin_per_testcase/2]).
+ init_per_testcase/2, end_per_testcase/2]).
-export([create_join_thread/1,
equal_tids/1,
@@ -47,14 +47,26 @@
spinlock/1,
rwspinlock/1,
rwmutex/1,
- atomic/1]).
+ atomic/1,
+ dw_atomic_massage/1]).
-include_lib("test_server/include/test_server.hrl").
-tests() ->
- [create_join_thread, equal_tids, mutex, try_lock_mutex,
- cond_wait, broadcast, detached_thread,
- max_threads, tsd, spinlock, rwspinlock, rwmutex, atomic].
+tests() ->
+ [create_join_thread,
+ equal_tids,
+ mutex,
+ try_lock_mutex,
+ cond_wait,
+ broadcast,
+ detached_thread,
+ max_threads,
+ tsd,
+ spinlock,
+ rwspinlock,
+ rwmutex,
+ atomic,
+ dw_atomic_massage].
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -110,35 +122,37 @@ try_lock_mutex(suite) ->
try_lock_mutex(Config) ->
run_case(Config, "try_lock_mutex", "").
-wd_dispatch(P) ->
- receive
- bye ->
- ?line true = port_command(P, "-1 "),
- ?line bye;
- L when is_list(L) ->
- ?line true = port_command(P, L),
- ?line wd_dispatch(P)
- end.
-
-watchdog(Port) ->
- ?line process_flag(priority, max),
- ?line receive after 500 -> ok end,
-
- ?line random:seed(),
- ?line true = port_command(Port, "0 "),
- ?line lists:foreach(fun (T) ->
- erlang:send_after(T,
- self(),
- integer_to_list(T)
- ++ " ")
- end,
- lists:usort(lists:map(fun (_) ->
- random:uniform(4500)+500
- end,
- lists:duplicate(50,0)))),
- ?line erlang:send_after(5100, self(), bye),
-
- wd_dispatch(Port).
+%% Remove dead code?
+
+% wd_dispatch(P) ->
+% receive
+% bye ->
+% ?line true = port_command(P, "-1 "),
+% ?line bye;
+% L when is_list(L) ->
+% ?line true = port_command(P, L),
+% ?line wd_dispatch(P)
+% end.
+%
+% watchdog(Port) ->
+% ?line process_flag(priority, max),
+% ?line receive after 500 -> ok end,
+%
+% ?line random:seed(),
+% ?line true = port_command(Port, "0 "),
+% ?line lists:foreach(fun (T) ->
+% erlang:send_after(T,
+% self(),
+% integer_to_list(T)
+% ++ " ")
+% end,
+% lists:usort(lists:map(fun (_) ->
+% random:uniform(4500)+500
+% end,
+% lists:duplicate(50,0)))),
+% ?line erlang:send_after(5100, self(), bye),
+%
+% wd_dispatch(Port).
cond_wait(doc) ->
["Tests ethr_cond_wait with ethr_cond_signal and ethr_cond_broadcast."];
@@ -159,7 +173,15 @@ detached_thread(doc) ->
detached_thread(suite) ->
[];
detached_thread(Config) ->
- run_case(Config, "detached_thread", "").
+ case {os:type(), os:version()} of
+ {{unix,darwin}, {9, _, _}} ->
+ %% For some reason pthread_create() crashes when more
+ %% threads cannot be created, instead of returning an
+ %% error code on our MacOS X Leopard machine...
+ {skipped, "MacOS X Leopard cannot cope with this test..."};
+ _ ->
+ run_case(Config, "detached_thread", "")
+ end.
max_threads(doc) ->
["Tests maximum number of threads."];
@@ -211,17 +233,31 @@ atomic(suite) ->
atomic(Config) ->
run_case(Config, "atomic", "").
+dw_atomic_massage(doc) ->
+ ["Massage double word atomics"];
+dw_atomic_massage(suite) ->
+ [];
+dw_atomic_massage(Config) ->
+ run_case(Config, "dw_atomic_massage", "").
+
%%
%%
%% Auxiliary functions
%%
%%
-init_per_testcase(_Case, Config) ->
- Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
- [{watchdog, Dog}|Config].
+init_per_testcase(Case, Config) ->
+ case inet:gethostname() of
+ {ok,"fenris"} when Case == max_threads ->
+ %% Cannot use os:type+os:version as not all
+ %% solaris10 machines are buggy.
+ {skip, "This machine is buggy"};
+ _Else ->
+ Dog = ?t:timetrap(?DEFAULT_TIMEOUT),
+ [{watchdog, Dog}|Config]
+ end.
-fin_per_testcase(_Case, Config) ->
+end_per_testcase(_Case, Config) ->
Dog = ?config(watchdog, Config),
?t:timetrap_cancel(Dog),
ok.
@@ -277,7 +313,7 @@ read_case_data(Port, TestCase) ->
{Port, {data, {eol, [?PID_MARKER | PidStr]}}} ->
?line ?t:format("Port program pid: ~s~n", [PidStr]),
?line CaseProc = self(),
- ?line list_to_integer(PidStr), % Sanity check
+ ?line _ = list_to_integer(PidStr), % Sanity check
spawn_opt(fun () ->
port_prog_killer(CaseProc, PidStr)
end,
diff --git a/erts/test/ethread_SUITE_data/ethread_tests.c b/erts/test/ethread_SUITE_data/ethread_tests.c
index 0b59ff5aa6..ed96ecdbd2 100644
--- a/erts/test/ethread_SUITE_data/ethread_tests.c
+++ b/erts/test/ethread_SUITE_data/ethread_tests.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -82,6 +82,7 @@ static void
print_eol(void)
{
fprintf(stderr, EOL);
+ fflush(stderr);
}
static void print_line(char *frmt,...)
@@ -826,6 +827,7 @@ detached_thread_test(void)
* Tests
*/
#define MTT_TIMES 10
+#define MTT_HARD_LIMIT (80000)
static int mtt_terminate;
static ethr_mutex mtt_mutex;
@@ -866,14 +868,20 @@ mtt_create_join_threads(void)
while (1) {
if (ix >= no_tids) {
no_tids += 100;
+ if (no_tids > MTT_HARD_LIMIT) {
+ print_line("Hit the hard limit on number of threads (%d)!",
+ MTT_HARD_LIMIT);
+ break;
+ }
tids = (ethr_tid *) realloc((void *)tids, sizeof(ethr_tid)*no_tids);
ASSERT(tids);
}
res = ethr_thr_create(&tids[ix], mtt_thread, NULL, NULL);
- if (res != 0)
+ if (res != 0) {
break;
+ }
ix++;
- } while (res == 0);
+ }
no_threads = ix;
@@ -1310,6 +1318,9 @@ rwmutex_test(void)
* Tests atomics.
*/
+#define AT_AINT32_MAX 0x7fffffff
+#define AT_AINT32_MIN 0x80000000
+
#define AT_THREADS 4
#define AT_ITER 10000
@@ -1320,12 +1331,428 @@ static ethr_atomic_t at_go;
static ethr_atomic_t at_done;
static ethr_atomic_t at_data;
+#define AT_TEST_INIT(T, A, B) \
+do { \
+ ethr_ ## A ## _init ## B(&A, 17); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 17); \
+} while (0)
+
+#define AT_TEST_SET(T, A, B) \
+do { \
+ ethr_ ## A ## _set ## B(&A, 4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 4711); \
+} while (0)
+
+#define AT_TEST_XCHG(T, A, B) \
+do { \
+ ethr_ ## A ## _set ## B(&A, 4711); \
+ ASSERT(ethr_ ## A ## _xchg ## B(&A, 17) == 4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 17); \
+} while (0)
+
+#define AT_TEST_CMPXCHG(T, A, B) \
+do { \
+ ethr_ ## A ## _set ## B(&A, 4711); \
+ ASSERT(ethr_ ## A ## _cmpxchg ## B(&A, 17, 33) == 4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 4711); \
+ ASSERT(ethr_ ## A ## _cmpxchg ## B(&A, 17, 4711) == 4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 17); \
+} while (0)
+
+#define AT_TEST_ADD_READ(T, A, B) \
+do { \
+ T var_ = AT_AINT32_MAX; \
+ var_ += 4711; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MAX); \
+ ASSERT(ethr_ ## A ## _add_read ## B(&A, 4711) == var_); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ var_ = AT_AINT32_MIN; \
+ var_ -= 4711; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MIN); \
+ ASSERT(ethr_ ## A ## _add_read ## B(&A, -4711) == var_); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ ethr_ ## A ## _set ## B(&A, 4711); \
+ ASSERT(ethr_ ## A ## _add_read ## B(&A, 10) == 4721); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 4721); \
+} while (0)
+
+#define AT_TEST_ADD(T, A, B) \
+do { \
+ T var_ = AT_AINT32_MAX; \
+ var_ += 4711; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MAX); \
+ ethr_ ## A ## _add ## B(&A, 4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ var_ = AT_AINT32_MIN; \
+ var_ -= 4711; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MIN); \
+ ethr_ ## A ## _add ## B(&A, -4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ ethr_ ## A ## _set ## B(&A, 11); \
+ ethr_ ## A ## _add ## B(&A, 4700); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 4711); \
+} while (0)
+
+#define AT_TEST_INC_READ(T, A, B) \
+do { \
+ T var_ = AT_AINT32_MAX; \
+ var_++; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MAX); \
+ ASSERT(ethr_ ## A ## _inc_read ## B(&A) == var_); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ ethr_ ## A ## _set ## B(&A, 4710); \
+ ASSERT(ethr_ ## A ## _inc_read ## B(&A) == 4711); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 4711); \
+} while (0)
+
+#define AT_TEST_DEC_READ(T, A, B) \
+do { \
+ T var_ = AT_AINT32_MIN; \
+ var_--; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MIN); \
+ ASSERT(ethr_ ## A ## _dec_read ## B(&A) == var_); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ ethr_ ## A ## _set ## B(&A, 17); \
+ ASSERT(ethr_ ## A ## _dec_read ## B(&A) == 16); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 16); \
+} while (0)
+
+
+#define AT_TEST_INC(T, A, B) \
+do { \
+ T var_ = AT_AINT32_MAX; \
+ var_++; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MAX); \
+ ethr_ ## A ## _inc ## B(&A); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ ethr_ ## A ## _set ## B(&A, 4710); \
+ ethr_ ## A ## _inc ## B(&A); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 4711); \
+} while (0)
+
+#define AT_TEST_DEC(T, A, B) \
+do { \
+ T var_ = AT_AINT32_MIN; \
+ var_--; \
+ ethr_ ## A ## _set ## B(&A, AT_AINT32_MIN); \
+ ethr_ ## A ## _dec ## B(&A); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == var_); \
+ ethr_ ## A ## _set ## B(&A, 17); \
+ ethr_ ## A ## _dec ## B(&A); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 16); \
+} while (0)
+
+#define AT_TEST_READ_BAND(T, A, B) \
+do { \
+ ethr_ ## A ## _set ## B(&A, 0x13131313); \
+ ASSERT(ethr_ ## A ## _read_band ## B(&A, 0x31313131) == 0x13131313); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 0x11111111); \
+} while (0)
+
+#define AT_TEST_READ_BOR(T, A, B) \
+do { \
+ ethr_ ## A ## _set ## B(&A, 0x11111111); \
+ ASSERT(ethr_ ## A ## _read_bor ## B(&A, 0x23232323) == 0x11111111); \
+ ASSERT(ethr_ ## A ## _read ## B(&A) == 0x33333333); \
+} while (0)
+
+
+static void
+atomic_basic_test(void)
+{
+ /*
+ * Verify that each op does what it is expected
+ * to do for at least one input.
+ */
+ ethr_atomic32_t atomic32;
+ ethr_atomic_t atomic;
+
+ print_line("AT_AINT32_MAX=%d",AT_AINT32_MAX);
+ print_line("AT_AINT32_MIN=%d",AT_AINT32_MIN);
+
+ AT_TEST_INIT(ethr_sint32_t, atomic32, );
+ AT_TEST_SET(ethr_sint32_t, atomic32, );
+ AT_TEST_XCHG(ethr_sint32_t, atomic32, );
+ AT_TEST_CMPXCHG(ethr_sint32_t, atomic32, );
+ AT_TEST_ADD_READ(ethr_sint32_t, atomic32, );
+ AT_TEST_ADD(ethr_sint32_t, atomic32, );
+ AT_TEST_INC_READ(ethr_sint32_t, atomic32, );
+ AT_TEST_DEC_READ(ethr_sint32_t, atomic32, );
+ AT_TEST_INC(ethr_sint32_t, atomic32, );
+ AT_TEST_DEC(ethr_sint32_t, atomic32, );
+ AT_TEST_READ_BAND(ethr_sint32_t, atomic32, );
+ AT_TEST_READ_BOR(ethr_sint32_t, atomic32, );
+
+ AT_TEST_INIT(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_SET(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_XCHG(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_CMPXCHG(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_ADD_READ(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_ADD(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_INC_READ(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_DEC_READ(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_INC(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_DEC(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_READ_BAND(ethr_sint32_t, atomic32, _acqb);
+ AT_TEST_READ_BOR(ethr_sint32_t, atomic32, _acqb);
+
+ AT_TEST_INIT(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_SET(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_XCHG(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_CMPXCHG(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_ADD_READ(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_ADD(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_INC_READ(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_DEC_READ(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_INC(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_DEC(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_READ_BAND(ethr_sint32_t, atomic32, _relb);
+ AT_TEST_READ_BOR(ethr_sint32_t, atomic32, _relb);
+
+ AT_TEST_INIT(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_SET(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_XCHG(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_CMPXCHG(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_ADD_READ(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_ADD(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_INC_READ(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_DEC_READ(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_INC(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_DEC(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_READ_BAND(ethr_sint32_t, atomic32, _rb);
+ AT_TEST_READ_BOR(ethr_sint32_t, atomic32, _rb);
+
+ AT_TEST_INIT(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_SET(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_XCHG(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_CMPXCHG(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_ADD_READ(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_ADD(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_INC_READ(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_DEC_READ(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_INC(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_DEC(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_READ_BAND(ethr_sint32_t, atomic32, _wb);
+ AT_TEST_READ_BOR(ethr_sint32_t, atomic32, _wb);
+
+ AT_TEST_INIT(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_SET(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_XCHG(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_CMPXCHG(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_ADD_READ(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_ADD(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_INC_READ(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_DEC_READ(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_INC(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_DEC(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_READ_BAND(ethr_sint32_t, atomic32, _mb);
+ AT_TEST_READ_BOR(ethr_sint32_t, atomic32, _mb);
+
+ AT_TEST_INIT(ethr_sint_t, atomic, );
+ AT_TEST_SET(ethr_sint_t, atomic, );
+ AT_TEST_XCHG(ethr_sint_t, atomic, );
+ AT_TEST_CMPXCHG(ethr_sint_t, atomic, );
+ AT_TEST_ADD_READ(ethr_sint_t, atomic, );
+ AT_TEST_ADD(ethr_sint_t, atomic, );
+ AT_TEST_INC_READ(ethr_sint_t, atomic, );
+ AT_TEST_DEC_READ(ethr_sint_t, atomic, );
+ AT_TEST_INC(ethr_sint_t, atomic, );
+ AT_TEST_DEC(ethr_sint_t, atomic, );
+ AT_TEST_READ_BAND(ethr_sint_t, atomic, );
+ AT_TEST_READ_BOR(ethr_sint_t, atomic, );
+
+ AT_TEST_INIT(ethr_sint_t, atomic, _acqb);
+ AT_TEST_SET(ethr_sint_t, atomic, _acqb);
+ AT_TEST_XCHG(ethr_sint_t, atomic, _acqb);
+ AT_TEST_CMPXCHG(ethr_sint_t, atomic, _acqb);
+ AT_TEST_ADD_READ(ethr_sint_t, atomic, _acqb);
+ AT_TEST_ADD(ethr_sint_t, atomic, _acqb);
+ AT_TEST_INC_READ(ethr_sint_t, atomic, _acqb);
+ AT_TEST_DEC_READ(ethr_sint_t, atomic, _acqb);
+ AT_TEST_INC(ethr_sint_t, atomic, _acqb);
+ AT_TEST_DEC(ethr_sint_t, atomic, _acqb);
+ AT_TEST_READ_BAND(ethr_sint_t, atomic, _acqb);
+ AT_TEST_READ_BOR(ethr_sint_t, atomic, _acqb);
+
+ AT_TEST_INIT(ethr_sint_t, atomic, _relb);
+ AT_TEST_SET(ethr_sint_t, atomic, _relb);
+ AT_TEST_XCHG(ethr_sint_t, atomic, _relb);
+ AT_TEST_CMPXCHG(ethr_sint_t, atomic, _relb);
+ AT_TEST_ADD_READ(ethr_sint_t, atomic, _relb);
+ AT_TEST_ADD(ethr_sint_t, atomic, _relb);
+ AT_TEST_INC_READ(ethr_sint_t, atomic, _relb);
+ AT_TEST_DEC_READ(ethr_sint_t, atomic, _relb);
+ AT_TEST_INC(ethr_sint_t, atomic, _relb);
+ AT_TEST_DEC(ethr_sint_t, atomic, _relb);
+ AT_TEST_READ_BAND(ethr_sint_t, atomic, _relb);
+ AT_TEST_READ_BOR(ethr_sint_t, atomic, _relb);
+
+ AT_TEST_INIT(ethr_sint_t, atomic, _rb);
+ AT_TEST_SET(ethr_sint_t, atomic, _rb);
+ AT_TEST_XCHG(ethr_sint_t, atomic, _rb);
+ AT_TEST_CMPXCHG(ethr_sint_t, atomic, _rb);
+ AT_TEST_ADD_READ(ethr_sint_t, atomic, _rb);
+ AT_TEST_ADD(ethr_sint_t, atomic, _rb);
+ AT_TEST_INC_READ(ethr_sint_t, atomic, _rb);
+ AT_TEST_DEC_READ(ethr_sint_t, atomic, _rb);
+ AT_TEST_INC(ethr_sint_t, atomic, _rb);
+ AT_TEST_DEC(ethr_sint_t, atomic, _rb);
+ AT_TEST_READ_BAND(ethr_sint_t, atomic, _rb);
+ AT_TEST_READ_BOR(ethr_sint_t, atomic, _rb);
+
+ AT_TEST_INIT(ethr_sint_t, atomic, _wb);
+ AT_TEST_SET(ethr_sint_t, atomic, _wb);
+ AT_TEST_XCHG(ethr_sint_t, atomic, _wb);
+ AT_TEST_CMPXCHG(ethr_sint_t, atomic, _wb);
+ AT_TEST_ADD_READ(ethr_sint_t, atomic, _wb);
+ AT_TEST_ADD(ethr_sint_t, atomic, _wb);
+ AT_TEST_INC_READ(ethr_sint_t, atomic, _wb);
+ AT_TEST_DEC_READ(ethr_sint_t, atomic, _wb);
+ AT_TEST_INC(ethr_sint_t, atomic, _wb);
+ AT_TEST_DEC(ethr_sint_t, atomic, _wb);
+ AT_TEST_READ_BAND(ethr_sint_t, atomic, _wb);
+ AT_TEST_READ_BOR(ethr_sint_t, atomic, _wb);
+
+ AT_TEST_INIT(ethr_sint_t, atomic, _mb);
+ AT_TEST_SET(ethr_sint_t, atomic, _mb);
+ AT_TEST_XCHG(ethr_sint_t, atomic, _mb);
+ AT_TEST_CMPXCHG(ethr_sint_t, atomic, _mb);
+ AT_TEST_ADD_READ(ethr_sint_t, atomic, _mb);
+ AT_TEST_ADD(ethr_sint_t, atomic, _mb);
+ AT_TEST_INC_READ(ethr_sint_t, atomic, _mb);
+ AT_TEST_DEC_READ(ethr_sint_t, atomic, _mb);
+ AT_TEST_INC(ethr_sint_t, atomic, _mb);
+ AT_TEST_DEC(ethr_sint_t, atomic, _mb);
+ AT_TEST_READ_BAND(ethr_sint_t, atomic, _mb);
+ AT_TEST_READ_BOR(ethr_sint_t, atomic, _mb);
+
+ /* Double word */
+ {
+ ethr_dw_atomic_t dw_atomic;
+ ethr_dw_sint_t dw0, dw1;
+ dw0.sint[0] = 4711;
+ dw0.sint[1] = 4712;
+
+ /* init */
+ ethr_dw_atomic_init(&dw_atomic, &dw0);
+ ethr_dw_atomic_read(&dw_atomic, &dw1);
+ ETHR_ASSERT(dw1.sint[0] == 4711);
+ ETHR_ASSERT(dw1.sint[1] == 4712);
+
+ /* set */
+ dw0.sint[0] = 42;
+ dw0.sint[1] = ~((ethr_sint_t) 0);
+ ethr_dw_atomic_set(&dw_atomic, &dw0);
+ ethr_dw_atomic_read(&dw_atomic, &dw1);
+ ASSERT(dw1.sint[0] == 42);
+ ASSERT(dw1.sint[1] == ~((ethr_sint_t) 0));
+
+ /* cmpxchg */
+ dw0.sint[0] = 17;
+ dw0.sint[1] = 18;
+ dw1.sint[0] = 19;
+ dw1.sint[1] = 20;
+ ASSERT(!ethr_dw_atomic_cmpxchg(&dw_atomic, &dw1, &dw0));
+ ethr_dw_atomic_read(&dw_atomic, &dw0);
+ ASSERT(dw0.sint[0] == 42);
+ ASSERT(dw0.sint[1] == ~((ethr_sint_t) 0));
+
+ ASSERT(ethr_dw_atomic_cmpxchg(&dw_atomic, &dw1, &dw0));
+
+ ethr_dw_atomic_read(&dw_atomic, &dw0);
+ ASSERT(dw0.sint[0] == 19);
+ ASSERT(dw0.sint[1] == 20);
+ }
+}
+
+
+#define AT_DW_MIN 12
+#define AT_DW_MAX 42
+#define AT_DW_THREADS (AT_DW_MAX - AT_DW_MIN + 1)
+
+#define AT_DW_LOOPS 200000
+#define AT_DW_R_LOOPS 10
+
+ethr_dw_atomic_t at_dw_atomic;
+
+void
+at_dw_valid(ethr_dw_sint_t *dw)
+{
+ int i;
+ char c;
+ char *cp;
+
+ ASSERT(dw->sint[0] == dw->sint[1]);
+
+ cp = (char *) &dw->sint[0];
+ c = cp[0];
+
+ ASSERT(AT_DW_MIN <= c && c <= AT_DW_MAX);
+
+ for (i = 0; i < sizeof(ethr_sint_t); i++)
+ ASSERT(c == cp[i]);
+}
+
+void *
+at_dw_thr(void *vval)
+{
+ int l, r;
+ ethr_sint_t val = (ethr_sint_t) vval;
+ ethr_dw_sint_t dw;
+ ethr_dw_sint_t my_dw;
+
+ my_dw.sint[0] = val;
+ my_dw.sint[1] = val;
+
+ ethr_dw_atomic_set(&at_dw_atomic, &my_dw);
+ for (l = 0; l < AT_DW_LOOPS; l++) {
+ for (r = 0; r < AT_DW_R_LOOPS; r++) {
+ ethr_dw_atomic_read(&at_dw_atomic, &dw);
+ at_dw_valid(&dw);
+ }
+ ethr_dw_atomic_set(&at_dw_atomic, &my_dw);
+ for (r = 0; r < AT_DW_R_LOOPS; r++) {
+ ethr_dw_atomic_read(&at_dw_atomic, &dw);
+ at_dw_valid(&dw);
+ }
+ dw.sint[0] = 0;
+ dw.sint[1] = 0;
+ while (1) {
+ if (ethr_dw_atomic_cmpxchg(&at_dw_atomic, &my_dw, &dw))
+ break;
+ }
+ }
+}
+
+static void
+dw_atomic_massage_test(void)
+{
+ int i, res;
+ ethr_tid tid[AT_DW_THREADS];
+ ethr_thr_opts thr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
+ ethr_dw_sint_t dw;
+
+ dw.sint[0] = dw.sint[1] = 0;
+
+ ethr_dw_atomic_init(&at_dw_atomic, &dw);
+
+ for (i = AT_DW_MIN; i <= AT_DW_MAX; i++) {
+ ethr_sint_t val;
+ memset(&val, i, sizeof(ethr_sint_t));
+ res = ethr_thr_create(&tid[i-AT_DW_MIN], at_dw_thr, (void *) val, &thr_opts);
+ ASSERT(res == 0);
+ }
+ for (i = AT_DW_MIN; i <= AT_DW_MAX; i++) {
+ res = ethr_thr_join(tid[i-AT_DW_MIN], NULL);
+ ASSERT(res == 0);
+ }
+}
+
void *
at_thread(void *unused)
{
int i;
long val, go;
-
val = ethr_atomic_inc_read(&at_ready);
ASSERT(val > 0);
@@ -1373,7 +1800,6 @@ at_thread(void *unused)
return NULL;
}
-
static void
atomic_test(void)
{
@@ -1382,6 +1808,8 @@ atomic_test(void)
ethr_tid tid[AT_THREADS];
ethr_thr_opts thr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
+ atomic_basic_test();
+
#if ETHR_SIZEOF_PTR > 4
at_rm_val = ((long) 1) << 57;
at_set_val = ((long) 1) << 60;
@@ -1493,6 +1921,8 @@ main(int argc, char *argv[])
rwmutex_test();
else if (strcmp(testcase, "atomic") == 0)
atomic_test();
+ else if (strcmp(testcase, "dw_atomic_massage") == 0)
+ dw_atomic_massage_test();
else
skip("Test case \"%s\" not implemented yet", testcase);
diff --git a/erts/test/nt_SUITE.erl b/erts/test/nt_SUITE.erl
index 7d6da28ad6..f9bd15a0ce 100644
--- a/erts/test/nt_SUITE.erl
+++ b/erts/test/nt_SUITE.erl
@@ -490,12 +490,12 @@ middleman(Waitfor) ->
match_event(_X, []) ->
nomatch;
match_event({Time,Cat,Fac,Sev,Mes},[{Pid,Ref,{Cat,Fac,Sev,MesRE}} | Tail]) ->
- case regexp:match(Mes,MesRE) of
- {match,_,_} ->
+ case re:run(Mes,MesRE,[{capture,none}]) of
+ match ->
%%io:format("Match!~n"),
{ok,{Pid,Ref,Time,Mes},Tail};
- _Z ->
- %%io:format("No match (~p)~n",[_Z]),
+ nomatch ->
+ %%io:format("No match~n"),
case match_event({Time,Cat,Fac,Sev,Mes},Tail) of
{ok,X,Rest} ->
{ok,X,[{Pid,Ref,{Cat,Fac,Sev,MesRE}} | Rest]};
diff --git a/erts/test/otp_SUITE.erl b/erts/test/otp_SUITE.erl
index d61fbbddcf..f146bf1d69 100644
--- a/erts/test/otp_SUITE.erl
+++ b/erts/test/otp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -94,15 +94,16 @@ undefined_functions(Config) when is_list(Config) ->
case Undef of
[] -> ok;
_ ->
+ Fd = open_log(Config, "undefined_functions"),
foreach(fun ({MFA1,MFA2}) ->
io:format("~s calls undefined ~s",
+ [format_mfa(MFA1),format_mfa(MFA2)]),
+ io:format(Fd, "~s ~s\n",
[format_mfa(MFA1),format_mfa(MFA2)])
end, Undef),
+ close_log(Fd),
?line ?t:fail({length(Undef),undefined_functions_in_otp})
-
- end,
-
- ok.
+ end.
hipe_filter(Undef) ->
case erlang:system_info(hipe_architecture) of
@@ -150,6 +151,9 @@ ssl_crypto_filter(Undef) ->
{{error,bad_name},{error,bad_name}} ->
filter(fun({_,{ssl,_,_}}) -> false;
({_,{crypto,_,_}}) -> false;
+ ({_,{ssh,_,_}}) -> false;
+ ({_,{ssh_connection,_,_}}) -> false;
+ ({_,{ssh_sftp,_,_}}) -> false;
(_) -> true
end, Undef);
{_,_} -> Undef
@@ -211,7 +215,10 @@ deprecated_not_in_obsolete(Config) when is_list(Config) ->
_ ->
io:put_chars("The following functions have -deprecated() attributes,\n"
"but are not listed in otp_internal:obsolete/3.\n"),
- ?line print_mfas(L),
+ ?line print_mfas(group_leader(), L),
+ Fd = open_log(Config, "deprecated_not_obsolete"),
+ print_mfas(Fd, L),
+ close_log(Fd),
?line ?t:fail({length(L),deprecated_but_not_obsolete})
end.
@@ -232,11 +239,13 @@ obsolete_but_not_deprecated(Config) when is_list(Config) ->
io:put_chars("The following functions are listed "
"in otp_internal:obsolete/3,\n"
"but don't have -deprecated() attributes.\n"),
- ?line print_mfas(L),
+ ?line print_mfas(group_leader(), L),
+ Fd = open_log(Config, "obsolete_not_deprecated"),
+ print_mfas(Fd, L),
+ close_log(Fd),
?line ?t:fail({length(L),obsolete_but_not_deprecated})
end.
-
call_to_deprecated(Config) when is_list(Config) ->
Server = ?config(xref_server, Config),
?line {ok,DeprecatedCalls} = xref:q(Server, "strict(E || DF)"),
@@ -302,10 +311,20 @@ strong_components(Config) when is_list(Config) ->
%%%
-print_mfas([MFA|T]) ->
- io:format("~s\n", [format_mfa(MFA)]),
- print_mfas(T);
-print_mfas([]) -> ok.
+print_mfas(Fd, [MFA|T]) ->
+ io:format(Fd, "~s\n", [format_mfa(MFA)]),
+ print_mfas(Fd, T);
+print_mfas(_, []) -> ok.
format_mfa({M,F,A}) ->
lists:flatten(io_lib:format("~s:~s/~p", [M,F,A])).
+
+open_log(Config, Name) ->
+ PrivDir = ?config(priv_dir, Config),
+ RunDir = filename:dirname(filename:dirname(PrivDir)),
+ Path = filename:join(RunDir, "system_"++Name++".log"),
+ {ok,Fd} = file:open(Path, [write]),
+ Fd.
+
+close_log(Fd) ->
+ ok = file:close(Fd).
diff --git a/erts/test/z_SUITE.erl b/erts/test/z_SUITE.erl
index 8fceab32a6..78968ed405 100644
--- a/erts/test/z_SUITE.erl
+++ b/erts/test/z_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -166,9 +166,12 @@ core_search_conf(RunByTS, DBTop, XDir) ->
file_inspect(#core_search_conf{file = File}, Core) ->
FRes0 = os:cmd(File ++ " " ++ Core),
- FRes = case regexp:match(FRes0, Core) of
- {match, S, E} ->
+ FRes = case string:str(FRes0, Core) of
+ 0 ->
+ FRes0;
+ S ->
L = length(FRes0),
+ E = length(Core),
case S of
1 ->
lists:sublist(FRes0, E+1, L+1);
@@ -178,19 +181,13 @@ file_inspect(#core_search_conf{file = File}, Core) ->
" "
++
lists:sublist(FRes0, E+1, L+1)
- end;
- _ -> FRes0
+ end
end,
- case regexp:match(FRes, "[Tt][Ee][Xx][Tt]") of
+ case re:run(FRes, "text|ascii", [caseless,{capture,none}]) of
+ match ->
+ not_a_core;
nomatch ->
- case regexp:match(FRes, "[Aa][Ss][Cc][Ii][Ii]") of
- nomatch ->
- probably_a_core;
- _ ->
- not_a_core
- end;
- _ ->
- not_a_core
+ probably_a_core
end.
mk_readable(F) ->
@@ -239,8 +236,8 @@ format_core(#core_search_conf{file = false}, Core, Ignore) ->
[Ignore, Core] ++ mod_time_list(Core));
format_core(#core_search_conf{file = File}, Core, Ignore) ->
FRes = str_strip(os:cmd(File ++ " " ++ Core)),
- case catch regexp:match(FRes, Core) of
- {match, _, _} ->
+ case catch re:run(FRes, Core, [caseless,{capture,none}]) of
+ match ->
io:format(" ~s~s " ++ time_fstr() ++ "~n",
[Ignore, FRes] ++ mod_time_list(Core));
_ ->
diff --git a/erts/vsn.mk b/erts/vsn.mk
index 193a914a70..bbf77b1a68 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2011. All Rights Reserved.
+# Copyright Ericsson AB 1997-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -17,8 +17,8 @@
# %CopyrightEnd%
#
-VSN = 5.8.4
-SYSTEM_VSN = R14B03
+VSN = 5.9.1
+SYSTEM_VSN = R15B01
# Port number 4365 in 4.2
# Port number 4366 in 4.3