Diffstat (limited to 'lib/kernel/src')
-rw-r--r--  lib/kernel/src/Makefile | 243
-rw-r--r--  lib/kernel/src/application.erl | 263
-rw-r--r--  lib/kernel/src/application_controller.erl | 1946
-rw-r--r--  lib/kernel/src/application_master.erl | 426
-rw-r--r--  lib/kernel/src/application_master.hrl | 20
-rw-r--r--  lib/kernel/src/application_starter.erl | 111
-rw-r--r--  lib/kernel/src/auth.erl | 391
-rw-r--r--  lib/kernel/src/code.erl | 491
-rw-r--r--  lib/kernel/src/code_server.erl | 1539
-rw-r--r--  lib/kernel/src/disk_log.erl | 1899
-rw-r--r--  lib/kernel/src/disk_log.hrl | 161
-rw-r--r--  lib/kernel/src/disk_log_1.erl | 1551
-rw-r--r--  lib/kernel/src/disk_log_server.erl | 368
-rw-r--r--  lib/kernel/src/disk_log_sup.erl | 32
-rw-r--r--  lib/kernel/src/dist.hrl | 38
-rw-r--r--  lib/kernel/src/dist_ac.erl | 1534
-rw-r--r--  lib/kernel/src/dist_util.erl | 762
-rw-r--r--  lib/kernel/src/dist_util.hrl | 87
-rw-r--r--  lib/kernel/src/erl_boot_server.erl | 325
-rw-r--r--  lib/kernel/src/erl_ddll.erl | 150
-rw-r--r--  lib/kernel/src/erl_distribution.erl | 106
-rw-r--r--  lib/kernel/src/erl_epmd.erl | 553
-rw-r--r--  lib/kernel/src/erl_epmd.hrl | 32
-rw-r--r--  lib/kernel/src/erl_reply.erl | 49
-rw-r--r--  lib/kernel/src/error_handler.erl | 141
-rw-r--r--  lib/kernel/src/error_logger.erl | 387
-rw-r--r--  lib/kernel/src/erts_debug.erl | 155
-rw-r--r--  lib/kernel/src/file.erl | 1077
-rw-r--r--  lib/kernel/src/file_io_server.erl | 882
-rw-r--r--  lib/kernel/src/file_server.erl | 325
-rw-r--r--  lib/kernel/src/gen_sctp.erl | 230
-rw-r--r--  lib/kernel/src/gen_tcp.erl | 192
-rw-r--r--  lib/kernel/src/gen_udp.erl | 117
-rw-r--r--  lib/kernel/src/global.erl | 2244
-rw-r--r--  lib/kernel/src/global_group.erl | 1347
-rw-r--r--  lib/kernel/src/global_search.erl | 279
-rw-r--r--  lib/kernel/src/group.erl | 689
-rw-r--r--  lib/kernel/src/heart.erl | 271
-rw-r--r--  lib/kernel/src/hipe_ext_format.hrl | 41
-rw-r--r--  lib/kernel/src/hipe_unified_loader.erl | 894
-rw-r--r--  lib/kernel/src/inet.erl | 1342
-rw-r--r--  lib/kernel/src/inet6_sctp.erl | 75
-rw-r--r--  lib/kernel/src/inet6_tcp.erl | 153
-rw-r--r--  lib/kernel/src/inet6_tcp_dist.erl | 417
-rw-r--r--  lib/kernel/src/inet6_udp.erl | 87
-rw-r--r--  lib/kernel/src/inet_boot.hrl | 32
-rw-r--r--  lib/kernel/src/inet_config.erl | 638
-rw-r--r--  lib/kernel/src/inet_config.hrl | 34
-rw-r--r--  lib/kernel/src/inet_db.erl | 1525
-rw-r--r--  lib/kernel/src/inet_dns.erl | 701
-rw-r--r--  lib/kernel/src/inet_dns.hrl | 208
-rw-r--r--  lib/kernel/src/inet_dns_record_adts.pl | 180
-rw-r--r--  lib/kernel/src/inet_gethost_native.erl | 626
-rw-r--r--  lib/kernel/src/inet_hosts.erl | 123
-rw-r--r--  lib/kernel/src/inet_int.hrl | 414
-rw-r--r--  lib/kernel/src/inet_parse.erl | 755
-rw-r--r--  lib/kernel/src/inet_res.erl | 846
-rw-r--r--  lib/kernel/src/inet_res.hrl | 42
-rw-r--r--  lib/kernel/src/inet_sctp.erl | 139
-rw-r--r--  lib/kernel/src/inet_tcp.erl | 153
-rw-r--r--  lib/kernel/src/inet_tcp_dist.erl | 448
-rw-r--r--  lib/kernel/src/inet_udp.erl | 132
-rw-r--r--  lib/kernel/src/kernel.app.src | 120
-rw-r--r--  lib/kernel/src/kernel.appup.src | 1
-rw-r--r--  lib/kernel/src/kernel.erl | 292
-rw-r--r--  lib/kernel/src/kernel_config.erl | 173
-rw-r--r--  lib/kernel/src/net.erl | 39
-rw-r--r--  lib/kernel/src/net_address.hrl | 28
-rw-r--r--  lib/kernel/src/net_adm.erl | 239
-rw-r--r--  lib/kernel/src/net_kernel.erl | 1513
-rw-r--r--  lib/kernel/src/os.erl | 291
-rw-r--r--  lib/kernel/src/packages.erl | 158
-rw-r--r--  lib/kernel/src/pg2.erl | 376
-rw-r--r--  lib/kernel/src/ram_file.erl | 492
-rw-r--r--  lib/kernel/src/rpc.erl | 609
-rw-r--r--  lib/kernel/src/seq_trace.erl | 126
-rw-r--r--  lib/kernel/src/standard_error.erl | 253
-rw-r--r--  lib/kernel/src/user.erl | 786
-rw-r--r--  lib/kernel/src/user_drv.erl | 614
-rw-r--r--  lib/kernel/src/user_sup.erl | 129
-rw-r--r--  lib/kernel/src/wrap_log_reader.erl | 288
81 files changed, 37945 insertions, 0 deletions
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
new file mode 100644
index 0000000000..ef280058fb
--- /dev/null
+++ b/lib/kernel/src/Makefile
@@ -0,0 +1,243 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 1996-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+ifdef BOOTSTRAP
+EGEN=$(BOOTSTRAP_TOP)/lib/kernel/egen
+EBIN=$(BOOTSTRAP_TOP)/lib/kernel/ebin
+endif
+
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+# ----------------------------------------------------
+# Application version
+# ----------------------------------------------------
+
+# Include erts/system/vsn.mk to get the port number for EPMD -- we will
+# get an unwanted definition for VSN too. Therefore,
+# we'll use KERNEL_VSN directly instead of assigning it to
+# VSN, as is done in other Makefiles. Same with HIPE_VSN.
+
+include ../vsn.mk
+include $(ERL_TOP)/erts/vsn.mk
+include $(ERL_TOP)/lib/hipe/vsn.mk
+
+include $(ERL_TOP)/erts/epmd/epmd.mk
+
+
+# ----------------------------------------------------
+# Release directory specification
+# ----------------------------------------------------
+RELSYSDIR = $(RELEASE_PATH)/lib/kernel-$(KERNEL_VSN)
+
+# ----------------------------------------------------
+# Target Specs
+# ----------------------------------------------------
+
+
+MODULES = \
+ application \
+ application_controller \
+ application_master \
+ application_starter \
+ auth \
+ code \
+ code_server \
+ disk_log \
+ disk_log_1 \
+ disk_log_server \
+ disk_log_sup \
+ dist_ac \
+ dist_util \
+ erl_boot_server \
+ erl_ddll \
+ erl_distribution \
+ erl_epmd \
+ erl_reply \
+ erts_debug \
+ error_handler \
+ error_logger \
+ file \
+ file_io_server \
+ file_server \
+ gen_tcp \
+ gen_udp \
+ gen_sctp \
+ global \
+ global_group \
+ global_search \
+ group \
+ heart \
+ hipe_unified_loader \
+ inet \
+ inet6_tcp \
+ inet6_tcp_dist \
+ inet6_udp \
+ inet6_sctp \
+ inet_config \
+ inet_db \
+ inet_dns \
+ inet_gethost_native \
+ inet_hosts \
+ inet_parse \
+ inet_res \
+ inet_tcp \
+ inet_tcp_dist \
+ inet_udp \
+ inet_sctp \
+ kernel \
+ kernel_config \
+ net \
+ net_adm \
+ net_kernel \
+ os \
+ packages \
+ pg2 \
+ ram_file \
+ rpc \
+ seq_trace \
+ standard_error \
+ user \
+ user_drv \
+ user_sup \
+ wrap_log_reader
+
+HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl
+INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
+ net_address.hrl inet_dns.hrl inet_res.hrl \
+ inet_boot.hrl inet_config.hrl inet_int.hrl \
+ dist.hrl dist_util.hrl inet_dns_record_adts.hrl
+
+ERL_FILES= $(MODULES:%=%.erl)
+
+TARGET_FILES = $(MODULES:%=$(EBIN)/%.$(EMULATOR)) \
+ $(APP_TARGET) $(APPUP_TARGET)
+
+APP_FILE= kernel.app
+
+APP_SRC= $(APP_FILE).src
+APP_TARGET= $(EBIN)/$(APP_FILE)
+
+APPUP_FILE= kernel.appup
+
+APPUP_SRC= $(APPUP_FILE).src
+APPUP_TARGET= $(EBIN)/$(APPUP_FILE)
+
+# ----------------------------------------------------
+# FLAGS
+# ----------------------------------------------------
+
+ERL_COMPILE_FLAGS += -I../include
+
+# ----------------------------------------------------
+# Targets
+# ----------------------------------------------------
+
+debug opt: $(TARGET_FILES)
+
+# Note: In the open-source build, clean must not destroy the preloaded
+# beam files.
+clean:
+ rm -f $(NON_PRECIOUS_TARGETS)
+ rm -f core
+
+
+docs:
+
+# ----------------------------------------------------
+# Special Build Targets
+# ----------------------------------------------------
+
+../../hipe/main/hipe.hrl: ../../hipe/vsn.mk ../../hipe/main/hipe.hrl.src
+ sed -e "s;%VSN%;$(HIPE_VSN);" ../../hipe/main/hipe.hrl.src > ../../hipe/main/hipe.hrl
+
+$(APP_TARGET): $(APP_SRC) ../vsn.mk
+ sed -e 's;%VSN%;$(KERNEL_VSN);' $< > $@
+
+$(APPUP_TARGET): $(APPUP_SRC) ../vsn.mk
+ sed -e 's;%VSN%;$(KERNEL_VSN);' $< > $@
+
+
+EPMD_FLAGS = -Depmd_port_no=$(EPMD_PORT_NO) \
+ -Depmd_node_type=$(EPMD_NODE_TYPE) \
+ -Depmd_dist_low=$(EPMD_DIST_LOW) \
+ -Depmd_dist_high=$(EPMD_DIST_HIGH) \
+ -Derlang_daemon_port=$(EPMD_PORT_NO)
+
+$(ESRC)/inet_dns_record_adts.hrl: $(ESRC)/inet_dns_record_adts.pl
+ LANG=C $(PERL) $< > $@
+
+$(EBIN)/erl_epmd.beam: $(ESRC)/erl_epmd.erl
+ $(ERLC) $(ERL_COMPILE_FLAGS) $(EPMD_FLAGS) -o$(EBIN) $<
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
+
+release_spec: opt
+ $(INSTALL_DIR) $(RELSYSDIR)/src
+ $(INSTALL_DATA) $(ERL_FILES) $(RELSYSDIR)/src
+ $(INSTALL_DATA) $(INTERNAL_HRL_FILES) $(RELSYSDIR)/src
+ $(INSTALL_DIR) $(RELSYSDIR)/include
+ $(INSTALL_DATA) $(HRL_FILES) $(RELSYSDIR)/include
+ $(INSTALL_DIR) $(RELSYSDIR)/ebin
+ $(INSTALL_DATA) $(TARGET_FILES) $(RELSYSDIR)/ebin
+
+release_docs_spec:
+
+
+# Include dependencies -- list below added by Kostis Sagonas
+$(EBIN)/application_controller.beam: application_master.hrl
+$(EBIN)/application_master.beam: application_master.hrl
+$(EBIN)/auth.beam: ../include/file.hrl
+$(EBIN)/code.beam: ../include/file.hrl
+$(EBIN)/code_server.beam: ../include/file.hrl
+$(EBIN)/disk_log.beam: disk_log.hrl
+$(EBIN)/disk_log_1.beam: disk_log.hrl ../include/file.hrl
+$(EBIN)/disk_log_server.beam: disk_log.hrl
+$(EBIN)/dist_util.beam: dist_util.hrl dist.hrl
+$(EBIN)/erl_boot_server.beam: inet_boot.hrl
+$(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl
+$(EBIN)/file.beam: ../include/file.hrl
+$(EBIN)/gen_tcp.beam: inet_int.hrl
+$(EBIN)/gen_udp.beam: inet_int.hrl
+$(EBIN)/gen_sctp.beam: ../include/inet_sctp.hrl
+$(EBIN)/global.beam: ../../stdlib/include/ms_transform.hrl
+$(EBIN)/hipe_unified_loader.beam: ../../hipe/main/hipe.hrl hipe_ext_format.hrl
+$(EBIN)/inet.beam: ../include/inet.hrl inet_int.hrl ../include/inet_sctp.hrl
+$(EBIN)/inet6_tcp.beam: inet_int.hrl
+$(EBIN)/inet6_tcp_dist.beam: net_address.hrl dist.hrl dist_util.hrl
+$(EBIN)/inet6_udp.beam: inet_int.hrl
+$(EBIN)/inet6_sctp.beam: inet_int.hrl
+$(EBIN)/inet_config.beam: inet_config.hrl ../include/inet.hrl
+$(EBIN)/inet_db.beam: ../include/inet.hrl inet_int.hrl inet_res.hrl inet_dns.hrl inet_config.hrl
+$(EBIN)/inet_dns.beam: inet_int.hrl inet_dns.hrl inet_dns_record_adts.hrl
+$(EBIN)/inet_gethost_native.beam: ../include/inet.hrl
+$(EBIN)/inet_hosts.beam: ../include/inet.hrl
+$(EBIN)/inet_parse.beam: ../include/file.hrl
+$(EBIN)/inet_res.beam: ../include/inet.hrl inet_res.hrl inet_dns.hrl inet_int.hrl
+$(EBIN)/inet_tcp.beam: inet_int.hrl
+$(EBIN)/inet_udp_dist.beam: net_address.hrl dist.hrl dist_util.hrl
+$(EBIN)/inet_udp.beam: inet_int.hrl
+$(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl
+$(EBIN)/net_kernel.beam: net_address.hrl
+$(EBIN)/os.beam: ../include/file.hrl
+$(EBIN)/ram_file.beam: ../include/file.hrl
+$(EBIN)/wrap_log_reader.beam: disk_log.hrl ../include/file.hrl
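The only file with a special compile rule above is erl_epmd.erl: the EPMD port number and the supported distribution protocol range are handed to erlc as preprocessor macros through the -D options collected in EPMD_FLAGS. A minimal sketch of how such a macro surfaces in Erlang code follows; the module name epmd_port_demo and the fallback clause are illustrative assumptions, not part of this commit.

    %% Build with the macro defined, e.g.: erlc -Derlang_daemon_port=4369 epmd_port_demo.erl
    -module(epmd_port_demo).
    -export([port/0]).

    -ifdef(erlang_daemon_port).
    %% Value injected at compile time by the build system (see EPMD_FLAGS).
    port() -> ?erlang_daemon_port.
    -else.
    %% No macro given: fall back to the well-known default EPMD port.
    port() -> 4369.
    -endif.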
diff --git a/lib/kernel/src/application.erl b/lib/kernel/src/application.erl
new file mode 100644
index 0000000000..d9db23d652
--- /dev/null
+++ b/lib/kernel/src/application.erl
@@ -0,0 +1,263 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(application).
+
+-export([start/1, start/2, start_boot/1, start_boot/2, stop/1,
+ load/1, load/2, unload/1, takeover/2,
+ which_applications/0, which_applications/1,
+ loaded_applications/0, permit/2]).
+-export([set_env/3, set_env/4, unset_env/2, unset_env/3]).
+-export([get_env/1, get_env/2, get_all_env/0, get_all_env/1]).
+-export([get_key/1, get_key/2, get_all_key/0, get_all_key/1]).
+-export([get_application/0, get_application/1, info/0]).
+-export([start_type/0]).
+
+-export([behaviour_info/1]).
+
+%%%-----------------------------------------------------------------
+
+-type restart_type() :: 'permanent' | 'transient' | 'temporary'.
+-type application_opt() :: {'description', string()}
+ | {'vsn', string()}
+ | {'id', string()}
+ | {'modules', [atom() | {atom(), any()}]}
+ | {'registered', [atom()]}
+ | {'applications', [atom()]}
+ | {'included_applications', [atom()]}
+ | {'env', [{atom(), any()}]}
+ | {'start_phases', [{atom(), any()}] | 'undefined'}
+ | {'maxT', timeout()} % max timeout
+ | {'maxP', integer() | 'infinity'} % max processes
+ | {'mod', {atom(), any()}}.
+-type application_spec() :: {'application', atom(), [application_opt()]}.
+
+%%------------------------------------------------------------------
+
+-spec behaviour_info(atom()) -> 'undefined' | [{atom(), byte()}].
+
+behaviour_info(callbacks) ->
+ [{start,2},{stop,1}];
+behaviour_info(_Other) ->
+ undefined.
+
+%%%-----------------------------------------------------------------
+%%% This module is the API towards application_controller and
+%%% application_master.
+%%%-----------------------------------------------------------------
+
+-spec load(Application :: atom() | application_spec()) ->
+ 'ok' | {'error', term()}.
+
+load(Application) ->
+ load(Application, []).
+
+-spec load(Application :: atom() | application_spec(),
+ Distributed :: any()) -> 'ok' | {'error', term()}.
+
+load(Application, DistNodes) ->
+ case application_controller:load_application(Application) of
+ ok when DistNodes =/= [] ->
+ AppName = get_appl_name(Application),
+ case dist_ac:load_application(AppName, DistNodes) of
+ ok ->
+ ok;
+ {error, R} ->
+ application_controller:unload_application(AppName),
+ {error, R}
+ end;
+ Else ->
+ Else
+ end.
+
+-spec unload(Application :: atom()) -> 'ok' | {'error', term()}.
+
+unload(Application) ->
+ application_controller:unload_application(Application).
+
+-spec start(Application :: atom()) -> 'ok' | {'error', term()}.
+
+start(Application) ->
+ start(Application, temporary).
+
+-spec start(Application :: atom() | application_spec(),
+ RestartType :: restart_type()) -> any().
+
+start(Application, RestartType) ->
+ case load(Application) of
+ ok ->
+ Name = get_appl_name(Application),
+ application_controller:start_application(Name, RestartType);
+ {error, {already_loaded, Name}} ->
+ application_controller:start_application(Name, RestartType);
+ Error ->
+ Error
+ end.
+
+-spec start_boot(Application :: atom()) -> 'ok' | {'error', term()}.
+
+start_boot(Application) ->
+ start_boot(Application, temporary).
+
+-spec start_boot(Application :: atom(), RestartType :: restart_type()) ->
+ 'ok' | {'error', term()}.
+
+start_boot(Application, RestartType) ->
+ application_controller:start_boot_application(Application, RestartType).
+
+-spec takeover(Application :: atom(), RestartType :: restart_type()) -> any().
+
+takeover(Application, RestartType) ->
+ dist_ac:takeover_application(Application, RestartType).
+
+-spec permit(Application :: atom(), Bool :: boolean()) -> 'ok' | {'error', term()}.
+
+permit(Application, Bool) ->
+ case Bool of
+ true -> ok;
+ false -> ok;
+ Bad -> exit({badarg, {?MODULE, permit, [Application, Bad]}})
+ end,
+ case application_controller:permit_application(Application, Bool) of
+ distributed_application ->
+ dist_ac:permit_application(Application, Bool);
+ {distributed_application, only_loaded} ->
+ dist_ac:permit_only_loaded_application(Application, Bool);
+ LocalResult ->
+ LocalResult
+ end.
+
+-spec stop(Application :: atom()) -> 'ok' | {'error', term()}.
+
+stop(Application) ->
+ application_controller:stop_application(Application).
+
+-spec which_applications() -> [{atom(), string(), string()}].
+
+which_applications() ->
+ application_controller:which_applications().
+
+-spec which_applications(timeout()) -> [{atom(), string(), string()}].
+
+which_applications(infinity) ->
+ application_controller:which_applications(infinity);
+which_applications(Timeout) when is_integer(Timeout), Timeout>=0 ->
+ application_controller:which_applications(Timeout).
+
+-spec loaded_applications() -> [{atom(), string(), string()}].
+
+loaded_applications() ->
+ application_controller:loaded_applications().
+
+-spec info() -> any().
+
+info() ->
+ application_controller:info().
+
+-spec set_env(Application :: atom(), Key :: atom(), Value :: any()) -> 'ok'.
+
+set_env(Application, Key, Val) ->
+ application_controller:set_env(Application, Key, Val).
+
+-spec set_env(Application :: atom(), Key :: atom(),
+ Value :: any(), Timeout :: timeout()) -> 'ok'.
+
+set_env(Application, Key, Val, infinity) ->
+ application_controller:set_env(Application, Key, Val, infinity);
+set_env(Application, Key, Val, Timeout) when is_integer(Timeout), Timeout>=0 ->
+ application_controller:set_env(Application, Key, Val, Timeout).
+
+-spec unset_env(atom(), atom()) -> 'ok'.
+
+unset_env(Application, Key) ->
+ application_controller:unset_env(Application, Key).
+
+-spec unset_env(atom(), atom(), timeout()) -> 'ok'.
+
+unset_env(Application, Key, infinity) ->
+ application_controller:unset_env(Application, Key, infinity);
+unset_env(Application, Key, Timeout) when is_integer(Timeout), Timeout>=0 ->
+ application_controller:unset_env(Application, Key, Timeout).
+
+-spec get_env(atom()) -> 'undefined' | {'ok', term()}.
+
+get_env(Key) ->
+ application_controller:get_pid_env(group_leader(), Key).
+
+-spec get_env(atom(), atom()) -> 'undefined' | {'ok', term()}.
+
+get_env(Application, Key) ->
+ application_controller:get_env(Application, Key).
+
+-spec get_all_env() -> [] | [{atom(), any()}].
+
+get_all_env() ->
+ application_controller:get_pid_all_env(group_leader()).
+
+-spec get_all_env(atom()) -> [] | [{atom(), any()}].
+
+get_all_env(Application) ->
+ application_controller:get_all_env(Application).
+
+-spec get_key(atom()) -> 'undefined' | {'ok', term()}.
+
+get_key(Key) ->
+ application_controller:get_pid_key(group_leader(), Key).
+
+-spec get_key(atom(), atom()) -> 'undefined' | {'ok', term()}.
+
+get_key(Application, Key) ->
+ application_controller:get_key(Application, Key).
+
+-spec get_all_key() -> 'undefined' | [] | {'ok', [{atom(),any()},...]}.
+
+get_all_key() ->
+ application_controller:get_pid_all_key(group_leader()).
+
+-spec get_all_key(atom()) -> 'undefined' | {'ok', [{atom(),any()},...]}.
+
+get_all_key(Application) ->
+ application_controller:get_all_key(Application).
+
+-spec get_application() -> 'undefined' | {'ok', atom()}.
+
+get_application() ->
+ application_controller:get_application(group_leader()).
+
+-spec get_application(Pid :: pid()) -> 'undefined' | {'ok', atom()}
+ ; (Module :: atom()) -> 'undefined' | {'ok', atom()}.
+
+get_application(Pid) when is_pid(Pid) ->
+ case process_info(Pid, group_leader) of
+ {group_leader, Gl} ->
+ application_controller:get_application(Gl);
+ undefined ->
+ undefined
+ end;
+get_application(Module) when is_atom(Module) ->
+ application_controller:get_application_module(Module).
+
+-spec start_type() -> 'undefined' | 'local' | 'normal'
+ | {'takeover', node()} | {'failover', node()}.
+
+start_type() ->
+ application_controller:start_type(group_leader()).
+
+%% Internal
+get_appl_name(Name) when is_atom(Name) -> Name;
+get_appl_name({application, Name, _}) when is_atom(Name) -> Name.
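The module above is only the API front end towards application_controller; what makes `application` a behaviour is the behaviour_info(callbacks) clause near the top, which requires two callbacks, start/2 and stop/1, from every callback module. A minimal sketch of such a callback module follows; the application name my_app and the plain worker process are illustrative assumptions (a real application would also ship a my_app.app resource file and normally start a supervisor here).

    -module(my_app).
    -behaviour(application).
    -export([start/2, stop/1]).

    %% Invoked via the application master when application:start(my_app) runs.
    %% The returned pid is what the application master keeps track of.
    start(_StartType, _StartArgs) ->
        Pid = spawn_link(fun() -> receive stop -> ok end end),
        {ok, Pid}.

    %% Invoked after the application has been stopped.
    stop(_State) ->
        ok.

Typical use, assuming my_app.app is on the code path:

    ok = application:load(my_app),
    ok = application:start(my_app, temporary),
    undefined = application:get_env(my_app, some_key),
    ok = application:stop(my_app).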
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
new file mode 100644
index 0000000000..7c1f059875
--- /dev/null
+++ b/lib/kernel/src/application_controller.erl
@@ -0,0 +1,1946 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(application_controller).
+
+%% External exports
+-export([start/1,
+ load_application/1, unload_application/1,
+ start_application/2, start_boot_application/2, stop_application/1,
+ control_application/1,
+ change_application_data/2, prep_config_change/0, config_change/1,
+ which_applications/0, which_applications/1,
+ loaded_applications/0, info/0,
+ get_pid_env/2, get_env/2, get_pid_all_env/1, get_all_env/1,
+ get_pid_key/2, get_key/2, get_pid_all_key/1, get_all_key/1,
+ get_master/1, get_application/1, get_application_module/1,
+ start_type/1, permit_application/2, do_config_diff/2,
+ set_env/3, set_env/4, unset_env/2, unset_env/3]).
+
+%% Internal exports
+-export([handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, init_starter/4, get_loaded/1]).
+
+%% Test exports, only to be used from the test suites
+-export([test_change_apps/2]).
+
+-import(lists, [zf/2, map/2, foreach/2, foldl/3,
+ keysearch/3, keydelete/3, keyreplace/4]).
+
+-include("application_master.hrl").
+
+-define(AC, ?MODULE). % Name of process
+
+%%%-----------------------------------------------------------------
+%%% The application_controller controls local applications only. A
+%%% local application can be loaded/started/stopped/unloaded and
+%%% changed. The control of distributed applications is taken care of
+%%% by another process (default is dist_ac).
+%%%
+%%% When an application has been started (by a call to application:start)
+%%% it can be running or not running (on this node). For example,
+%%% a distributed application must be started on all nodes, but
+%%% may be running on only one node at a time.
+%%%
+%%% The external API to this module is in the module 'application'.
+%%%
+%%% The process that controls distributed applications (called dist
+%%% ac) calls application_controller:control_application(Name) to
+%%% take responsibility for an application. The interface between AC
+%%% and the dist_ac process is message-based:
+%%%
+%%% AC DIST AC
+%%% == =======
+%%% --> {ac_load_application_req, Name}
+%%% <-- {ac_load_application_reply, Name, LoadReply}
+%%% --> {ac_start_application_req, Name} (*)
+%%% <-- {ac_start_application_reply, Name, StartReply}
+%%% --> {ac_application_run, Name, Res}
+%%% --> {ac_application_not_run, Name, Res}
+%%% --> {ac_application_stopped, Name}
+%%% --> {ac_application_unloaded, Name}
+%%% <-- {ac_change_application_req, Name, Req} (**)
+%%%
+%%% Where LoadReply =
+%%% ok - App is loaded
+%%% {error, R} - An error occurred
+%%% And StartReply =
+%%% start_it - DIST AC decided that AC should start the app
+%%% {started, Node} - The app is started distributed at Node
+%%% not_started - The app should not be running at this time
+%%% {takeover, Node}- The app should takeover from Node
+%%% {error, R} - an error occurred
+%%% And Req =
+%%% start_it - DIST AC wants AC to start the app locally
+%%% stop_it - AC should stop the app.
+%%% {takeover, Node, RestartType}
+%%% - AC should start the app as a takeover
+%%% {failover, Node, RestartType}
+%%% - AC should start the app as a failover
+%%% {started, Node} - The app is started at Node
+%%% NOTE: The app must have been started at this node
+%%% before this request is sent!
+%%% And Res =
+%%% ok - Application is started locally
+%%% {error, R} - Start of application failed
+%%%
+%%% (*)
+%%% The call to application:start() doesn't return until the
+%%% ac_start_application_reply has been received by AC. AC
+%%% itself is not blocked however.
+%%% (**)
+%%% DIST AC gets ACK to its ac_change_application_req, but not as a
+%%%    separate message. Instead, the normal messages are used as:
+%%% start_it generates an ac_application_run
+%%% stop_it generates an ac_application_not_run
+%%% takeover generates an ac_application_run
+%%% started doesn't generate anything
+%%%
+%%% There is a distinction between application:stop and stop_it
+%%% from a dist ac process. The first one stops the application,
+%%%    and resets the internal structures to what they were before
+%%%    start was called. stop_it stops the application, but just marks
+%%%    it as not running.
+%%%
+%%% When a dist ac process has taken control of an application, no
+%%% other process can take control of it.
+%%%-----------------------------------------------------------------
+
+%%-----------------------------------------------------------------
+%% Naming conventions:
+%% App = appl_descr()
+%% Appl = #appl
+%% AppName = atom()
+%% Application = App | AppName
+%%-----------------------------------------------------------------
+-record(state, {loading = [], starting = [], start_p_false = [], running = [],
+ control = [], started = [], start_req = [], conf_data}).
+%%-----------------------------------------------------------------
+%% loading = [{AppName, From}] - Load not yet finished
+%% starting = [{AppName, RestartType, Type, From}] - Start not
+%% yet finished
+%% start_p_false = [{AppName, RestartType, Type, From}] - Start not
+%% executed because permit == false
+%% running = [{AppName, Pid}] - running locally (Pid == application_master)
+%% [{AppName, {distributed, Node}}] - running on Node
+%% control = [{AppName, Controller}]
+%% started = [{AppName, RestartType}] - Names of all apps that
+%% have been started (but may not run because
+%% permission = false)
+%% conf_data = [{AppName, Env}]
+%% start_req = [{AppName, From}] - list of all start requests
+%% Id = AMPid | undefined | {distributed, Node}
+%% Env = [{Key, Value}]
+%%-----------------------------------------------------------------
+
+-record(appl, {name, appl_data, descr, id, vsn, restart_type, inc_apps, apps}).
+
+%%-----------------------------------------------------------------
+%% Func: start/1
+%% Args: KernelApp = appl_descr()
+%% appl_descr() = [{application, Name, [appl_opt()]}]
+%% appl_opt() = {description, string()} |
+%% {vsn, string()} |
+%%                     {id, string()} |
+%% {modules, [Module|{Module,Vsn}]} |
+%% {registered, [atom()]} |
+%% {applications, [atom()]} |
+%% {included_applications, [atom()]} |
+%% {env, [{atom(), term()}]} |
+%% {start_phases, [{atom(), term()}]}|
+%% {maxT, integer()|infinity} |
+%% {maxP, integer()|infinity} |
+%% {mod, {Module, term()}}
+%% Module = atom()
+%% Vsn = term()
+%% Purpose: Starts the application_controller. This process starts all
+%% application masters for the applications.
+%% The kernel application is the only application that is
+%% treated specially. The reason for this is that the kernel
+%%          starts user. The user process is special because it should
+%%          be the group_leader of this process.
+%% Pre: All modules are loaded, or will be loaded on demand.
+%% Returns: {ok, Pid} | ReasonStr
+%%-----------------------------------------------------------------
+start(KernelApp) ->
+ %% OTP-5811 Don't start as a gen_server to prevent crash report
+ %% when (if) the process terminates
+ Init = self(),
+ AC = spawn_link(fun() -> init(Init, KernelApp) end),
+ receive
+ {ack, AC, ok} ->
+ {ok, AC};
+ {ack, AC, {error, Reason}} ->
+ to_string(Reason); % init doesn't want error tuple, only a reason
+ {'EXIT', _Pid, Reason} ->
+ to_string(Reason)
+ end.
+
+%%-----------------------------------------------------------------
+%% Func: load_application/1
+%% Args: Application = appl_descr() | atom()
+%% Purpose: Loads an application. Currently just inserts the
+%% application's env.
+%% Returns: ok | {error, Reason}
+%%-----------------------------------------------------------------
+load_application(Application) ->
+ gen_server:call(?AC, {load_application, Application}, infinity).
+
+unload_application(AppName) ->
+ gen_server:call(?AC, {unload_application, AppName}, infinity).
+
+%%-----------------------------------------------------------------
+%% Func: start_application/2
+%% Args: Application = atom()
+%% RestartType = permanent | transient | temporary
+%% Purpose: Starts a new application.
+%% The RestartType specifies what should happen if the
+%% application dies:
+%% If it is permanent, all other applications are terminated,
+%% and the application_controller dies.
+%% If it is transient, and the application dies normally,
+%% this is reported and no other applications are terminated.
+%% If the application dies abnormally, all other applications
+%% are terminated, and the application_controller dies.
+%% If it is temporary and the application dies this is reported
+%% and no other applications are terminated. In this way,
+%% an application can run in test mode, without disturbing
+%% the other applications.
+%% The caller of this function is suspended until the application
+%% is started, either locally or distributed.
+%% Returns: ok | {error, Reason}
+%%-----------------------------------------------------------------
+start_application(AppName, RestartType) ->
+ gen_server:call(?AC, {start_application, AppName, RestartType}, infinity).
+
+%%-----------------------------------------------------------------
+%% Func: start_boot_application/2
+%% The same as start_application/2, except that this function is
+%% called from the boot script file. It must not be used by the operator.
+%% This function will cause the node to crash if a permanent application
+%% fails to start during boot.
+%%-----------------------------------------------------------------
+start_boot_application(Application, RestartType) ->
+ case {application:load(Application), RestartType} of
+ {ok, _} ->
+ AppName = get_appl_name(Application),
+ gen_server:call(?AC, {start_application, AppName, RestartType}, infinity);
+ {{error, {already_loaded, AppName}}, _} ->
+ gen_server:call(?AC, {start_application, AppName, RestartType}, infinity);
+ {{error,{bad_environment_value,Env}}, permanent} ->
+ Txt = io_lib:format("Bad environment variable: ~p Application: ~p",
+ [Env, Application]),
+ exit({error, list_to_atom(lists:flatten(Txt))});
+ {Error, _} ->
+ Error
+ end.
+
+stop_application(AppName) ->
+ gen_server:call(?AC, {stop_application, AppName}, infinity).
+
+%%-----------------------------------------------------------------
+%% Returns: [{Name, Descr, Vsn}]
+%%-----------------------------------------------------------------
+which_applications() ->
+ gen_server:call(?AC, which_applications).
+which_applications(Timeout) ->
+ gen_server:call(?AC, which_applications, Timeout).
+
+loaded_applications() ->
+ ets:filter(ac_tab,
+ fun([{{loaded, AppName}, #appl{descr = Descr, vsn = Vsn}}]) ->
+ {true, {AppName, Descr, Vsn}};
+ (_) ->
+ false
+ end,
+ []).
+
+%% Returns some debug info
+info() ->
+ gen_server:call(?AC, info).
+
+control_application(AppName) ->
+ gen_server:call(?AC, {control_application, AppName}, infinity).
+
+%%-----------------------------------------------------------------
+%% Func: change_application_data/2
+%% Args: Applications = [appl_descr()]
+%% Config = [{AppName, [{Par,Val}]}]
+%% Purpose: Change all applications and their parameters on this node.
+%% This function should be used from a release handler, at
+%% the same time as the .app or start.boot file is
+%%          introduced. Note that for some time the ACs may have
+%%          different views of e.g. the distributed applications.
+%%          This is solved by syncing the release installation.
+%%          However, strange things may happen if a node crashes
+%%          and two other nodes have different opinions about who is
+%%          going to start the applications. The release handler must
+%%          shut down each involved node in this case.
+%% Note that this function is used to change existing apps,
+%% adding new/deleting old isn't handled by this function.
+%% Changes an application's vsn, descr and env.
+%% Returns: ok | {error, Reason}
+%% If an error occurred, the situation may be inconsistent,
+%%          so the release handler must restart the node, e.g. if
+%%          some application has got new config data.
+%%-----------------------------------------------------------------
+change_application_data(Applications, Config) ->
+ gen_server:call(?AC,
+ {change_application_data, Applications, Config},
+ infinity).
+
+prep_config_change() ->
+ gen_server:call(?AC,
+ prep_config_change,
+ infinity).
+
+
+config_change(EnvPrev) ->
+ gen_server:call(?AC,
+ {config_change, EnvPrev},
+ infinity).
+
+
+
+get_pid_env(Master, Key) ->
+ case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
+ [[AppName]] -> get_env(AppName, Key);
+ _ -> undefined
+ end.
+
+get_env(AppName, Key) ->
+ case ets:lookup(ac_tab, {env, AppName, Key}) of
+ [{_, Val}] -> {ok, Val};
+ _ -> undefined
+ end.
+
+get_pid_all_env(Master) ->
+ case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
+ [[AppName]] -> get_all_env(AppName);
+ _ -> []
+ end.
+
+get_all_env(AppName) ->
+ map(fun([Key, Val]) -> {Key, Val} end,
+ ets:match(ac_tab, {{env, AppName, '$1'}, '$2'})).
+
+
+
+
+get_pid_key(Master, Key) ->
+ case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
+ [[AppName]] -> get_key(AppName, Key);
+ _ -> undefined
+ end.
+
+get_key(AppName, Key) ->
+ case ets:lookup(ac_tab, {loaded, AppName}) of
+ [{_, Appl}] ->
+ case Key of
+ description ->
+ {ok, Appl#appl.descr};
+ id ->
+ {ok, Appl#appl.id};
+ vsn ->
+ {ok, Appl#appl.vsn};
+ modules ->
+ {ok, (Appl#appl.appl_data)#appl_data.mods};
+ maxP ->
+ {ok, (Appl#appl.appl_data)#appl_data.maxP};
+ maxT ->
+ {ok, (Appl#appl.appl_data)#appl_data.maxT};
+ registered ->
+ {ok, (Appl#appl.appl_data)#appl_data.regs};
+ included_applications ->
+ {ok, Appl#appl.inc_apps};
+ applications ->
+ {ok, Appl#appl.apps};
+ env ->
+ {ok, get_all_env(AppName)};
+ mod ->
+ {ok, (Appl#appl.appl_data)#appl_data.mod};
+ start_phases ->
+ {ok, (Appl#appl.appl_data)#appl_data.phases};
+ _ -> undefined
+ end;
+ _ ->
+ undefined
+ end.
+
+get_pid_all_key(Master) ->
+ case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
+ [[AppName]] -> get_all_key(AppName);
+ _ -> []
+ end.
+
+get_all_key(AppName) ->
+ case ets:lookup(ac_tab, {loaded, AppName}) of
+ [{_, Appl}] ->
+ {ok, [{description, Appl#appl.descr},
+ {id, Appl#appl.id},
+ {vsn, Appl#appl.vsn},
+ {modules, (Appl#appl.appl_data)#appl_data.mods},
+ {maxP, (Appl#appl.appl_data)#appl_data.maxP},
+ {maxT, (Appl#appl.appl_data)#appl_data.maxT},
+ {registered, (Appl#appl.appl_data)#appl_data.regs},
+ {included_applications, Appl#appl.inc_apps},
+ {applications, Appl#appl.apps},
+ {env, get_all_env(AppName)},
+ {mod, (Appl#appl.appl_data)#appl_data.mod},
+ {start_phases, (Appl#appl.appl_data)#appl_data.phases}
+ ]};
+ _ ->
+ undefined
+ end.
+
+
+start_type(Master) ->
+ case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
+ [[AppName]] ->
+ gen_server:call(?AC, {start_type, AppName}, infinity);
+ _X ->
+ undefined
+ end.
+
+
+
+
+
+
+get_master(AppName) ->
+ case ets:lookup(ac_tab, {application_master, AppName}) of
+ [{_, Pid}] -> Pid;
+ _ -> undefined
+ end.
+
+get_application(Master) ->
+ case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
+ [[AppName]] -> {ok, AppName};
+ _ -> undefined
+ end.
+
+get_application_module(Module) ->
+ ApplDataPattern = #appl_data{mods='$2', _='_'},
+ ApplPattern = #appl{appl_data=ApplDataPattern, _='_'},
+ AppModules = ets:match(ac_tab, {{loaded, '$1'}, ApplPattern}),
+ get_application_module(Module, AppModules).
+
+get_application_module(Module, [[AppName, Modules]|AppModules]) ->
+ case in_modules(Module, Modules) of
+ true ->
+ {ok, AppName};
+ false ->
+ get_application_module(Module, AppModules)
+ end;
+get_application_module(_Module, []) ->
+ undefined.
+
+%% 'modules' key in .app is a list of Module or {Module,Vsn}
+in_modules(Module, [Module|_Modules]) ->
+ true;
+in_modules(Module, [{Module, _Vsn}|_Modules]) ->
+ true;
+in_modules(Module, [_Module|Modules]) ->
+ in_modules(Module, Modules);
+in_modules(_Module, []) ->
+ false.
+
+permit_application(ApplName, Flag) ->
+ gen_server:call(?AC,
+ {permit_application, ApplName, Flag},
+ infinity).
+
+
+set_env(AppName, Key, Val) ->
+ gen_server:call(?AC, {set_env, AppName, Key, Val}).
+set_env(AppName, Key, Val, Timeout) ->
+ gen_server:call(?AC, {set_env, AppName, Key, Val}, Timeout).
+
+unset_env(AppName, Key) ->
+ gen_server:call(?AC, {unset_env, AppName, Key}).
+unset_env(AppName, Key, Timeout) ->
+ gen_server:call(?AC, {unset_env, AppName, Key}, Timeout).
+
+%%%-----------------------------------------------------------------
+%%% call-back functions from gen_server
+%%%-----------------------------------------------------------------
+init(Init, Kernel) ->
+ register(?AC, self()),
+ process_flag(trap_exit, true),
+ put('$ancestors', [Init]), % OTP-5811, for gen_server compatibility
+ put('$initial_call', {application_controller, start, 1}),
+
+ case catch check_conf() of
+ {ok, ConfData} ->
+ %% Actually, we don't need this info in an ets table anymore.
+ %% This table was introduced because starting applications
+	    %% should be able to get some info from AC (e.g. loaded_apps).
+ %% The new implementation makes sure the AC process can be
+ %% called during start-up of any app.
+ case check_conf_data(ConfData) of
+ ok ->
+ ets:new(ac_tab, [set, public, named_table]),
+ S = #state{conf_data = ConfData},
+ {ok, KAppl} = make_appl(Kernel),
+ case catch load(S, KAppl) of
+ {'EXIT', LoadError} ->
+ Reason = {'load error', LoadError},
+ Init ! {ack, self(), {error, to_string(Reason)}};
+ {ok, NewS} ->
+ Init ! {ack, self(), ok},
+ gen_server:enter_loop(?MODULE, [], NewS,
+ {local, ?AC})
+ end;
+ {error, ErrorStr} ->
+ Str = lists:flatten(io_lib:format("invalid config data: ~s", [ErrorStr])),
+ Init ! {ack, self(), {error, to_string(Str)}}
+ end;
+ {error, {File, Line, Str}} ->
+ ReasonStr =
+ lists:flatten(io_lib:format("error in config file "
+ "~p (~w): ~s",
+ [File, Line, Str])),
+ Init ! {ack, self(), {error, to_string(ReasonStr)}}
+ end.
+
+
+%% Check the syntax of the .config file [{ApplicationName, [{Parameter, Value}]}].
+check_conf_data([]) ->
+ ok;
+check_conf_data(ConfData) when is_list(ConfData) ->
+ [Application | ConfDataRem] = ConfData,
+ case Application of
+ {kernel, List} when is_list(List) ->
+ case check_para_kernel(List) of
+ ok ->
+ check_conf_data(ConfDataRem);
+ Error1 ->
+ Error1
+ end;
+ {AppName, List} when is_atom(AppName), is_list(List) ->
+ case check_para(List, atom_to_list(AppName)) of
+ ok ->
+ check_conf_data(ConfDataRem);
+ Error2 ->
+ Error2
+ end;
+ {AppName, List} when is_list(List) ->
+ ErrMsg = "application: "
+ ++ lists:flatten(io_lib:format("~p",[AppName]))
+ ++ "; application name must be an atom",
+ {error, ErrMsg};
+ {AppName, _List} ->
+ ErrMsg = "application: "
+ ++ lists:flatten(io_lib:format("~p",[AppName]))
+ ++ "; parameters must be a list",
+ {error, ErrMsg};
+ Else ->
+ ErrMsg = "invalid application name: " ++
+ lists:flatten(io_lib:format(" ~p",[Else])),
+ {error, ErrMsg}
+ end;
+check_conf_data(_ConfData) ->
+ {error, 'configuration must be a list ended by <dot><whitespace>'}.
+
+
+%% Special check of distributed parameter for kernel
+check_para_kernel([]) ->
+ ok;
+check_para_kernel([{distributed, Apps} | ParaList]) when is_list(Apps) ->
+ case check_distributed(Apps) of
+ {error, ErrorMsg} ->
+ {error, ErrorMsg};
+ _ ->
+ check_para_kernel(ParaList)
+ end;
+check_para_kernel([{distributed, _Apps} | _ParaList]) ->
+ {error, "application: kernel; erroneous parameter: distributed"};
+check_para_kernel([{Para, _Val} | ParaList]) when is_atom(Para) ->
+ check_para_kernel(ParaList);
+check_para_kernel([{Para, _Val} | _ParaList]) ->
+ {error, "application: kernel; invalid parameter: " ++
+ lists:flatten(io_lib:format("~p",[Para]))};
+check_para_kernel(Else) ->
+ {error, "application: kernel; invalid parameter list: " ++
+ lists:flatten(io_lib:format("~p",[Else]))}.
+
+
+check_distributed([]) ->
+ ok;
+check_distributed([{App, List} | Apps]) when is_atom(App), is_list(List) ->
+ check_distributed(Apps);
+check_distributed([{App, infinity, List} | Apps]) when is_atom(App), is_list(List) ->
+ check_distributed(Apps);
+check_distributed([{App, Time, List} | Apps]) when is_atom(App), is_integer(Time), is_list(List) ->
+ check_distributed(Apps);
+check_distributed(_Else) ->
+ {error, "application: kernel; erroneous parameter: distributed"}.
+
+
+check_para([], _AppName) ->
+ ok;
+check_para([{Para, _Val} | ParaList], AppName) when is_atom(Para) ->
+ check_para(ParaList, AppName);
+check_para([{Para, _Val} | _ParaList], AppName) ->
+ {error, "application: " ++ AppName ++ "; invalid parameter: " ++
+ lists:flatten(io_lib:format("~p",[Para]))};
+check_para([Else | _ParaList], AppName) ->
+ {error, "application: " ++ AppName ++ "; invalid parameter: " ++
+ lists:flatten(io_lib:format("~p",[Else]))}.
+
+
+handle_call({load_application, Application}, From, S) ->
+ case catch do_load_application(Application, S) of
+ {ok, NewS} ->
+ AppName = get_appl_name(Application),
+ case cntrl(AppName, S, {ac_load_application_req, AppName}) of
+ true ->
+ {noreply, S#state{loading = [{AppName, From} |
+ S#state.loading]}};
+ false ->
+ {reply, ok, NewS}
+ end;
+ {error, Error} ->
+ {reply, {error, Error}, S};
+ {'EXIT',R} ->
+ {reply, {error, R}, S}
+ end;
+
+handle_call({unload_application, AppName}, _From, S) ->
+ case lists:keymember(AppName, 1, S#state.running) of
+ true -> {reply, {error, {running, AppName}}, S};
+ false ->
+ case get_loaded(AppName) of
+ {true, _} ->
+ NewS = unload(AppName, S),
+ cntrl(AppName, S, {ac_application_unloaded, AppName}),
+ {reply, ok, NewS};
+ false ->
+ {reply, {error, {not_loaded, AppName}}, S}
+ end
+ end;
+
+handle_call({start_application, AppName, RestartType}, From, S) ->
+ #state{running = Running, starting = Starting, start_p_false = SPF,
+ started = Started, start_req = Start_req} = S,
+    %% Check if the command line environment variables are OK.
+    %% In case of erroneous variables, do not start the application;
+    %% if the application is permanent, crash the node.
+ %% Check if the application is already starting.
+ case lists:keysearch(AppName, 1, Start_req) of
+ false ->
+ case catch check_start_cond(AppName, RestartType, Started, Running) of
+ {ok, Appl} ->
+ Cntrl = cntrl(AppName, S, {ac_start_application_req, AppName}),
+ Perm = application:get_env(kernel, permissions),
+ case {Cntrl, Perm} of
+ {true, _} ->
+ {noreply, S#state{starting = [{AppName, RestartType, normal, From} |
+ Starting],
+ start_req = [{AppName, From} | Start_req]}};
+ {false, undefined} ->
+ spawn_starter(From, Appl, S, normal),
+ {noreply, S#state{starting = [{AppName, RestartType, normal, From} |
+ Starting],
+ start_req = [{AppName, From} | Start_req]}};
+ {false, {ok, Perms}} ->
+ case lists:member({AppName, false}, Perms) of
+ false ->
+ spawn_starter(From, Appl, S, normal),
+ {noreply, S#state{starting = [{AppName, RestartType, normal, From} |
+ Starting],
+ start_req = [{AppName, From} | Start_req]}};
+ true ->
+ SS = S#state{start_p_false = [{AppName, RestartType, normal, From} |
+ SPF]},
+ {reply, ok, SS}
+ end
+ end;
+ {error, R} ->
+ {reply, {error, R}, S}
+ end;
+ {value, {AppName, _FromX}} ->
+ SS = S#state{start_req = [{AppName, From} | Start_req]},
+ {noreply, SS}
+
+ end;
+
+handle_call({permit_application, AppName, Bool}, From, S) ->
+ Control = S#state.control,
+ Starting = S#state.starting,
+ SPF = S#state.start_p_false,
+ Started = S#state.started,
+ Running = S#state.running,
+ Start_req = S#state.start_req,
+ IsLoaded = get_loaded(AppName),
+ IsStarting = lists:keysearch(AppName, 1, Starting),
+ IsSPF = lists:keysearch(AppName, 1, SPF),
+ IsStarted = lists:keysearch(AppName, 1, Started),
+ IsRunning = lists:keysearch(AppName, 1, Running),
+
+ case lists:keymember(AppName, 1, Control) of
+ %%========================
+ %% distributed application
+ %%========================
+ true ->
+ case {IsLoaded, IsStarting, IsStarted} of
+ %% not loaded
+ {false, _, _} ->
+ {reply, {error, {not_loaded, AppName}}, S};
+ %% only loaded
+ {{true, _Appl}, false, false} ->
+ update_permissions(AppName, Bool),
+ {reply, {distributed_application, only_loaded}, S};
+ _ ->
+ update_permissions(AppName, Bool),
+ {reply, distributed_application, S}
+ end;
+ %%========================
+ %% local application
+ %%========================
+ false ->
+ case {Bool, IsLoaded, IsStarting, IsSPF, IsStarted, IsRunning} of
+ %%------------------------
+		%% permit the application
+ %%------------------------
+ %% already running
+ {true, _, _, _, _, {value, _Tuple}} ->
+ {reply, ok, S};
+ %% not loaded
+ {true, false, _, _, _, _} ->
+ {reply, {error, {not_loaded, AppName}}, S};
+ %% only loaded
+ {true, {true, _Appl}, false, false, false, false} ->
+ update_permissions(AppName, Bool),
+ {reply, ok, S};
+ %% starting
+ {true, {true, _Appl}, {value, _Tuple}, false, false, false} ->
+ update_permissions(AppName, Bool),
+		    {reply, ok, S}; %% check the permission after the app is started
+ %% start requested but not started because permit was false
+ {true, {true, Appl}, false, {value, Tuple}, false, false} ->
+ update_permissions(AppName, Bool),
+ {_AppName2, RestartType, normal, _From} = Tuple,
+ spawn_starter(From, Appl, S, normal),
+ SS = S#state{starting = [{AppName, RestartType, normal, From} | Starting],
+ start_p_false = keydelete(AppName, 1, SPF),
+ start_req = [{AppName, From} | Start_req]},
+ {noreply, SS};
+ %% started but not running
+ {true, {true, Appl}, _, _, {value, {AppName, RestartType}}, false} ->
+ update_permissions(AppName, Bool),
+ spawn_starter(From, Appl, S, normal),
+ SS = S#state{starting = [{AppName, RestartType, normal, From} | Starting],
+ started = keydelete(AppName, 1, Started),
+ start_req = [{AppName, From} | Start_req]},
+ {noreply, SS};
+
+ %%==========================
+		%% unpermit the application
+ %%==========================
+ %% running
+ {false, _, _, _, _, {value, {_AppName, Id}}} ->
+ {value, {_AppName2, Type}} = keysearch(AppName, 1, Started),
+ stop_appl(AppName, Id, Type),
+ NRunning = keydelete(AppName, 1, Running),
+ {reply, ok, S#state{running = NRunning}};
+ %% not loaded
+ {false, false, _, _, _, _} ->
+ {reply, {error, {not_loaded, AppName}}, S};
+ %% only loaded
+ {false, {true, _Appl}, false, false, false, false} ->
+ update_permissions(AppName, Bool),
+ {reply, ok, S};
+ %% starting
+ {false, {true, _Appl}, {value, _Tuple}, false, false, false} ->
+ update_permissions(AppName, Bool),
+ {reply, ok, S};
+ %% start requested but not started because permit was false
+ {false, {true, _Appl}, false, {value, _Tuple}, false, false} ->
+ update_permissions(AppName, Bool),
+ SS = S#state{start_p_false = keydelete(AppName, 1, SPF)},
+ {reply, ok, SS};
+ %% started but not running
+ {false, {true, _Appl}, _, _, {value, _Tuple}, false} ->
+ update_permissions(AppName, Bool),
+ {reply, ok, S}
+
+ end
+ end;
+
+handle_call({stop_application, AppName}, _From, S) ->
+ #state{running = Running, started = Started} = S,
+ case keysearch(AppName, 1, Running) of
+ {value, {_AppName, Id}} ->
+ {value, {_AppName2, Type}} = keysearch(AppName, 1, Started),
+ stop_appl(AppName, Id, Type),
+ NRunning = keydelete(AppName, 1, Running),
+ NStarted = keydelete(AppName, 1, Started),
+ cntrl(AppName, S, {ac_application_stopped, AppName}),
+ {reply, ok, S#state{running = NRunning, started = NStarted}};
+ false ->
+ case lists:keymember(AppName, 1, Started) of
+ true ->
+ NStarted = keydelete(AppName, 1, Started),
+ cntrl(AppName, S, {ac_application_stopped, AppName}),
+ {reply, ok, S#state{started = NStarted}};
+ false ->
+ {reply, {error, {not_started, AppName}}, S}
+ end
+ end;
+
+handle_call({change_application_data, Applications, Config}, _From, S) ->
+ OldAppls = ets:filter(ac_tab,
+ fun([{{loaded, _AppName}, Appl}]) ->
+ {true, Appl};
+ (_) ->
+ false
+ end,
+ []),
+ case catch do_change_apps(Applications, Config, OldAppls) of
+ {error, R} ->
+ {reply, {error, R}, S};
+ {'EXIT', R} ->
+ {reply, {error, R}, S};
+ NewAppls ->
+ lists:foreach(fun(Appl) ->
+ ets:insert(ac_tab, {{loaded, Appl#appl.name},
+ Appl})
+ end, NewAppls),
+ {reply, ok, S#state{conf_data = Config}}
+ end;
+
+handle_call(prep_config_change, _From, S) ->
+ RunningApps = S#state.running,
+ EnvBefore = lists:reverse(do_prep_config_change(RunningApps)),
+ {reply, EnvBefore, S};
+
+handle_call({config_change, EnvBefore}, _From, S) ->
+ RunningApps = S#state.running,
+ R = do_config_change(RunningApps, EnvBefore),
+ {reply, R, S};
+
+handle_call(which_applications, _From, S) ->
+ Reply = zf(fun({Name, Id}) ->
+ case Id of
+ {distributed, _Node} ->
+ false;
+ _ ->
+ {true, #appl{descr = Descr, vsn = Vsn}} =
+ get_loaded(Name),
+ {true, {Name, Descr, Vsn}}
+ end
+ end, S#state.running),
+ {reply, Reply, S};
+
+handle_call({set_env, AppName, Key, Val}, _From, S) ->
+ ets:insert(ac_tab, {{env, AppName, Key}, Val}),
+ {reply, ok, S};
+
+handle_call({unset_env, AppName, Key}, _From, S) ->
+ ets:delete(ac_tab, {env, AppName, Key}),
+ {reply, ok, S};
+
+handle_call({control_application, AppName}, {Pid, _Tag}, S) ->
+ Control = S#state.control,
+ case lists:keymember(AppName, 1, Control) of
+ false ->
+ link(Pid),
+ {reply, true, S#state{control = [{AppName, Pid} | Control]}};
+ true ->
+ {reply, false, S}
+ end;
+
+handle_call({start_type, AppName}, _From, S) ->
+ Starting = S#state.starting,
+ StartType = case keysearch(AppName, 1, Starting) of
+ false ->
+ local;
+ {value, {_AppName, _RestartType, Type, _F}} ->
+ Type
+ end,
+ {reply, StartType, S};
+
+handle_call(info, _From, S) ->
+ Reply = [{loaded, loaded_applications()},
+ {loading, S#state.loading},
+ {started, S#state.started},
+ {start_p_false, S#state.start_p_false},
+ {running, S#state.running},
+ {starting, S#state.starting}],
+ {reply, Reply, S}.
+
+handle_cast({application_started, AppName, Res}, S) ->
+ handle_application_started(AppName, Res, S).
+
+handle_application_started(AppName, Res, S) ->
+ #state{starting = Starting, running = Running, started = Started,
+ start_req = Start_req} = S,
+ Start_reqN = reply_to_requester(AppName, Start_req, Res),
+ {value, {_AppName, RestartType, _Type, _From}} = keysearch(AppName, 1, Starting),
+ case Res of
+ {ok, Id} ->
+ case AppName of
+ kernel -> check_user();
+ _ -> ok
+ end,
+ info_started(AppName, nd(Id)),
+ notify_cntrl_started(AppName, Id, S, ok),
+ NRunning = keyreplaceadd(AppName, 1, Running,{AppName,Id}),
+ NStarted = keyreplaceadd(AppName, 1, Started,{AppName,RestartType}),
+ NewS = S#state{starting = keydelete(AppName, 1, Starting),
+ running = NRunning,
+ started = NStarted,
+ start_req = Start_reqN},
+
+ %% The permission may have been changed during start
+ Perm = application:get_env(kernel, permissions),
+ case {Perm, Id} of
+ {undefined, _} ->
+ {noreply, NewS};
+	    %% Check only if the application is started on the local node
+ {{ok, Perms}, {distributed, StartNode}} when StartNode =:= node() ->
+ case lists:member({AppName, false}, Perms) of
+ true ->
+ #state{running = StopRunning, started = StopStarted} = NewS,
+ case keysearch(AppName, 1, StopRunning) of
+ {value, {_AppName, Id}} ->
+ {value, {_AppName2, Type}} =
+ keysearch(AppName, 1, StopStarted),
+ stop_appl(AppName, Id, Type),
+ NStopRunning = keydelete(AppName, 1, StopRunning),
+ cntrl(AppName, NewS, {ac_application_stopped, AppName}),
+ {noreply, NewS#state{running = NStopRunning,
+ started = StopStarted}};
+ false ->
+ {noreply, NewS}
+ end;
+ false ->
+ {noreply, NewS}
+ end;
+ _ ->
+ {noreply, NewS}
+ end;
+
+
+
+
+ {error, R} when RestartType =:= temporary ->
+ notify_cntrl_started(AppName, undefined, S, {error, R}),
+ info_exited(AppName, R, RestartType),
+ {noreply, S#state{starting = keydelete(AppName, 1, Starting),
+ start_req = Start_reqN}};
+ {info, R} when RestartType =:= temporary ->
+ notify_cntrl_started(AppName, undefined, S, {error, R}),
+ {noreply, S#state{starting = keydelete(AppName, 1, Starting),
+ start_req = Start_reqN}};
+ {ErrInf, R} when RestartType =:= transient, ErrInf =:= error;
+ RestartType =:= transient, ErrInf =:= info ->
+ notify_cntrl_started(AppName, undefined, S, {error, R}),
+ case ErrInf of
+ error ->
+ info_exited(AppName, R, RestartType);
+ info ->
+ ok
+ end,
+ case R of
+ {{'EXIT',normal},_Call} ->
+ {noreply, S#state{starting = keydelete(AppName, 1, Starting),
+ start_req = Start_reqN}};
+ _ ->
+ Reason = {application_start_failure, AppName, R},
+ {stop, to_string(Reason), S}
+ end;
+ {error, R} -> %% permanent
+ notify_cntrl_started(AppName, undefined, S, {error, R}),
+ info_exited(AppName, R, RestartType),
+ Reason = {application_start_failure, AppName, R},
+ {stop, to_string(Reason), S};
+ {info, R} -> %% permanent
+ notify_cntrl_started(AppName, undefined, S, {error, R}),
+ Reason = {application_start_failure, AppName, R},
+ {stop, to_string(Reason), S}
+ end.
+
+handle_info({ac_load_application_reply, AppName, Res}, S) ->
+ case keysearchdelete(AppName, 1, S#state.loading) of
+ {value, {_AppName, From}, Loading} ->
+ gen_server:reply(From, Res),
+ case Res of
+ ok ->
+ {noreply, S#state{loading = Loading}};
+ {error, _R} ->
+ NewS = unload(AppName, S),
+ {noreply, NewS#state{loading = Loading}}
+ end;
+ false ->
+ {noreply, S}
+ end;
+
+handle_info({ac_start_application_reply, AppName, Res}, S) ->
+ Start_req = S#state.start_req,
+ case keysearch(AppName, 1, Starting = S#state.starting) of
+ {value, {_AppName, RestartType, Type, From}} ->
+ case Res of
+ start_it ->
+ {true, Appl} = get_loaded(AppName),
+ spawn_starter(From, Appl, S, Type),
+ {noreply, S};
+ {started, Node} ->
+ handle_application_started(AppName,
+ {ok, {distributed, Node}},
+ S);
+ not_started ->
+ Started = S#state.started,
+ Start_reqN =
+ reply_to_requester(AppName, Start_req, ok),
+ {noreply,
+ S#state{starting = keydelete(AppName, 1, Starting),
+ started = [{AppName, RestartType} | Started],
+ start_req = Start_reqN}};
+ {takeover, Node} ->
+ {true, Appl} = get_loaded(AppName),
+ spawn_starter(From, Appl, S, {takeover, Node}),
+ NewStarting1 = keydelete(AppName, 1, Starting),
+ NewStarting = [{AppName, RestartType, {takeover, Node}, From} | NewStarting1],
+ {noreply, S#state{starting = NewStarting}};
+ {error, Reason} when RestartType =:= permanent ->
+ Start_reqN =
+ reply_to_requester(AppName, Start_req,
+ {error, Reason}),
+ {stop, to_string(Reason), S#state{start_req = Start_reqN}};
+ {error, Reason} ->
+ Start_reqN =
+ reply_to_requester(AppName, Start_req,
+ {error, Reason}),
+ {noreply, S#state{starting =
+ keydelete(AppName, 1, Starting),
+ start_req = Start_reqN}}
+ end;
+ false ->
+ {noreply, S} % someone called stop before control got that
+ end;
+
+handle_info({ac_change_application_req, AppName, Msg}, S) ->
+ Running = S#state.running,
+ Started = S#state.started,
+ Starting = S#state.starting,
+ case {keysearch(AppName, 1, Running), keysearch(AppName, 1, Started)} of
+ {{value, {AppName, Id}}, {value, {_AppName2, Type}}} ->
+ case Msg of
+ {started, Node} ->
+ stop_appl(AppName, Id, Type),
+ NRunning = [{AppName, {distributed, Node}} |
+ keydelete(AppName, 1, Running)],
+ {noreply, S#state{running = NRunning}};
+ {takeover, _Node, _RT} when is_pid(Id) -> % it is running already
+ notify_cntrl_started(AppName, Id, S, ok),
+ {noreply, S};
+ {takeover, Node, RT} ->
+ NewS = do_start(AppName, RT, {takeover, Node}, undefined, S),
+ {noreply, NewS};
+ {failover, _Node, _RT} when is_pid(Id) -> % it is running already
+ notify_cntrl_started(AppName, Id, S, ok),
+ {noreply, S};
+ {failover, Node, RT} ->
+ case application:get_key(AppName, start_phases) of
+ {ok, undefined} ->
+			    %% To be backwards compatible, the application
+			    %% is not started as a failover if start_phases
+			    %% is not defined in the .app file
+ NewS = do_start(AppName, RT, normal, undefined, S),
+ {noreply, NewS};
+ {ok, _StartPhases} ->
+ NewS = do_start(AppName, RT, {failover, Node}, undefined, S),
+ {noreply, NewS}
+ end;
+ stop_it ->
+ stop_appl(AppName, Id, Type),
+ cntrl(AppName, S, {ac_application_not_run, AppName}),
+ NRunning = keyreplace(AppName, 1, Running,
+ {AppName, {distributed, []}}),
+ {noreply, S#state{running = NRunning}};
+ %% We should not try to start a running application!
+ start_it when is_pid(Id) ->
+ notify_cntrl_started(AppName, Id, S, ok),
+ {noreply, S};
+ start_it ->
+ NewS = do_start(AppName, undefined, normal, undefined, S),
+ {noreply, NewS};
+ not_running ->
+ NRunning = keydelete(AppName, 1, Running),
+ {noreply, S#state{running = NRunning}};
+ _ ->
+ {noreply, S}
+ end;
+ _ ->
+ IsLoaded = get_loaded(AppName),
+ IsStarting = lists:keysearch(AppName, 1, Starting),
+ IsStarted = lists:keysearch(AppName, 1, Started),
+ IsRunning = lists:keysearch(AppName, 1, Running),
+
+ case Msg of
+ start_it ->
+ case {IsLoaded, IsStarting, IsStarted, IsRunning} of
+ %% already running
+ {_, _, _, {value, _Tuple}} ->
+ {noreply, S};
+ %% not loaded
+ {false, _, _, _} ->
+ {noreply, S};
+ %% only loaded
+ {{true, _Appl}, false, false, false} ->
+ {noreply, S};
+ %% starting
+ {{true, _Appl}, {value, Tuple}, false, false} ->
+ {_AppName, _RStype, _Type, From} = Tuple,
+ NewS = do_start(AppName, undefined, normal, From, S),
+ {noreply, NewS};
+ %% started but not running
+ {{true, _Appl}, _, {value, {AppName, _RestartType}}, false} ->
+ NewS = do_start(AppName, undefined, normal, undefined, S),
+ SS = NewS#state{started = keydelete(AppName, 1, Started)},
+ {noreply, SS}
+ end;
+ {started, Node} ->
+ NRunning = [{AppName, {distributed, Node}} |
+ keydelete(AppName, 1, Running)],
+ {noreply, S#state{running = NRunning}};
+ _ ->
+ {noreply, S} % someone called stop before control got that
+ end
+ end;
+
+%%-----------------------------------------------------------------
+%% An application died. Check its restart_type. Maybe terminate
+%% all other applications.
+%%-----------------------------------------------------------------
+handle_info({'EXIT', Pid, Reason}, S) ->
+ ets:match_delete(ac_tab, {{application_master, '_'}, Pid}),
+ NRunning = keydelete(Pid, 2, S#state.running),
+ NewS = S#state{running = NRunning},
+ case keysearch(Pid, 2, S#state.running) of
+ {value, {AppName, _AmPid}} ->
+ cntrl(AppName, S, {ac_application_stopped, AppName}),
+ case keysearch(AppName, 1, S#state.started) of
+ {value, {_AppName, temporary}} ->
+ info_exited(AppName, Reason, temporary),
+ {noreply, NewS};
+ {value, {_AppName, transient}} when Reason =:= normal ->
+ info_exited(AppName, Reason, transient),
+ {noreply, NewS};
+ {value, {_AppName, Type}} ->
+ info_exited(AppName, Reason, Type),
+ {stop, to_string({application_terminated, AppName, Reason}), NewS}
+ end;
+ false ->
+ {noreply, S#state{control = del_cntrl(S#state.control, Pid)}}
+ end;
+
+handle_info(_, S) ->
+ {noreply, S}.
+
+terminate(Reason, S) ->
+ case application:get_env(kernel, shutdown_func) of
+ {ok, {M, F}} ->
+ catch M:F(Reason);
+ _ ->
+ ok
+ end,
+ foreach(fun({_AppName, Id}) when is_pid(Id) ->
+ exit(Id, shutdown),
+ receive
+ {'EXIT', Id, _} -> ok
+ end;
+ (_) -> ok
+ end,
+ S#state.running),
+ ets:delete(ac_tab).
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+%%%-----------------------------------------------------------------
+cntrl(AppName, #state{control = Control}, Msg) ->
+ case keysearch(AppName, 1, Control) of
+ {value, {_AppName, Pid}} ->
+ Pid ! Msg,
+ true;
+ false ->
+ false
+ end.
+
+notify_cntrl_started(_AppName, {distributed, _Node}, _S, _Res) ->
+ ok;
+notify_cntrl_started(AppName, _Id, S, Res) ->
+ cntrl(AppName, S, {ac_application_run, AppName, Res}).
+
+del_cntrl([{_, Pid}|T], Pid) ->
+ del_cntrl(T, Pid);
+del_cntrl([H|T], Pid) ->
+ [H|del_cntrl(T, Pid)];
+del_cntrl([], _Pid) ->
+ [].
+
+get_loaded(App) ->
+ AppName = get_appl_name(App),
+ case ets:lookup(ac_tab, {loaded, AppName}) of
+ [{_Key, Appl}] -> {true, Appl};
+ _ -> false
+ end.
+
+do_load_application(Application, S) ->
+ case get_loaded(Application) of
+ {true, _} ->
+ throw({error, {already_loaded, Application}});
+ false ->
+ case make_appl(Application) of
+ {ok, Appl} -> load(S, Appl);
+ Error -> Error
+ end
+ end.
+
+%% Recursively load the application and its included apps.
+%load(S, {ApplData, ApplEnv, IncApps, Descr, Vsn, Apps}) ->
+load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
+ Name = ApplData#appl_data.name,
+ ConfEnv = get_env_i(Name, S),
+ NewEnv = merge_app_env(ApplEnv, ConfEnv),
+ CmdLineEnv = get_cmd_env(Name),
+ NewEnv2 = merge_app_env(NewEnv, CmdLineEnv),
+ NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
+ {included_applications, IncApps}),
+ add_env(Name, NewEnv3),
+ Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn,
+ appl_data = ApplData, inc_apps = IncApps, apps = Apps},
+ ets:insert(ac_tab, {{loaded, Name}, Appl}),
+ NewS =
+ foldl(fun(App, S1) ->
+ case get_loaded(App) of
+ {true, _} -> S1;
+ false ->
+ case do_load_application(App, S1) of
+ {ok, S2} -> S2;
+ Error -> throw(Error)
+ end
+ end
+ end, S, IncApps),
+ {ok, NewS}.
+
+unload(AppName, S) ->
+ {ok, IncApps} = get_env(AppName, included_applications),
+ del_env(AppName),
+ ets:delete(ac_tab, {loaded, AppName}),
+ foldl(fun(App, S1) ->
+ case get_loaded(App) of
+ false -> S1;
+ {true, _} -> unload(App, S1)
+ end
+ end, S, IncApps).
+
+check_start_cond(AppName, RestartType, Started, Running) ->
+ validRestartType(RestartType),
+ case get_loaded(AppName) of
+ {true, Appl} ->
+ %% Check Running; not Started. An exited app is not running,
+ %% but started. It must be possible to start an exited app!
+ case lists:keymember(AppName, 1, Running) of
+ true ->
+ {error, {already_started, AppName}};
+ false ->
+ foreach(
+ fun(AppName2) ->
+ case lists:keymember(AppName2, 1, Started) of
+ true -> ok;
+ false ->
+ throw({error, {not_started, AppName2}})
+ end
+ end, Appl#appl.apps),
+ {ok, Appl}
+ end;
+ false ->
+ {error, {not_loaded, AppName}}
+ end.
+
+do_start(AppName, RT, Type, From, S) ->
+ RestartType = case keysearch(AppName, 1, S#state.started) of
+ {value, {_AppName2, OldRT}} ->
+ get_restart_type(RT, OldRT);
+ false ->
+ RT
+ end,
+ %% UW 990913: We check start_req instead of starting, because starting
+ %% has already been checked.
+ case lists:keymember(AppName, 1, S#state.start_req) of
+ false ->
+ {true, Appl} = get_loaded(AppName),
+ Start_req = S#state.start_req,
+ spawn_starter(undefined, Appl, S, Type),
+ Starting = case keysearch(AppName, 1, S#state.starting) of
+ false ->
+ %% UW: don't know if this is necessary
+ [{AppName, RestartType, Type, From} |
+ S#state.starting];
+ _ ->
+ S#state.starting
+ end,
+ S#state{starting = Starting,
+ start_req = [{AppName, From} | Start_req]};
+ true -> % otherwise we're already starting the app...
+ S
+ end.
+
+spawn_starter(From, Appl, S, Type) ->
+ spawn_link(?MODULE, init_starter, [From, Appl, S, Type]).
+
+init_starter(_From, Appl, S, Type) ->
+ process_flag(trap_exit, true),
+ AppName = Appl#appl.name,
+ gen_server:cast(?AC, {application_started, AppName,
+ catch start_appl(Appl, S, Type)}).
+
+reply(undefined, _Reply) ->
+ ok;
+reply(From, Reply) -> gen_server:reply(From, Reply).
+
+start_appl(Appl, S, Type) ->
+ ApplData = Appl#appl.appl_data,
+ case ApplData#appl_data.mod of
+ [] ->
+ {ok, undefined};
+ _ ->
+ %% Name = ApplData#appl_data.name,
+ Running = S#state.running,
+ foreach(
+ fun(AppName) ->
+ case lists:keymember(AppName, 1, Running) of
+ true ->
+ ok;
+ false ->
+ throw({info, {not_running, AppName}})
+ end
+ end, Appl#appl.apps),
+ case application_master:start_link(ApplData, Type) of
+ {ok, Pid} ->
+ {ok, Pid};
+ {error, Reason} ->
+ throw({error, Reason})
+ end
+ end.
+
+
+%%-----------------------------------------------------------------
+%% Stop application locally.
+%%-----------------------------------------------------------------
+stop_appl(AppName, Id, Type) when is_pid(Id) ->
+ unlink(Id),
+ application_master:stop(Id),
+ info_exited(AppName, stopped, Type),
+ ets:delete(ac_tab, {application_master, AppName});
+stop_appl(AppName, undefined, Type) ->
+ %% Code-only application stopped
+ info_exited(AppName, stopped, Type);
+stop_appl(_AppName, _Id, _Type) ->
+ %% Distributed application stopped
+ ok.
+
+keysearchdelete(Key, Pos, List) ->
+ ksd(Key, Pos, List, []).
+
+ksd(Key, Pos, [H | T], Rest) when element(Pos, H) =:= Key ->
+ {value, H, Rest ++ T};
+ksd(Key, Pos, [H | T], Rest) ->
+ ksd(Key, Pos, T, [H | Rest]);
+ksd(_Key, _Pos, [], _Rest) ->
+ false.
+
+keyreplaceadd(Key, Pos, List, New) ->
+ %% Maintains the order!
+ case lists:keymember(Key, Pos, List) of
+ true -> keyreplace(Key, Pos, List, New);
+ false -> [New | List]
+ end.
+
+validRestartType(permanent) -> true;
+validRestartType(temporary) -> true;
+validRestartType(transient) -> true;
+validRestartType(RestartType) ->
+ throw({error, {invalid_restart_type, RestartType}}).
+
+nd({distributed, Node}) -> Node;
+nd(_) -> node().
+
+get_restart_type(undefined, OldRT) ->
+ OldRT;
+get_restart_type(RT, _OldRT) ->
+ RT.
+
+get_appl_name(Name) when is_atom(Name) -> Name;
+get_appl_name({application, Name, _}) when is_atom(Name) -> Name;
+get_appl_name(Appl) -> throw({error, {bad_application, Appl}}).
+
+make_appl(Name) when is_atom(Name) ->
+ FName = atom_to_list(Name) ++ ".app",
+ case code:where_is_file(FName) of
+ non_existing ->
+ {error, {file:format_error(enoent), FName}};
+ FullName ->
+ case prim_consult(FullName) of
+ {ok, [Application]} ->
+ {ok, make_appl_i(Application)};
+ {error, Reason} ->
+ {error, {file:format_error(Reason), FName}}
+ end
+ end;
+make_appl(Application) ->
+ {ok, make_appl_i(Application)}.
+
+prim_consult(FullName) ->
+ case erl_prim_loader:get_file(FullName) of
+ {ok, Bin, _} ->
+ case erl_scan:string(binary_to_list(Bin)) of
+ {ok, Tokens, _EndLine} ->
+ prim_parse(Tokens, []);
+ {error, Reason, _EndLine} ->
+ {error, Reason}
+ end;
+ error ->
+ {error, enoent}
+ end.
+
+prim_parse(Tokens, Acc) ->
+ case lists:splitwith(fun(T) -> element(1,T) =/= dot end, Tokens) of
+ {[], []} ->
+ {ok, lists:reverse(Acc)};
+ {Tokens2, [{dot,_} = Dot | Rest]} ->
+ case erl_parse:parse_term(Tokens2 ++ [Dot]) of
+ {ok, Term} ->
+ prim_parse(Rest, [Term | Acc]);
+ {error, Reason} ->
+ {error, Reason}
+ end;
+ {Tokens2, []} ->
+ case erl_parse:parse_term(Tokens2) of
+ {ok, Term} ->
+ {ok, lists:reverse([Term | Acc])};
+ {error, Reason} ->
+ {error, Reason}
+ end
+ end.
+
+make_appl_i({application, Name, Opts}) when is_atom(Name), is_list(Opts) ->
+ Descr = get_opt(description, Opts, ""),
+ Id = get_opt(id, Opts, ""),
+ Vsn = get_opt(vsn, Opts, ""),
+ Mods = get_opt(modules, Opts, []),
+ Regs = get_opt(registered, Opts, []),
+ Apps = get_opt(applications, Opts, []),
+ Mod =
+ case get_opt(mod, Opts, []) of
+ {M,A} when is_atom(M) -> {M,A};
+ [] -> [];
+ Other -> throw({error, {badstartspec, Other}})
+ end,
+ Phases = get_opt(start_phases, Opts, undefined),
+ Env = get_opt(env, Opts, []),
+ MaxP = get_opt(maxP, Opts, infinity),
+ MaxT = get_opt(maxT, Opts, infinity),
+ IncApps = get_opt(included_applications, Opts, []),
+ {#appl_data{name = Name, regs = Regs, mod = Mod, phases = Phases, mods = Mods,
+ inc_apps = IncApps, maxP = MaxP, maxT = MaxT},
+ Env, IncApps, Descr, Id, Vsn, Apps};
+make_appl_i({application, Name, Opts}) when is_list(Opts) ->
+ throw({error,{invalid_name,Name}});
+make_appl_i({application, _Name, Opts}) ->
+ throw({error,{invalid_options, Opts}});
+make_appl_i(Appl) -> throw({error, {bad_application, Appl}}).
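+
+%% Illustrative sketch (not part of the original module): the shape of
+%% application specification make_appl_i/1 accepts, here for a purely
+%% hypothetical application 'my_app':
+%%
+%%   {application, my_app,
+%%    [{description, "example"},
+%%     {vsn, "1.0.0"},
+%%     {modules, [my_app, my_app_sup]},
+%%     {registered, [my_app_sup]},
+%%     {applications, [kernel, stdlib]},
+%%     {mod, {my_app, []}},
+%%     {env, [{port, 8080}]}]}.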
+
+
+%%-----------------------------------------------------------------
+%% Merge current applications with changes.
+%%-----------------------------------------------------------------
+
+%% do_change_apps(Applications, Config, OldAppls) -> NewAppls
+%% Applications = [{application, AppName, [{Key,Value}]}]
+%% Config = [{AppName,[{Par,Value}]} | File]
+%% OldAppls = NewAppls = [#appl{}]
+do_change_apps(Applications, Config, OldAppls) ->
+
+ %% OTP-4867
+ %% Config = contents of sys.config file
+ %% May now contain names of other .config files as well as
+ %% configuration parameters.
+ %% Therefore read and merge contents.
+ {ok, SysConfig, Errors} = check_conf_sys(Config),
+
+ %% Report errors, but do not terminate
+ %% (backwards compatible behaviour)
+ lists:foreach(fun({error, {SysFName, Line, Str}}) ->
+ Str2 = lists:flatten(io_lib:format("~p: ~w: ~s~n",
+ [SysFName, Line, Str])),
+ error_logger:format(Str2, [])
+ end,
+ Errors),
+
+ map(fun(Appl) ->
+ AppName = Appl#appl.name,
+ case is_loaded_app(AppName, Applications) of
+ {true, Application} ->
+ do_change_appl(make_appl(Application),
+ Appl, SysConfig);
+
+ %% ignored removed apps - handled elsewhere
+ false ->
+ Appl
+ end
+ end, OldAppls).
+
+is_loaded_app(AppName, [{application, AppName, App} | _]) ->
+ {true, {application, AppName, App}};
+is_loaded_app(AppName, [_ | T]) -> is_loaded_app(AppName, T);
+is_loaded_app(_AppName, []) -> false.
+
+do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}},
+ OldAppl, Config) ->
+ AppName = OldAppl#appl.name,
+
+ %% Merge application env with env from sys.config, if any
+ ConfEnv = get_opt(AppName, Config, []),
+ NewEnv1 = merge_app_env(Env, ConfEnv),
+
+ %% Merge application env with command line arguments, if any
+ CmdLineEnv = get_cmd_env(AppName),
+ NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv),
+
+ %% included_apps is made into an env parameter as well
+ NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
+ {included_applications, IncApps}),
+
+ %% Update ets table with new application env
+ del_env(AppName),
+ add_env(AppName, NewEnv3),
+
+ OldAppl#appl{appl_data=ApplData,
+ descr=Descr,
+ id=Id,
+ vsn=Vsn,
+ inc_apps=IncApps,
+ apps=Apps};
+do_change_appl({error, R}, _Appl, _ConfData) ->
+ throw({error, R}).
+
+get_opt(Key, List, Default) ->
+ case keysearch(Key, 1, List) of
+ {value, {_Key, Val}} -> Val;
+ _ -> Default
+ end.
+
+get_cmd_env(Name) ->
+ case init:get_argument(Name) of
+ {ok, Args} ->
+ foldl(fun(List, Res) -> conv(List) ++ Res end, [], Args);
+ _ -> []
+ end.
+
+conv([Key, Val | T]) ->
+ [{make_term(Key), make_term(Val)} | conv(T)];
+conv(_) -> [].
+
+%%% Fix some day: eliminate the duplicated code here
+make_term(Str) ->
+ case erl_scan:string(Str) of
+ {ok, Tokens, _} ->
+ case erl_parse:parse_term(Tokens ++ [{dot, 1}]) of
+ {ok, Term} ->
+ Term;
+ {error, {_,M,Reason}} ->
+ error_logger:format("application_controller: ~s: ~s~n",
+ [M:format_error(Reason), Str]),
+ throw({error, {bad_environment_value, Str}})
+ end;
+ {error, {_,M,Reason}, _} ->
+ error_logger:format("application_controller: ~s: ~s~n",
+ [M:format_error(Reason), Str]),
+ throw({error, {bad_environment_value, Str}})
+ end.
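+
+%% Illustrative sketch (an assumption, not from the original source):
+%% get_cmd_env/1 and make_term/1 pick up application parameters given
+%% on the command line as alternating key/value strings, e.g. for a
+%% hypothetical application 'my_app':
+%%
+%%   erl -my_app port 9090 hosts '["a","b"]'
+%%
+%% which is parsed into [{port,9090}, {hosts,["a","b"]}].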
+
+get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) ->
+ case keysearch(Name, 1, ConfData) of
+ {value, {_Name, Env}} -> Env;
+ _ -> []
+ end;
+get_env_i(_Name, _) -> [].
+
+%% Merges envs for all apps. Env2 overrides Env1
+merge_env(Env1, Env2) ->
+ merge_env(Env1, Env2, []).
+
+merge_env([{App, AppEnv1} | T], Env2, Res) ->
+ case get_env_key(App, Env2) of
+ {value, AppEnv2, RestEnv2} ->
+ NewAppEnv = merge_app_env(AppEnv1, AppEnv2),
+ merge_env(T, RestEnv2, [{App, NewAppEnv} | Res]);
+ _ ->
+ merge_env(T, Env2, [{App, AppEnv1} | Res])
+ end;
+merge_env([], Env2, Res) ->
+ Env2 ++ Res.
+
+
+
+
+%% Merges envs for an application. Env2 overrides Env1
+merge_app_env(Env1, Env2) ->
+ merge_app_env(Env1, Env2, []).
+
+merge_app_env([{Key, Val} | T], Env2, Res) ->
+ case get_env_key(Key, Env2) of
+ {value, NewVal, RestEnv} ->
+ merge_app_env(T, RestEnv, [{Key, NewVal}|Res]);
+ _ ->
+ merge_app_env(T, Env2, [{Key, Val} | Res])
+ end;
+merge_app_env([], Env2, Res) ->
+ Env2 ++ Res.
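+
+%% Illustrative sketch (not part of the original module), tracing the
+%% override rule above with hypothetical keys:
+%%
+%%   merge_app_env([{a,1},{b,2}], [{b,3},{c,4}])
+%%   => [{c,4},{b,3},{a,1}]
+%%
+%% The Env2 value wins for the duplicated key 'b'; keys unique to
+%% either list are kept (callers do not rely on the result order).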
+
+get_env_key(Key, Env) -> get_env_key(Env, Key, []).
+get_env_key([{Key, Val} | T], Key, Res) ->
+ {value, Val, T ++ Res};
+get_env_key([H | T], Key, Res) ->
+ get_env_key(T, Key, [H | Res]);
+get_env_key([], _Key, Res) -> Res.
+
+add_env(Name, Env) ->
+ foreach(fun({Key, Value}) ->
+ ets:insert(ac_tab, {{env, Name, Key}, Value})
+ end,
+ Env).
+
+del_env(Name) ->
+ ets:match_delete(ac_tab, {{env, Name, '_'}, '_'}).
+
+check_user() ->
+ case whereis(user) of
+ User when is_pid(User) -> group_leader(User, self());
+ _ -> ok
+ end.
+
+
+%%-----------------------------------------------------------------
+%% Prepare for a release upgrade by reading all the environment variables.
+%%-----------------------------------------------------------------
+do_prep_config_change(Apps) ->
+ do_prep_config_change(Apps, []).
+
+do_prep_config_change([], EnvBefore) ->
+ EnvBefore;
+do_prep_config_change([{App, _Id} | Apps], EnvBefore) ->
+ Env = application:get_all_env(App),
+ do_prep_config_change(Apps, [{App, Env} | EnvBefore]).
+
+
+
+%%-----------------------------------------------------------------
+%% Inform all running applications about the changed configuration.
+%%-----------------------------------------------------------------
+do_config_change(Apps, EnvBefore) ->
+ do_config_change(Apps, EnvBefore, []).
+
+do_config_change([], _EnvBefore, []) ->
+ ok;
+do_config_change([], _EnvBefore, Errors) ->
+ {error, Errors};
+do_config_change([{App, _Id} | Apps], EnvBefore, Errors) ->
+ AppEnvNow = lists:sort(application:get_all_env(App)),
+ AppEnvBefore = case lists:keysearch(App, 1, EnvBefore) of
+ false ->
+ [];
+ {value, {App, AppEnvBeforeT}} ->
+ lists:sort(AppEnvBeforeT)
+ end,
+
+ Res =
+ case AppEnvNow of
+ AppEnvBefore ->
+ ok;
+ _ ->
+ case do_config_diff(AppEnvNow, AppEnvBefore) of
+ {[], [], []} ->
+ ok;
+ {Changed, New, Removed} ->
+ case application:get_key(App, mod) of
+ {ok, {Mod, _Para}} ->
+ case catch Mod:config_change(Changed, New,
+ Removed) of
+ ok ->
+ ok;
+ %% It is not considered as an error
+ %% if the cb-function is not defined
+ {'EXIT', {undef, _}} ->
+ ok;
+ {error, Error} ->
+ {error, Error};
+ Else ->
+ {error, Else}
+ end;
+ {ok,[]} ->
+ {error, {module_not_defined, App}};
+ undefined ->
+ {error, {application_not_found, App}}
+ end
+ end
+ end,
+
+ case Res of
+ ok ->
+ do_config_change(Apps, EnvBefore, Errors);
+ {error, NewError} ->
+ do_config_change(Apps, EnvBefore,[NewError | Errors])
+ end.
+
+
+
+
+%%-----------------------------------------------------------------
+%% Check if the configuration has changed in any way.
+%%-----------------------------------------------------------------
+do_config_diff(AppEnvNow, AppEnvBefore) ->
+ do_config_diff(AppEnvNow, AppEnvBefore, {[], []}).
+
+do_config_diff([], AppEnvBefore, {Changed, New}) ->
+ Removed = lists:foldl(fun({Env, _Value}, Acc) -> [Env | Acc] end, [], AppEnvBefore),
+ {Changed, New, Removed};
+do_config_diff(AppEnvNow, [], {Changed, New}) ->
+ {Changed, AppEnvNow++New, []};
+do_config_diff([{Env, Value} | AppEnvNow], AppEnvBefore, {Changed, New}) ->
+ case lists:keysearch(Env, 1, AppEnvBefore) of
+ {value, {Env, Value}} ->
+ do_config_diff(AppEnvNow, lists:keydelete(Env,1,AppEnvBefore), {Changed, New});
+ {value, {Env, _OtherValue}} ->
+ do_config_diff(AppEnvNow, lists:keydelete(Env,1,AppEnvBefore),
+ {[{Env, Value} | Changed], New});
+ false ->
+ do_config_diff(AppEnvNow, AppEnvBefore, {Changed, [{Env, Value}|New]})
+ end.
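+
+%% Illustrative sketch (not part of the original module), with
+%% hypothetical parameters, showing the {Changed, New, Removed}
+%% triple computed above:
+%%
+%%   do_config_diff([{a,1},{b,2}], [{a,1},{b,9},{c,3}])
+%%   => {[{b,2}], [], [c]}
+%%
+%% 'b' changed value, nothing was added, and 'c' was removed.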
+
+
+
+
+
+
+%%-----------------------------------------------------------------
+%% Read the .config files.
+%%-----------------------------------------------------------------
+check_conf() ->
+ case init:get_argument(config) of
+ {ok, Files} ->
+ {ok, lists:foldl(
+ fun([File], Env) ->
+ BFName = filename:basename(File,".config"),
+ FName = filename:join(filename:dirname(File),
+ BFName ++ ".config"),
+ case load_file(FName) of
+ {ok, NewEnv} ->
+ %% OTP-4867
+ %% sys.config may now contain names of
+ %% other .config files as well as
+ %% configuration parameters.
+ %% Therefore read and merge contents.
+ if
+ BFName =:= "sys" ->
+ {ok, SysEnv, Errors} =
+ check_conf_sys(NewEnv),
+
+ %% Report first error, if any, and
+ %% terminate
+ %% (backwards compatible behaviour)
+ case Errors of
+ [] ->
+ merge_env(Env, SysEnv);
+ [{error, {SysFName, Line, Str}}|_] ->
+ throw({error, {SysFName, Line, Str}})
+ end;
+ true ->
+ merge_env(Env, NewEnv)
+ end;
+ {error, {Line, _Mod, Str}} ->
+ throw({error, {FName, Line, Str}})
+ end
+ end, [], Files)};
+ _ -> {ok, []}
+ end.
+
+check_conf_sys(Env) ->
+ check_conf_sys(Env, [], []).
+
+check_conf_sys([File|T], SysEnv, Errors) when is_list(File) ->
+ BFName = filename:basename(File, ".config"),
+ FName = filename:join(filename:dirname(File), BFName ++ ".config"),
+ case load_file(FName) of
+ {ok, NewEnv} ->
+ check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors);
+ {error, {Line, _Mod, Str}} ->
+ check_conf_sys(T, SysEnv, [{error, {FName, Line, Str}}|Errors])
+ end;
+check_conf_sys([Tuple|T], SysEnv, Errors) ->
+ check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors);
+check_conf_sys([], SysEnv, Errors) ->
+ {ok, SysEnv, lists:reverse(Errors)}.
+
+load_file(File) ->
+ %% We can't use file:consult/1 here. Too bad.
+ case erl_prim_loader:get_file(File) of
+ {ok, Bin, _FileName} ->
+ %% Make sure that there is some whitespace at the end of the string
+ %% (so that reading a file with no NL following the "." will work).
+ Str = binary_to_list(Bin) ++ " ",
+ scan_file(Str);
+ error ->
+ {error, {none, open_file, "configuration file not found"}}
+ end.
+
+scan_file(Str) ->
+ case erl_scan:tokens([], Str, 1) of
+ {done, {ok, Tokens, _}, Left} ->
+ case erl_parse:parse_term(Tokens) of
+ {ok,L}=Res when is_list(L) ->
+ case only_ws(Left) of
+ true ->
+ Res;
+ false ->
+ %% There was trailing garbage found after the list.
+ config_error()
+ end;
+ {ok,_} ->
+ %% Parsing succeeded but the result is not a list.
+ config_error();
+ Error ->
+ Error
+ end;
+ {done, Result, _} ->
+ {error, {none, parse_file, tuple_to_list(Result)}};
+ {more, _} ->
+ {error, {none, load_file, "no ending <dot> found"}}
+ end.
+
+only_ws([C|Cs]) when C =< $\s -> only_ws(Cs);
+only_ws([$%|Cs]) -> only_ws(strip_comment(Cs)); % handle comment
+only_ws([_|_]) -> false;
+only_ws([]) -> true.
+
+strip_comment([$\n|Cs]) -> Cs;
+strip_comment([_|Cs]) -> strip_comment(Cs);
+strip_comment([]) -> [].
+
+config_error() ->
+ {error,
+ {none, load_file,
+ "configuration file must contain ONE list ended by <dot>"}}.
+
+%%-----------------------------------------------------------------
+%% Info messages sent to error_logger
+%%-----------------------------------------------------------------
+info_started(Name, Node) ->
+ Rep = [{application, Name},
+ {started_at, Node}],
+ error_logger:info_report(progress, Rep).
+
+info_exited(Name, Reason, Type) ->
+ Rep = [{application, Name},
+ {exited, Reason},
+ {type, Type}],
+ error_logger:info_report(Rep).
+
+
+%%-----------------------------------------------------------------
+%% Reply to all processes waiting for this application to be started.
+%%-----------------------------------------------------------------
+reply_to_requester(AppName, Start_req, Res) ->
+ R = case Res of
+ {ok, _Id} ->
+ ok;
+ {info, Reason} ->
+ {error, Reason};
+ Error ->
+ Error
+ end,
+
+ lists:foldl(fun(Sp, AccIn) ->
+ case Sp of
+ {AppName, From} ->
+ reply(From, R),
+ AccIn;
+ _ ->
+ [Sp | AccIn]
+ end
+ end,
+ [],
+ Start_req).
+
+
+%%-----------------------------------------------------------------
+%% Update the environment variable permission for an application.
+%%-----------------------------------------------------------------
+update_permissions(AppName, Bool) ->
+ case ets:lookup(ac_tab, {env, kernel, permissions}) of
+ [] ->
+ ets:insert(ac_tab, {{env, kernel, permissions},
+ [{AppName, Bool}]});
+ [{_, Perm}] ->
+ Perm2 = lists:keydelete(AppName, 1, Perm),
+ ets:insert(ac_tab, {{env, kernel, permissions},
+ [{AppName, Bool}| Perm2]})
+ end.
+
+%%-----------------------------------------------------------------
+%% These functions are only to be used from testsuites.
+%%-----------------------------------------------------------------
+test_change_apps(Apps, Conf) ->
+ Res = test_make_apps(Apps, []),
+ test_do_change_appl(Apps, Conf, Res).
+
+test_do_change_appl([], _, _) ->
+ ok;
+test_do_change_appl([A|Apps], [], [R|Res]) ->
+ do_change_appl(R, #appl{name = A}, []),
+ test_do_change_appl(Apps, [], Res);
+test_do_change_appl([A|Apps], [C|Conf], [R|Res]) ->
+ do_change_appl(R, #appl{name = A}, C),
+ test_do_change_appl(Apps, Conf, Res).
+
+test_make_apps([], Res) ->
+ lists:reverse(Res);
+test_make_apps([A|Apps], Res) ->
+ test_make_apps(Apps, [make_appl(A) | Res]).
+
+%%-----------------------------------------------------------------
+%% String conversion
+%% Exit reason needs to be a printable string
+%% (and of length <200, but init now does the chopping).
+%%-----------------------------------------------------------------
+to_string(Term) ->
+ case io_lib:printable_list(Term) of
+ true ->
+ Term;
+ false ->
+ lists:flatten(io_lib:write(Term))
+ end.
diff --git a/lib/kernel/src/application_master.erl b/lib/kernel/src/application_master.erl
new file mode 100644
index 0000000000..679fefaed9
--- /dev/null
+++ b/lib/kernel/src/application_master.erl
@@ -0,0 +1,426 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(application_master).
+
+%% External exports
+-export([start_link/2, start_type/0, stop/1]).
+-export([get_child/1]).
+
+%% Internal exports
+-export([init/4, start_it/4]).
+
+-include("application_master.hrl").
+
+-record(state, {child, appl_data, children = [], procs = 0, gleader}).
+
+%%-----------------------------------------------------------------
+%% Func: start_link/2
+%% Args: ApplData = record(appl_data), Type = application start type
+%% Purpose: Starts an application master for the application.
+%% Called from application_controller. (The application is
+%% also started).
+%% Returns: {ok, Pid} | {error, Reason} (Pid is unregistered)
+%%-----------------------------------------------------------------
+start_link(ApplData, Type) ->
+ Parent = whereis(application_controller),
+ proc_lib:start_link(application_master, init,
+ [Parent, self(), ApplData, Type]).
+
+start_type() ->
+ group_leader() ! {start_type, self()},
+ receive
+ {start_type, Type} ->
+ Type
+ after 5000 ->
+ {error, timeout}
+ end.
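+
+%% Illustrative sketch (an assumption, not from the original source):
+%% since the application master is the group leader of every process
+%% in the application, such a process can ask for the start type while
+%% the application is starting (once it is up, the answer is 'local'):
+%%
+%%   init(Args) ->
+%%       case application_master:start_type() of
+%%           {failover, _Node} -> recover(Args);   % 'recover' is hypothetical
+%%           _ -> init_fresh(Args)                 % 'init_fresh' is hypothetical
+%%       end.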
+
+%%-----------------------------------------------------------------
+%% Func: stop/1
+%% Purpose: Stops the application. This function makes sure
+%%          that all processes belonging to the application are
+%%          stopped (shutdown or killed). The application master
+%% is also stopped.
+%% Returns: ok
+%%-----------------------------------------------------------------
+stop(AppMaster) -> call(AppMaster, stop).
+
+%%-----------------------------------------------------------------
+%% Func: get_child/1
+%% Purpose: Get the topmost supervisor of an application.
+%% Returns: {pid(), App}
+%%-----------------------------------------------------------------
+get_child(AppMaster) -> call(AppMaster, get_child).
+
+call(AppMaster, Req) ->
+ Tag = make_ref(),
+ Ref = erlang:monitor(process, AppMaster),
+ AppMaster ! {Req, Tag, self()},
+ receive
+ {'DOWN', Ref, process, _, _Info} ->
+ ok;
+ {Tag, Res} ->
+ erlang:demonitor(Ref),
+ receive
+ {'DOWN', Ref, process, _, _Info} ->
+ Res
+ after 0 ->
+ Res
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% The logical and physical process structure is as follows:
+%%%
+%%% logical physical
+%%%
+%%% -------- --------
+%%% |AM(GL)| |AM(GL)|
+%%% -------- --------
+%%% | |
+%%% -------- --------
+%%% |Appl P| | X |
+%%% -------- --------
+%%% |
+%%% --------
+%%% |Appl P|
+%%% --------
+%%%
+%%% Where AM(GL) == Application Master (Group Leader)
+%%% Appl P == The application specific root process (child to AM)
+%%% X == A special 'invisible' process
+%%% The reason for not using the logical structure is that
+%%% the application start function is synchronous, and
+%%% that the AM is GL. This means that if AM executed the start
+%%% function, and this function uses spawn_request/1
+%%% or io, deadlock would occur. Therefore, this function is
+%%% executed by the process X. Also, AM needs three loops;
+%%% init_loop (waiting for the start function to return)
+%%% main_loop
+%%% terminate_loop (waiting for the process to die)
+%%% In each of these loops, io and other requests are handled.
+%%%-----------------------------------------------------------------
+%%% Internal functions
+%%%-----------------------------------------------------------------
+init(Parent, Starter, ApplData, Type) ->
+ link(Parent),
+ process_flag(trap_exit, true),
+ OldGleader = group_leader(),
+ group_leader(self(), self()),
+ %% Insert ourselves as master for the process. This ensures that
+ %% the processes in the application can use get_env/1 at startup.
+ Name = ApplData#appl_data.name,
+ ets:insert(ac_tab, {{application_master, Name}, self()}),
+ State = #state{appl_data = ApplData, gleader = OldGleader},
+ case start_it(State, Type) of
+ {ok, Pid} -> % apply(M,F,A) returned ok
+ set_timer(ApplData#appl_data.maxT),
+ unlink(Starter),
+ proc_lib:init_ack(Starter, {ok,self()}),
+ main_loop(Parent, State#state{child = Pid});
+ {error, Reason} -> % apply(M,F,A) returned error
+ exit(Reason);
+ Else -> % apply(M,F,A) returned erroneous
+ exit(Else)
+ end.
+
+%%-----------------------------------------------------------------
+%% We want to start the new application synchronously, but we still
+%% want to handle io requests. So we spawn off a new process that
+%% performs the apply, and we wait for a start ack.
+%%-----------------------------------------------------------------
+start_it(State, Type) ->
+ Tag = make_ref(),
+ Pid = spawn_link(application_master, start_it, [Tag, State, self(), Type]),
+ init_loop(Pid, Tag, State, Type).
+
+
+%%-----------------------------------------------------------------
+%% These are the three different loops executed by the application_
+%% master
+%%-----------------------------------------------------------------
+init_loop(Pid, Tag, State, Type) ->
+ receive
+ IoReq when element(1, IoReq) =:= io_request ->
+ State#state.gleader ! IoReq,
+ init_loop(Pid, Tag, State, Type);
+ {Tag, Res} ->
+ Res;
+ {'EXIT', Pid, Reason} ->
+ {error, Reason};
+ {start_type, From} ->
+ From ! {start_type, Type},
+ init_loop(Pid, Tag, State, Type);
+ Other ->
+ NewState = handle_msg(Other, State),
+ init_loop(Pid, Tag, NewState, Type)
+ end.
+
+main_loop(Parent, State) ->
+ receive
+ IoReq when element(1, IoReq) =:= io_request ->
+ State#state.gleader ! IoReq,
+ main_loop(Parent, State);
+ {'EXIT', Parent, Reason} ->
+ terminate(Reason, State);
+ {'EXIT', Child, Reason} when State#state.child =:= Child ->
+ terminate(Reason, State#state{child=undefined});
+ {'EXIT', _, timeout} ->
+ terminate(normal, State);
+ {'EXIT', Pid, _Reason} ->
+ Children = lists:delete(Pid, State#state.children),
+ Procs = State#state.procs - 1,
+ main_loop(Parent, State#state{children=Children, procs=Procs});
+ {start_type, From} ->
+ From ! {start_type, local},
+ main_loop(Parent, State);
+ Other ->
+ NewState = handle_msg(Other, State),
+ main_loop(Parent, NewState)
+ end.
+
+terminate_loop(Child, State) ->
+ receive
+ IoReq when element(1, IoReq) =:= io_request ->
+ State#state.gleader ! IoReq,
+ terminate_loop(Child, State);
+ {'EXIT', Child, _} ->
+ ok;
+ Other ->
+ NewState = handle_msg(Other, State),
+ terminate_loop(Child, NewState)
+ end.
+
+
+%%-----------------------------------------------------------------
+%% The Application Master is linked to *all* processes in the group
+%% (application).
+%%-----------------------------------------------------------------
+handle_msg({get_child, Tag, From}, State) ->
+ From ! {Tag, get_child_i(State#state.child)},
+ State;
+handle_msg({stop, Tag, From}, State) ->
+ catch terminate(normal, State),
+ From ! {Tag, ok},
+ exit(normal);
+handle_msg(_, State) ->
+ State.
+
+
+terminate(Reason, State) ->
+ terminate_child(State#state.child, State),
+ kill_children(State#state.children),
+ exit(Reason).
+
+
+
+
+%%======================================================================
+%%======================================================================
+%%======================================================================
+%% This is the process X above...
+%%======================================================================
+%%======================================================================
+%%======================================================================
+
+%%======================================================================
+%% Start an application.
+%% If start_phases is defined in the .app file, the application is
+%% started in one or several start phases.
+%% If the Module in the mod key is set to application_starter, the
+%% generic helper module application_starter is used to control
+%% the start.
+%%======================================================================
+
+start_it(Tag, State, From, Type) ->
+ process_flag(trap_exit, true),
+ ApplData = State#state.appl_data,
+ case {ApplData#appl_data.phases, ApplData#appl_data.mod} of
+ {undefined, _} ->
+ start_it_old(Tag, From, Type, ApplData);
+ {Phases, {application_starter, [M, A]}} ->
+ start_it_new(Tag, From, Type, M, A, Phases,
+ [ApplData#appl_data.name]);
+ {Phases, {M, A}} ->
+ start_it_new(Tag, From, Type, M, A, Phases,
+ [ApplData#appl_data.name]);
+ {OtherP, OtherM} ->
+ From ! {Tag, {error, {bad_keys, {{mod, OtherM},
+ {start_phases, OtherP}}}}}
+ end.
+
+
+%%%-----------------------------------------------------
+%%% No start phases are defined
+%%%-----------------------------------------------------
+start_it_old(Tag, From, Type, ApplData) ->
+ {M,A} = ApplData#appl_data.mod,
+ case catch M:start(Type, A) of
+ {ok, Pid} ->
+ link(Pid),
+ From ! {Tag, {ok, self()}},
+ loop_it(From, Pid, M, []);
+ {ok, Pid, AppState} ->
+ link(Pid),
+ From ! {Tag, {ok, self()}},
+ loop_it(From, Pid, M, AppState);
+ {'EXIT', normal} ->
+ From ! {Tag, {error, {{'EXIT',normal},{M,start,[Type,A]}}}};
+ {error, Reason} ->
+ From ! {Tag, {error, {Reason, {M,start,[Type,A]}}}};
+ Other ->
+ From ! {Tag, {error, {bad_return,{{M,start,[Type,A]},Other}}}}
+ end.
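+
+%% Illustrative sketch (an assumption, not from the original source): a
+%% minimal callback module whose return values match the clauses handled
+%% above; 'my_app' and 'my_app_sup' are hypothetical names.
+%%
+%%   -module(my_app).
+%%   -behaviour(application).
+%%   -export([start/2, stop/1]).
+%%
+%%   start(_Type, _Args) ->
+%%       my_app_sup:start_link().   % {ok, Pid} | {error, Reason}
+%%
+%%   stop(_AppState) ->
+%%       ok.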
+
+
+%%%-----------------------------------------------------
+%%% Start phases are defined
+%%%-----------------------------------------------------
+start_it_new(Tag, From, Type, M, A, Phases, Apps) ->
+ case catch start_the_app(Type, M, A, Phases, Apps) of
+ {ok, Pid, AppState} ->
+ From ! {Tag, {ok, self()}},
+ loop_it(From, Pid, M, AppState);
+ Error ->
+ From ! {Tag, Error}
+ end.
+
+
+%%%=====================================================
+%%% Start the application in the defined phases,
+%%% but first the supervisors are started.
+%%%=====================================================
+start_the_app(Type, M, A, Phases, Apps) ->
+ case start_supervisor(Type, M, A) of
+ {ok, Pid, AppState} ->
+ link(Pid),
+ case application_starter:start(Phases, Type, Apps) of
+ ok ->
+ {ok, Pid, AppState};
+ Error2 ->
+ unlink(Pid),
+ Error2
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-------------------------------------------------------------
+%%% Start the supervisors
+%%%-------------------------------------------------------------
+start_supervisor(Type, M, A) ->
+ case catch M:start(Type, A) of
+ {ok, Pid} ->
+ {ok, Pid, []};
+ {ok, Pid, AppState} ->
+ {ok, Pid, AppState};
+ {error, Reason} ->
+ {error, {Reason, {M, start, [Type, A]}}};
+ {'EXIT', normal} ->
+ {error, {{'EXIT', normal}, {M, start, [Type, A]}}};
+ Other ->
+ {error, {bad_return, {{M, start, [Type, A]}, Other}}}
+ end.
+
+
+
+
+%%======================================================================
+%%
+%%======================================================================
+
+loop_it(Parent, Child, Mod, AppState) ->
+ receive
+ {Parent, get_child} ->
+ Parent ! {self(), Child, Mod},
+ loop_it(Parent, Child, Mod, AppState);
+ {Parent, terminate} ->
+ NewAppState = prep_stop(Mod, AppState),
+ exit(Child, shutdown),
+ receive
+ {'EXIT', Child, _} -> ok
+ end,
+ catch Mod:stop(NewAppState),
+ exit(normal);
+ {'EXIT', Parent, Reason} ->
+ NewAppState = prep_stop(Mod, AppState),
+ exit(Child, Reason),
+ receive
+ {'EXIT', Child, Reason2} ->
+ exit(Reason2)
+ end,
+ catch Mod:stop(NewAppState);
+ {'EXIT', Child, Reason} -> % forward *all* exit reasons (inc. normal)
+ NewAppState = prep_stop(Mod, AppState),
+ catch Mod:stop(NewAppState),
+ exit(Reason);
+ _ ->
+ loop_it(Parent, Child, Mod, AppState)
+ end.
+
+prep_stop(Mod, AppState) ->
+ case catch Mod:prep_stop(AppState) of
+ {'EXIT', {undef, _}} ->
+ AppState;
+ {'EXIT', Reason} ->
+ error_logger:error_report([{?MODULE, shutdown_error},
+ {Mod, {prep_stop, [AppState]}},
+ {error_info, Reason}]),
+ AppState;
+ NewAppState ->
+ NewAppState
+ end.
+
+get_child_i(Child) ->
+ Child ! {self(), get_child},
+ receive
+ {Child, GrandChild, Mod} -> {GrandChild, Mod}
+ end.
+
+terminate_child_i(Child, State) ->
+ Child ! {self(), terminate},
+ terminate_loop(Child, State).
+
+%% Try to shut down the child gently
+terminate_child(undefined, _) -> ok;
+terminate_child(Child, State) ->
+ terminate_child_i(Child, State).
+
+kill_children(Children) ->
+ lists:foreach(fun(Pid) -> exit(Pid, kill) end, Children),
+ kill_all_procs().
+
+kill_all_procs() ->
+ kill_all_procs_1(processes(), self(), 0).
+
+kill_all_procs_1([Self|Ps], Self, N) ->
+ kill_all_procs_1(Ps, Self, N);
+kill_all_procs_1([P|Ps], Self, N) ->
+ case process_info(P, group_leader) of
+ {group_leader,Self} ->
+ exit(P, kill),
+ kill_all_procs_1(Ps, Self, N+1);
+ _ ->
+ kill_all_procs_1(Ps, Self, N)
+ end;
+kill_all_procs_1([], _, 0) -> ok;
+kill_all_procs_1([], _, _) -> kill_all_procs().
+
+set_timer(infinity) -> ok;
+set_timer(Time) -> timer:exit_after(Time, timeout).
diff --git a/lib/kernel/src/application_master.hrl b/lib/kernel/src/application_master.hrl
new file mode 100644
index 0000000000..cd6d12c33c
--- /dev/null
+++ b/lib/kernel/src/application_master.hrl
@@ -0,0 +1,20 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-record(appl_data, {name, regs = [], phases, mod, mods = [],
+ inc_apps, maxP = infinity, maxT = infinity}).
diff --git a/lib/kernel/src/application_starter.erl b/lib/kernel/src/application_starter.erl
new file mode 100644
index 0000000000..8d839e4662
--- /dev/null
+++ b/lib/kernel/src/application_starter.erl
@@ -0,0 +1,111 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% ----------------------------------------------------------------------
+%% Purpose : Starts applications in the phases defined in the .app file's
+%%           start_phases key. If the application includes other applications,
+%%           these are also started according to their mod and
+%%           start_phases keys in their .app files.
+%% ----------------------------------------------------------------------
+
+-module(application_starter).
+
+-export([start/3]).
+
+%%%=============================================================================
+%%%=============================================================================
+%%%=============================================================================
+%%% start(Phases, Type, Applications) -> ok | {error, ErrorMessage}
+%%%
+%%% The applications are started by calling Module:start_phase(Phase, Type, Args)
+%%% where Module is defined in the mod key, and Phase and Args are defined in
+%%% the start_phases key.
+%%%=============================================================================
+%%%=============================================================================
+%%%=============================================================================
+start([], _Type, _Apps) ->
+ ok;
+start([{Phase,_PhaseArgs}|Phases], Type, Apps) ->
+ case start_apps(Phase, Type, Apps) of
+ {error, Error} ->
+ {error, Error};
+ _ ->
+ start(Phases, Type, Apps)
+ end.
+
+
+%%%=============================================================================
+%%% Start each application in the phase Phase.
+%%%=============================================================================
+start_apps(_Phase, _Type, []) ->
+ ok;
+start_apps(Phase, Type, [App | Apps]) ->
+ case catch run_start_phase(Phase, Type, App) of
+ {error, Error} ->
+ {error, Error};
+ _ ->
+ start_apps(Phase, Type, Apps)
+ end.
+
+
+%%%=============================================================================
+%%% If application_starter is used recursively, also start all the included
+%%% applications in the phase Phase.
+%%%=============================================================================
+run_start_phase(Phase, Type, App) ->
+ {ok,{Mod,Arg}} = application:get_key(App, mod),
+ case Mod of
+ application_starter ->
+ [StartMod, _StartArgs] = Arg,
+ run_the_phase(Phase, Type, App, StartMod),
+ {ok, IncApps} = application:get_key(App, included_applications),
+ start_apps(Phase, Type, IncApps);
+ _ ->
+ run_the_phase(Phase, Type, App, Mod)
+ end.
+
+
+%%%=============================================================================
+%%% Run the start phase only if it is defined in the application's
+%%% start_phases key.
+%%%=============================================================================
+run_the_phase(Phase, Type, App, Mod) ->
+ Start_phases = case application_controller:get_key(App, start_phases) of
+ {ok, undefined} ->
+ throw({error, {start_phases_undefined, App}});
+ {ok, Sp} ->
+ Sp
+ end,
+ case lists:keysearch(Phase, 1, Start_phases) of
+ false ->
+ ok;
+ {value, {Phase, PhaseArgs}} ->
+ case catch Mod:start_phase(Phase, Type, PhaseArgs) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ throw({error, {Reason,
+ {Mod, start_phase,
+ [Phase, Type, PhaseArgs]}}});
+ Other ->
+ throw({error, {bad_return_value,
+ {{Mod, start_phase,
+ [Phase, Type, PhaseArgs]},
+ Other}}})
+ end
+ end.
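+
+%% Illustrative sketch (an assumption, not from the original source):
+%% the .app keys this module acts on, together with the matching
+%% callback clauses, for a hypothetical application 'my_app':
+%%
+%%   {mod, {my_app, []}},
+%%   {start_phases, [{init, []}, {go, [fast]}]}
+%%
+%%   %% in my_app.erl (must export start_phase/3):
+%%   start_phase(init, _Type, []) -> ok;
+%%   start_phase(go, _Type, [fast]) -> ok.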
diff --git a/lib/kernel/src/auth.erl b/lib/kernel/src/auth.erl
new file mode 100644
index 0000000000..62c0bef0cc
--- /dev/null
+++ b/lib/kernel/src/auth.erl
@@ -0,0 +1,391 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(auth).
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+%% Old documented interface - deprecated
+-export([is_auth/1, cookie/0, cookie/1, node_cookie/1, node_cookie/2]).
+-deprecated([{is_auth,1}, {cookie,'_'}, {node_cookie, '_'}]).
+
+%% New interface - meant for internal use within kernel only
+-export([get_cookie/0, get_cookie/1,
+ set_cookie/1, set_cookie/2,
+ sync_cookie/0,
+ print/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(COOKIE_ETS_PROTECTION, protected).
+
+-record(state, {
+ our_cookie, %% Our own cookie
+ other_cookies %% The send-cookies of other nodes
+ }).
+
+-include("../include/file.hrl").
+
+%%----------------------------------------------------------------------
+%% Exported functions
+%%----------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, auth}, auth, [], []).
+
+%%--Deprecated interface------------------------------------------------
+
+-spec is_auth(Node :: node()) -> 'yes' | 'no'.
+
+is_auth(Node) ->
+ case net_adm:ping(Node) of
+ pong -> yes;
+ pang -> no
+ end.
+
+-spec cookie() -> atom().
+
+cookie() ->
+ get_cookie().
+
+-spec cookie(Cookies :: [atom(),...] | atom()) -> 'true'.
+
+cookie([Cookie]) ->
+ set_cookie(Cookie);
+cookie(Cookie) ->
+ set_cookie(Cookie).
+
+-spec node_cookie(Cookies :: [atom(),...]) -> 'yes' | 'no'.
+
+node_cookie([Node, Cookie]) ->
+ node_cookie(Node, Cookie).
+
+-spec node_cookie(Node :: node(), Cookie :: atom()) -> 'yes' | 'no'.
+
+node_cookie(Node, Cookie) ->
+ set_cookie(Node, Cookie),
+ is_auth(Node).
+
+%%--"New" interface-----------------------------------------------------
+
+-spec get_cookie() -> atom().
+
+get_cookie() ->
+ get_cookie(node()).
+
+-spec get_cookie(Node :: node()) -> atom().
+
+get_cookie(_Node) when node() =:= nonode@nohost ->
+ nocookie;
+get_cookie(Node) ->
+ gen_server:call(auth, {get_cookie, Node}).
+
+-spec set_cookie(Cookie :: atom()) -> 'true'.
+
+set_cookie(Cookie) ->
+ set_cookie(node(), Cookie).
+
+-spec set_cookie(Node :: node(), Cookie :: atom()) -> 'true'.
+
+set_cookie(_Node, _Cookie) when node() =:= nonode@nohost ->
+ erlang:error(distribution_not_started);
+set_cookie(Node, Cookie) ->
+ gen_server:call(auth, {set_cookie, Node, Cookie}).
+
+-spec sync_cookie() -> any().
+
+sync_cookie() ->
+ gen_server:call(auth, sync_cookie).
+
+-spec print(Node :: node(), Format :: string(), Args :: [_]) -> 'ok'.
+
+print(Node,Format,Args) ->
+ (catch gen_server:cast({auth,Node},{print,Format,Args})).
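+
+%% Illustrative sketch (not part of the original module), showing the
+%% call shapes of the internal cookie interface on a distributed node;
+%% the node name and cookie atoms are hypothetical:
+%%
+%%   true   = auth:set_cookie('other@host', secret),  %% send-cookie for that node
+%%   secret = auth:get_cookie('other@host'),
+%%   auth:get_cookie().                               %% this node's own cookie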
+
+%%--gen_server callbacks------------------------------------------------
+
+init([]) ->
+ process_flag(trap_exit, true),
+ {ok, init_cookie()}.
+
+%% Opened is a list of servers we have opened up
+%% The net kernel will let all messages to the auth server
+%% through as is
+
+handle_call({get_cookie, Node}, {_From,_Tag}, State) when Node =:= node() ->
+ {reply, State#state.our_cookie, State};
+handle_call({get_cookie, Node}, {_From,_Tag}, State) ->
+ case ets:lookup(State#state.other_cookies, Node) of
+ [{Node, Cookie}] ->
+ {reply, Cookie, State};
+ [] ->
+ {reply, State#state.our_cookie, State}
+ end;
+handle_call({set_cookie, Node, Cookie}, {_From,_Tag}, State)
+ when Node =:= node() ->
+ {reply, true, State#state{our_cookie = Cookie}};
+
+%%
+%% Happens when the distribution is brought up and
+%% someone might have set up the cookie for our new nodename.
+%%
+
+handle_call({set_cookie, Node, Cookie}, {_From,_Tag}, State) ->
+ ets:insert(State#state.other_cookies, {Node, Cookie}),
+ {reply, true, State};
+
+handle_call(sync_cookie, _From, State) ->
+ case ets:lookup(State#state.other_cookies,node()) of
+ [{_N,C}] ->
+ ets:delete(State#state.other_cookies,node()),
+ {reply, true, State#state{our_cookie = C}};
+ [] ->
+ {reply, true, State}
+ end;
+
+handle_call(echo, _From, O) ->
+ {reply, hello, O}.
+
+handle_cast({print,What,Args}, O) ->
+ %% always allow print outs
+ error_logger:error_msg(What,Args),
+ {noreply, O}.
+
+%% A series of bad messages that may come (from older distribution versions).
+
+handle_info({From,badcookie,net_kernel,{From,spawn,_M,_F,_A,_Gleader}}, O) ->
+ auth:print(node(From) ,"~n** Unauthorized spawn attempt to ~w **~n",
+ [node()]),
+ erlang:disconnect_node(node(From)),
+ {noreply, O};
+handle_info({From,badcookie,net_kernel,{From,spawn_link,_M,_F,_A,_Gleader}}, O) ->
+ auth:print(node(From),
+ "~n** Unauthorized spawn_link attempt to ~w **~n",
+ [node()]),
+ erlang:disconnect_node(node(From)),
+ {noreply, O};
+handle_info({_From,badcookie,ddd_server,_Mess}, O) ->
+ %% Ignore bad messages to the ddd server, they will be resent
+    %% if the authentication is successful
+ {noreply, O};
+handle_info({From,badcookie,rex,_Msg}, O) ->
+ auth:print(getnode(From),
+ "~n** Unauthorized rpc attempt to ~w **~n",[node()]),
+ disconnect_node(node(From)),
+ {noreply, O};
+%% These two messages have to do with the old auth:is_auth() call (net_adm:ping)
+handle_info({From,badcookie,net_kernel,{'$gen_call',{From,Tag},{is_auth,_Node}}}, O) -> %% ho ho
+ From ! {Tag, no},
+ {noreply, O};
+handle_info({_From,badcookie,To,{{auth_reply,N},R}}, O) ->%% Let auth replies through
+ catch To ! {{auth_reply,N},R},
+ {noreply, O};
+handle_info({From,badcookie,Name,Mess}, Opened) ->
+ %% This may be registered send as well as pid send.
+ case lists:member(Name, Opened) of
+ true ->
+ catch Name ! Mess;
+ false ->
+ case catch lists:member(element(1, Mess), Opened) of
+ true ->
+ catch Name ! Mess; %% Might be a pid as well
+ _ ->
+ auth:print(getnode(From),
+ "~n** Unauthorized send attempt ~w to ~w **~n",
+ [Mess,node()]),
+ erlang:disconnect_node(getnode(From))
+ end
+ end,
+ {noreply, Opened};
+handle_info(_, O)-> % Ignore anything else especially EXIT signals
+ {noreply, O}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+getnode(P) when is_pid(P) -> node(P);
+getnode(P) -> P.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%
+%%% Cookie functions
+%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Read cookie from $HOME/.erlang.cookie and set it.
+init_cookie() ->
+ case init:get_argument(nocookie) of
+ error ->
+ case init:get_argument(setcookie) of
+ {ok, [[C0]]} ->
+ C = list_to_atom(C0),
+ #state{our_cookie = C,
+ other_cookies = ets:new(cookies,
+ [?COOKIE_ETS_PROTECTION])};
+ _ ->
+ %% Here is the default
+ case read_cookie() of
+ {error, Error} ->
+ error_logger:error_msg(Error, []),
+ %% Is this really this serious?
+ erlang:error(Error);
+ {ok, Co} ->
+ #state{our_cookie = list_to_atom(Co),
+ other_cookies = ets:new(
+ cookies,
+ [?COOKIE_ETS_PROTECTION])}
+ end
+ end;
+ _Other ->
+ #state{our_cookie = nocookie,
+ other_cookies = ets:new(cookies,[?COOKIE_ETS_PROTECTION])}
+ end.
+
+read_cookie() ->
+ case init:get_argument(home) of
+ {ok, [[Home]]} ->
+ read_cookie(filename:join(Home, ".erlang.cookie"));
+ _ ->
+ {error, "No home for cookie file"}
+ end.
+
+read_cookie(Name) ->
+ case file:raw_read_file_info(Name) of
+ {ok, #file_info {type=Type, mode=Mode, size=Size}} ->
+ case check_attributes(Name, Type, Mode, os:type()) of
+ ok -> read_cookie(Name, Size);
+ Error -> Error
+ end;
+ {error, enoent} ->
+ case create_cookie(Name) of
+ ok -> read_cookie(Name);
+ Error -> Error
+ end;
+ {error, Reason} ->
+ {error, make_error(Name, Reason)}
+ end.
+
+read_cookie(Name, Size) ->
+ case file:open(Name, [raw, read]) of
+ {ok, File} ->
+ case file:read(File, Size) of
+ {ok, List} ->
+ file:close(File),
+ check_cookie(List, []);
+ {error, Reason} ->
+ make_error(Name, Reason)
+ end;
+ {error, Reason} ->
+ make_error(Name, Reason)
+ end.
+
+make_error(Name, Reason) ->
+ {error, "Error when reading " ++ Name ++ ": " ++ atom_to_list(Reason)}.
+
+%% Verifies that only the owner can access the cookie file.
+
+check_attributes(Name, Type, _Mode, _Os) when Type =/= regular ->
+ {error, "Cookie file " ++ Name ++ " is of type " ++ Type};
+check_attributes(Name, _Type, Mode, {unix, _}) when (Mode band 8#077) =/= 0 ->
+ {error, "Cookie file " ++ Name ++ " must be accessible by owner only"};
+check_attributes(_Name, _Type, _Mode, _Os) ->
+ ok.
+
+%% Checks that the cookie has the correct format.
+
+check_cookie([Letter|Rest], Result) when $\s =< Letter, Letter =< $~ ->
+ check_cookie(Rest, [Letter|Result]);
+check_cookie([X|Rest], Result) ->
+ check_cookie1([X|Rest], Result);
+check_cookie([], Result) ->
+ check_cookie1([], Result).
+
+check_cookie1([$\n|Rest], Result) ->
+ check_cookie1(Rest, Result);
+check_cookie1([$\r|Rest], Result) ->
+ check_cookie1(Rest, Result);
+check_cookie1([$\s|Rest], Result) ->
+ check_cookie1(Rest, Result);
+check_cookie1([_|_], _Result) ->
+ {error, "Bad characters in cookie"};
+check_cookie1([], []) ->
+ {error, "Too short cookie string"};
+check_cookie1([], Result) ->
+ {ok, lists:reverse(Result)}.
+
+%% Creates a new, random cookie.
+
+create_cookie(Name) ->
+ {_, S1, S2} = now(),
+ Seed = S2*10000+S1,
+ Cookie = random_cookie(20, Seed, []),
+ case file:open(Name, [write, raw]) of
+ {ok, File} ->
+ R1 = file:write(File, Cookie),
+ file:close(File),
+ R2 = file:raw_write_file_info(Name, make_info(Name)),
+ case {R1, R2} of
+ {ok, ok} ->
+ ok;
+ {{error,_Reason}, _} ->
+ {error, "Failed to create cookie file"};
+ {ok, {error, Reason}} ->
+ {error, "Failed to change mode: " ++ atom_to_list(Reason)}
+ end;
+ {error,_Reason} ->
+ {error, "Failed to create cookie file"}
+ end.
+
+random_cookie(0, _, Result) ->
+ Result;
+random_cookie(Count, X0, Result) ->
+ X = next_random(X0),
+ Letter = X*($Z-$A+1) div 16#1000000000 + $A,
+ random_cookie(Count-1, X, [Letter|Result]).
+
+%% Returns suitable information for a new cookie.
+%%
+%% Note: Since the generated cookie depends on the time the file was
+%% created, and the time can be seen plainly in the file, we will
+%% round down the file creation times to the nearest midnight to
+%% give crackers some more work.
+
+make_info(Name) ->
+ Midnight =
+ case file:raw_read_file_info(Name) of
+ {ok, #file_info{atime={Date, _}}} ->
+ {Date, {0, 0, 0}};
+ _ ->
+ {{1990, 1, 1}, {0, 0, 0}}
+ end,
+ #file_info{mode=8#400, atime=Midnight, mtime=Midnight, ctime=Midnight}.
+
+%% This RNG is from line 21 on page 102 in Knuth: The Art of Computer Programming,
+%% Volume II, Seminumerical Algorithms.
+%%
+%% Returns an integer in the range 0..(2^35-1).
+
+next_random(X) ->
+ (X*17059465+1) band 16#fffffffff.
diff --git a/lib/kernel/src/code.erl b/lib/kernel/src/code.erl
new file mode 100644
index 0000000000..fef11d7e6e
--- /dev/null
+++ b/lib/kernel/src/code.erl
@@ -0,0 +1,491 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(code).
+
+%% This is the interface module to the code server. It also contains
+%% some implementation details. See also related modules: code_*.erl
+%% in this directory.
+
+-export([objfile_extension/0,
+ set_path/1,
+ get_path/0,
+ load_file/1,
+ ensure_loaded/1,
+ load_abs/1,
+ load_abs/2,
+ load_binary/3,
+ load_native_partial/2,
+ load_native_sticky/3,
+ delete/1,
+ purge/1,
+ soft_purge/1,
+ is_loaded/1,
+ all_loaded/0,
+ stop/0,
+ root_dir/0,
+ lib_dir/0,
+ lib_dir/1,
+ lib_dir/2,
+ compiler_dir/0,
+ priv_dir/1,
+ stick_dir/1,
+ unstick_dir/1,
+ stick_mod/1,
+ unstick_mod/1,
+ is_sticky/1,
+ get_object_code/1,
+ add_path/1,
+ add_pathsz/1,
+ add_paths/1,
+ add_pathsa/1,
+ add_patha/1,
+ add_pathz/1,
+ del_path/1,
+ replace_path/2,
+ rehash/0,
+ start_link/0, start_link/1,
+ which/1,
+ where_is_file/1,
+ where_is_file/2,
+ set_primary_archive/2,
+ clash/0]).
+
+-include_lib("kernel/include/file.hrl").
+
+%% User interface.
+%%
+%% objfile_extension() -> ".beam"
+%% set_path(Dir*) -> true
+%% get_path() -> Dir*
+%% add_path(Dir) -> true | {error, What}
+%% add_patha(Dir) -> true | {error, What}
+%% add_pathz(Dir) -> true | {error, What}
+%% add_paths(DirList) -> true | {error, What}
+%% add_pathsa(DirList) -> true | {error, What}
+%% add_pathsz(DirList) -> true | {error, What}
+%% del_path(Dir) -> true | {error, What}
+%% replace_path(Name,Dir) -> true | {error, What}
+%% load_file(File) -> {error,What} | {module, Mod}
+%% load_abs(File) -> {error,What} | {module, Mod}
+%% load_abs(File,Mod) -> {error,What} | {module, Mod}
+%% load_binary(Mod,File,Bin) -> {error,What} | {module,Mod}
+%% ensure_loaded(Module) -> {error,What} | {module, Mod}
+%% delete(Module)
+%% purge(Module) kills all procs running old code
+%% soft_purge(Module) -> true | false
+%% is_loaded(Module) -> {file, File} | false
+%% all_loaded() -> {Module, File}*
+%% get_object_code(Mod) -> error | {Mod, Bin, Filename}
+%% stop() -> true
+%% root_dir()
+%% compiler_dir()
+%% lib_dir()
+%% priv_dir(Name)
+%% stick_dir(Dir) -> ok | error
+%% unstick_dir(Dir) -> ok | error
+%% is_sticky(Module) -> true | false
+%% which(Module) -> Filename
+%% set_primary_archive(FileName, Bin) -> ok | {error, Reason}
+%% clash() -> print out name clashes in the code path
+
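+%% A typical interactive session touches a handful of these functions
+%% (illustrative directory and module names; the actual results depend
+%% on the local installation):
+%%
+%%   1> code:add_patha("/home/me/myapp/ebin").
+%%   true
+%%   2> code:load_file(myapp).
+%%   {module,myapp}
+%%   3> code:is_loaded(myapp).
+%%   {file,"/home/me/myapp/ebin/myapp.beam"}
+%%
+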
+%%----------------------------------------------------------------------------
+%% Some types for basic exported functions of this module
+%%----------------------------------------------------------------------------
+
+-type load_error_rsn() :: 'badfile' | 'native_code' | 'nofile' | 'not_purged'
+ | 'sticky_directory'. % for some functions only
+-type load_ret() :: {'error', load_error_rsn()} | {'module', atom()}.
+-type loaded_ret_atoms() :: 'cover_compiled' | 'preloaded'.
+-type loaded_filename() :: file:filename() | loaded_ret_atoms().
+
+%%----------------------------------------------------------------------------
+%% User interface
+%%----------------------------------------------------------------------------
+
+-spec objfile_extension() -> file:filename().
+objfile_extension() ->
+ init:objfile_extension().
+
+-spec load_file(Module :: atom()) -> load_ret().
+load_file(Mod) when is_atom(Mod) ->
+ call({load_file,Mod}).
+
+-spec ensure_loaded(Module :: atom()) -> load_ret().
+ensure_loaded(Mod) when is_atom(Mod) ->
+ call({ensure_loaded,Mod}).
+
+%% XXX File as an atom is allowed only for backwards compatibility.
+-spec load_abs(Filename :: file:filename()) -> load_ret().
+load_abs(File) when is_list(File); is_atom(File) -> call({load_abs,File,[]}).
+
+%% XXX Filename is also an atom(), e.g. 'cover_compiled'
+-spec load_abs(Filename :: loaded_filename(), Module :: atom()) -> load_ret().
+load_abs(File,M) when (is_list(File) orelse is_atom(File)), is_atom(M) ->
+ call({load_abs,File,M}).
+
+%% XXX Filename is also an atom(), e.g. 'cover_compiled'
+-spec load_binary(Module :: atom(), Filename :: loaded_filename(), Binary :: binary()) -> load_ret().
+load_binary(Mod,File,Bin)
+ when is_atom(Mod), (is_list(File) orelse is_atom(File)), is_binary(Bin) ->
+ call({load_binary,Mod,File,Bin}).
+
+-spec load_native_partial(Module :: atom(), Binary :: binary()) -> load_ret().
+load_native_partial(Mod,Bin) when is_atom(Mod), is_binary(Bin) ->
+ call({load_native_partial,Mod,Bin}).
+
+-spec load_native_sticky(Module :: atom(), Binary :: binary(), WholeModule :: 'false' | binary()) -> load_ret().
+load_native_sticky(Mod,Bin,WholeModule)
+ when is_atom(Mod), is_binary(Bin),
+ (is_binary(WholeModule) orelse WholeModule =:= false) ->
+ call({load_native_sticky,Mod,Bin,WholeModule}).
+
+-spec delete(Module :: atom()) -> boolean().
+delete(Mod) when is_atom(Mod) -> call({delete,Mod}).
+
+-spec purge(Module :: atom()) -> boolean().
+purge(Mod) when is_atom(Mod) -> call({purge,Mod}).
+
+-spec soft_purge(Module :: atom()) -> boolean().
+soft_purge(Mod) when is_atom(Mod) -> call({soft_purge,Mod}).
+
+-spec is_loaded(Module :: atom()) -> {'file', loaded_filename()} | 'false'.
+is_loaded(Mod) when is_atom(Mod) -> call({is_loaded,Mod}).
+
+-spec get_object_code(Module :: atom()) -> {atom(), binary(), file:filename()} | 'error'.
+get_object_code(Mod) when is_atom(Mod) -> call({get_object_code, Mod}).
+
+-spec all_loaded() -> [{atom(), loaded_filename()}].
+all_loaded() -> call(all_loaded).
+
+-spec stop() -> no_return().
+stop() -> call(stop).
+
+-spec root_dir() -> file:filename().
+root_dir() -> call({dir,root_dir}).
+
+-spec lib_dir() -> file:filename().
+lib_dir() -> call({dir,lib_dir}).
+
+%% XXX is_list() is for backwards compatibility -- take out in future version
+-spec lib_dir(App :: atom()) -> file:filename() | {'error', 'bad_name'}.
+lib_dir(App) when is_atom(App) ; is_list(App) -> call({dir,{lib_dir,App}}).
+
+-spec lib_dir(App :: atom(), SubDir :: atom()) -> file:filename() | {'error', 'bad_name'}.
+lib_dir(App, SubDir) when is_atom(App), is_atom(SubDir) -> call({dir,{lib_dir,App,SubDir}}).
+
+-spec compiler_dir() -> file:filename().
+compiler_dir() -> call({dir,compiler_dir}).
+
+%% XXX is_list() is for backwards compatibility -- take out in future version
+-spec priv_dir(Appl :: atom()) -> file:filename() | {'error', 'bad_name'}.
+priv_dir(App) when is_atom(App) ; is_list(App) -> call({dir,{priv_dir,App}}).
+
+-spec stick_dir(Directory :: file:filename()) -> 'ok' | 'error'.
+stick_dir(Dir) when is_list(Dir) -> call({stick_dir,Dir}).
+
+-spec unstick_dir(Directory :: file:filename()) -> 'ok' | 'error'.
+unstick_dir(Dir) when is_list(Dir) -> call({unstick_dir,Dir}).
+
+-spec stick_mod(Module :: atom()) -> 'true'.
+stick_mod(Mod) when is_atom(Mod) -> call({stick_mod,Mod}).
+
+-spec unstick_mod(Module :: atom()) -> 'true'.
+unstick_mod(Mod) when is_atom(Mod) -> call({unstick_mod,Mod}).
+
+-spec is_sticky(Module :: atom()) -> boolean().
+is_sticky(Mod) when is_atom(Mod) -> call({is_sticky,Mod}).
+
+-spec set_path(Directories :: [file:filename()]) -> 'true' | {'error', term()}.
+set_path(PathList) when is_list(PathList) -> call({set_path,PathList}).
+
+-spec get_path() -> [file:filename()].
+get_path() -> call(get_path).
+
+-spec add_path(Directory :: file:filename()) -> 'true' | {'error', term()}.
+add_path(Dir) when is_list(Dir) -> call({add_path,last,Dir}).
+
+-spec add_pathz(Directory :: file:filename()) -> 'true' | {'error', term()}.
+add_pathz(Dir) when is_list(Dir) -> call({add_path,last,Dir}).
+
+-spec add_patha(Directory :: file:filename()) -> 'true' | {'error', term()}.
+add_patha(Dir) when is_list(Dir) -> call({add_path,first,Dir}).
+
+-spec add_paths(Directories :: [file:filename()]) -> 'ok'.
+add_paths(Dirs) when is_list(Dirs) -> call({add_paths,last,Dirs}).
+
+-spec add_pathsz(Directories :: [file:filename()]) -> 'ok'.
+add_pathsz(Dirs) when is_list(Dirs) -> call({add_paths,last,Dirs}).
+
+-spec add_pathsa(Directories :: [file:filename()]) -> 'ok'.
+add_pathsa(Dirs) when is_list(Dirs) -> call({add_paths,first,Dirs}).
+
+%% XXX Contract's input argument differs from add_path/1 -- why?
+-spec del_path(Name :: file:filename() | atom()) -> boolean() | {'error', 'bad_name'}.
+del_path(Name) when is_list(Name) ; is_atom(Name) -> call({del_path,Name}).
+
+-type replace_path_error() :: {'error', 'bad_directory' | 'bad_name' | {'badarg',_}}.
+-spec replace_path(Name :: atom(), Dir :: file:filename()) -> 'true' | replace_path_error().
+replace_path(Name, Dir) when (is_atom(Name) or is_list(Name)) and
+ (is_atom(Dir) or is_list(Dir)) ->
+ call({replace_path,Name,Dir}).
+
+-spec rehash() -> 'ok'.
+rehash() -> call(rehash).
+
+%%-----------------------------------------------------------------
+
+call(Req) ->
+ code_server:call(code_server, Req).
+
+-spec start_link() -> {'ok', pid()} | {'error', 'crash'}.
+start_link() ->
+ start_link([stick]).
+
+-spec start_link(Flags :: [atom()]) -> {'ok', pid()} | {'error', 'crash'}.
+start_link(Flags) ->
+ do_start(Flags).
+
+%%-----------------------------------------------------------------
+%% In the init phase, code must only use modules that are already
+%% loaded, i.e. modules that are either pre-loaded (e.g. init) or
+%% loaded first in the boot script (e.g. erlang). Therefore, keep the
+%% modules used in the init phase to a minimum, and make sure they are
+%% loaded before init is called. Try to call these modules from
+%% do_start instead. file is used in init - this is ok; file has been
+%% started before us, so the module is loaded.
+%%-----------------------------------------------------------------
+
+do_start(Flags) ->
+ %% The following module_info/1 calls are here to ensure
+ %% that the modules are loaded prior to their use elsewhere in
+ %% the code_server.
+ %% Otherwise a deadlock may occur when the code_server is starting.
+ code_server:module_info(module),
+ packages:module_info(module),
+ catch hipe_unified_loader:load_hipe_modules(),
+ gb_sets:module_info(module),
+ gb_trees:module_info(module),
+
+ ets:module_info(module),
+ os:module_info(module),
+ filename:module_info(module),
+ lists:module_info(module),
+
+ Mode = get_mode(Flags),
+ case init:get_argument(root) of
+ {ok,[[Root0]]} ->
+ Root = filename:join([Root0]), % Normalize using filename:join/1
+ case code_server:start_link([Root,Mode]) of
+ {ok,_Pid} = Ok2 ->
+ if
+ Mode =:= interactive ->
+ case lists:member(stick, Flags) of
+ true -> do_stick_dirs();
+ _ -> ok
+ end;
+ true ->
+ ok
+ end,
+ Ok2;
+ Other ->
+ Other
+ end;
+ Other ->
+ error_logger:error_msg("Can not start code server ~w ~n",[Other]),
+ {error, crash}
+ end.
+
+do_stick_dirs() ->
+ do_s(compiler),
+ do_s(stdlib),
+ do_s(kernel).
+
+do_s(Lib) ->
+ case lib_dir(Lib) of
+ {error, _} ->
+ ok;
+ Dir ->
+ %% The return value is intentionally ignored. Missing
+ %% directories is not a fatal error. (In embedded systems,
+ %% there is usually no compiler directory.)
+ stick_dir(filename:append(Dir, "ebin")),
+ ok
+ end.
+
+get_mode(Flags) ->
+ case lists:member(embedded, Flags) of
+ true ->
+ embedded;
+ _Otherwise ->
+ case init:get_argument(mode) of
+ {ok,[["embedded"]]} ->
+ embedded;
+ {ok,[["minimal"]]} ->
+ minimal;
+ _Else ->
+ interactive
+ end
+ end.
+
+%% Find out which version of a particular module we would load if we
+%% tried to load it. If the module is already loaded, return the name
+%% of the file that contains the loaded object code instead.
+
+-type which_ret_atoms() :: loaded_ret_atoms() | 'non_existing'.
+
+-spec which(Module :: atom()) -> file:filename() | which_ret_atoms().
+
+which(Module) when is_atom(Module) ->
+ case is_loaded(Module) of
+ false ->
+ which2(Module);
+ {file, File} ->
+ File
+ end.
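+
+%% For example (illustrative paths; the exact result depends on the
+%% installation):
+%%
+%%   1> code:which(init).
+%%   preloaded
+%%   2> code:which(lists).
+%%   "/usr/local/lib/erlang/lib/stdlib-<vsn>/ebin/lists.beam"
+%%   3> code:which(no_such_module).
+%%   non_existing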
+
+which2(Module) ->
+ Base = to_path(Module),
+ File = filename:basename(Base) ++ objfile_extension(),
+ Path = get_path(),
+ which(File, filename:dirname(Base), Path).
+
+-spec which(file:filename(), file:filename(), [file:filename()]) ->
+ 'non_existing' | file:filename().
+
+which(_, _, []) ->
+ non_existing;
+which(File, Base, [Directory|Tail]) ->
+ Path = if
+ Base =:= "." -> Directory;
+ true -> filename:join(Directory, Base)
+ end,
+ case erl_prim_loader:list_dir(Path) of
+ {ok,Files} ->
+ case lists:member(File,Files) of
+ true ->
+ filename:append(Path, File);
+ false ->
+ which(File, Base, Tail)
+ end;
+ _Error ->
+ which(File, Base, Tail)
+ end.
+
+%% Search the code path for a specific file. Try to locate
+%% it in the code path cache if possible.
+
+-spec where_is_file(Filename :: file:filename()) ->
+ 'non_existing' | file:filename().
+
+where_is_file(File) when is_list(File) ->
+ case call({is_cached,File}) of
+ no ->
+ Path = get_path(),
+ which(File, ".", Path);
+ Dir ->
+ filename:join(Dir, File)
+ end.
+
+-spec where_is_file(Path :: file:filename(), Filename :: file:filename()) ->
+ file:filename() | 'non_existing'.
+
+where_is_file(Path, File) when is_list(Path), is_list(File) ->
+ CodePath = get_path(),
+ if
+ Path =:= CodePath ->
+ case call({is_cached, File}) of
+ no ->
+ which(File, ".", Path);
+ Dir ->
+ filename:join(Dir, File)
+ end;
+ true ->
+ which(File, ".", Path)
+ end.
+
+-spec set_primary_archive(ArchiveFile :: file:filename(), ArchiveBin :: binary()) -> 'ok' | {'error', atom()}.
+
+set_primary_archive(ArchiveFile0, ArchiveBin) when is_list(ArchiveFile0), is_binary(ArchiveBin) ->
+ ArchiveFile = filename:absname(ArchiveFile0),
+ case call({set_primary_archive, ArchiveFile, ArchiveBin}) of
+ {ok, []} ->
+ ok;
+ {ok, _Mode, Ebins} ->
+ %% Prepend the code path with the ebins found in the archive
+ Ebins2 = [filename:join([ArchiveFile, E]) || E <- Ebins],
+ add_pathsa(Ebins2); % Returns ok
+ {error, _Reason} = Error ->
+ Error
+ end.
+
+%% Search the entire path system looking for name clashes
+
+-spec clash() -> 'ok'.
+
+clash() ->
+ Path = get_path(),
+ Struct = lists:flatten(build(Path)),
+ Len = length(search(Struct)),
+ io:format("** Found ~w name clashes in code paths ~n", [Len]).
+
+%% Internal for clash/0
+
+search([]) -> [];
+search([{Dir, File} | Tail]) ->
+ case lists:keyfind(File, 2, Tail) of
+ false ->
+ search(Tail);
+ {Dir2, File} ->
+ io:format("** ~s hides ~s~n",
+ [filename:join(Dir, File),
+ filename:join(Dir2, File)]),
+ [clash | search(Tail)]
+ end.
+
+build([]) -> [];
+build([Dir|Tail]) ->
+ Files = filter(objfile_extension(), Dir, file:list_dir(Dir)),
+ [decorate(Files, Dir) | build(Tail)].
+
+decorate([], _) -> [];
+decorate([File|Tail], Dir) ->
+ [{Dir, File} | decorate(Tail, Dir)].
+
+filter(_Ext, Dir, {error,_}) ->
+ io:format("** Bad path can't read ~s~n", [Dir]), [];
+filter(Ext, _, {ok,Files}) ->
+ filter2(Ext, length(Ext), Files).
+
+filter2(_Ext, _Extlen, []) -> [];
+filter2(Ext, Extlen,[File|Tail]) ->
+ case has_ext(Ext,Extlen, File) of
+ true -> [File | filter2(Ext, Extlen, Tail)];
+ false -> filter2(Ext, Extlen, Tail)
+ end.
+
+has_ext(Ext, Extlen,File) ->
+ L = length(File),
+ case catch lists:nthtail(L - Extlen, File) of
+ Ext -> true;
+ _ -> false
+ end.
+
+to_path(X) ->
+ filename:join(packages:split(X)).
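+
+%% For example, assuming packages:split/1 simply splits a (possibly
+%% dotted) module name on ".", on a Unix-like system:
+%%
+%%   to_path(lists)     =:= "lists"
+%%   to_path('a.b.mod') =:= "a/b/mod"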
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
new file mode 100644
index 0000000000..018f7f41d2
--- /dev/null
+++ b/lib/kernel/src/code_server.erl
@@ -0,0 +1,1539 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(code_server).
+
+%% This file holds the server part of the code_server.
+
+-export([start_link/1,
+ call/2,
+ system_continue/3,
+ system_terminate/4,
+ system_code_change/4,
+ error_msg/2, info_msg/2
+ ]).
+
+-include_lib("kernel/include/file.hrl").
+
+-import(lists, [foreach/2]).
+
+-record(state,{supervisor,
+ root,
+ path,
+ moddb,
+ namedb,
+ cache = no_cache,
+ mode=interactive,
+ on_load = []}).
+
+start_link(Args) ->
+ Ref = make_ref(),
+ Parent = self(),
+ Init = fun() -> init(Ref, Parent, Args) end,
+ spawn_link(Init),
+ receive
+ {Ref,Res} -> Res
+ end.
+
+
+%% -----------------------------------------------------------
+%% Init the code_server process.
+%% -----------------------------------------------------------
+
+init(Ref, Parent, [Root,Mode0]) ->
+ register(?MODULE, self()),
+ process_flag(trap_exit, true),
+
+ Db = ets:new(code, [private]),
+ foreach(fun (M) -> ets:insert(Db, {M,preloaded}) end, erlang:pre_loaded()),
+ ets:insert(Db, init:fetch_loaded()),
+
+ Mode =
+ case Mode0 of
+ minimal -> interactive;
+ _ -> Mode0
+ end,
+
+ IPath =
+ case Mode of
+ interactive ->
+ LibDir = filename:append(Root, "lib"),
+ {ok,Dirs} = erl_prim_loader:list_dir(LibDir),
+ {Paths,_Libs} = make_path(LibDir,Dirs),
+ UserLibPaths = get_user_lib_dirs(),
+ ["."] ++ UserLibPaths ++ Paths;
+ _ ->
+ []
+ end,
+
+ Path = add_loader_path(IPath, Mode),
+ State0 = #state{root = Root,
+ path = Path,
+ moddb = Db,
+ namedb = init_namedb(Path),
+ mode = Mode},
+
+ State =
+ case init:get_argument(code_path_cache) of
+ {ok, _} ->
+ create_cache(State0);
+ error ->
+ State0
+ end,
+
+ Parent ! {Ref,{ok,self()}},
+ loop(State#state{supervisor=Parent}).
+
+get_user_lib_dirs() ->
+ case os:getenv("ERL_LIBS") of
+ LibDirs0 when is_list(LibDirs0) ->
+ Sep =
+ case os:type() of
+ {win32, _} -> $;;
+ _ -> $:
+ end,
+ LibDirs = split_paths(LibDirs0, Sep, [], []),
+ get_user_lib_dirs_1(LibDirs);
+ false ->
+ []
+ end.
+
+get_user_lib_dirs_1([Dir|DirList]) ->
+ case erl_prim_loader:list_dir(Dir) of
+ {ok, Dirs} ->
+ {Paths,_Libs} = make_path(Dir, Dirs),
+ %% Only add paths trailing with ./ebin.
+ [P || P <- Paths, filename:basename(P) =:= "ebin"] ++
+ get_user_lib_dirs_1(DirList);
+ error ->
+ get_user_lib_dirs_1(DirList)
+ end;
+get_user_lib_dirs_1([]) -> [].
+
+
+split_paths([S|T], S, Path, Paths) ->
+ split_paths(T, S, [], [lists:reverse(Path) | Paths]);
+split_paths([C|T], S, Path, Paths) ->
+ split_paths(T, S, [C|Path], Paths);
+split_paths([], _S, Path, Paths) ->
+ lists:reverse(Paths, [lists:reverse(Path)]).
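+
+%% A small example of the splitting above. With ERL_LIBS set to
+%% "/a/lib:/b/lib" on a Unix system ($: is the separator there,
+%% $; on Windows):
+%%
+%%   split_paths("/a/lib:/b/lib", $:, [], []) =:= ["/a/lib", "/b/lib"]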
+
+call(Name, Req) ->
+ Name ! {code_call, self(), Req},
+ receive
+ {?MODULE, Reply} ->
+ Reply
+ end.
+
+reply(Pid, Res) ->
+ Pid ! {?MODULE, Res}.
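+
+%% The protocol between the code module and this process is a plain
+%% message exchange; this sketch shows what call/2 and reply/2 above
+%% amount to (get_path is just one of the requests handled below):
+%%
+%%   code_server ! {code_call, self(), get_path},
+%%   receive
+%%       {code_server, Path} -> Path
+%%   end.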
+
+loop(#state{supervisor=Supervisor}=State0) ->
+ receive
+ {code_call, Pid, Req} ->
+ case handle_call(Req, {Pid, call}, State0) of
+ {reply, Res, State} ->
+ reply(Pid, Res),
+ loop(State);
+ {noreply, State} ->
+ loop(State);
+ {stop, Why, stopped, State} ->
+ system_terminate(Why, Supervisor, [], State)
+ end;
+ {'EXIT', Supervisor, Reason} ->
+ system_terminate(Reason, Supervisor, [], State0);
+ {system, From, Msg} ->
+ handle_system_msg(running,Msg, From, Supervisor, State0);
+ {'DOWN',Ref,process,_,Res} ->
+ State = finish_on_load(Ref, Res, State0),
+ loop(State);
+ _Msg ->
+ loop(State0)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% System upgrade
+
+handle_system_msg(SysState,Msg,From,Parent,Misc) ->
+ case do_sys_cmd(SysState,Msg,Parent, Misc) of
+ {suspended, Reply, NMisc} ->
+ gen_reply(From, Reply),
+ suspend_loop(suspended, Parent, NMisc);
+ {running, Reply, NMisc} ->
+ gen_reply(From, Reply),
+ system_continue(Parent, [], NMisc)
+ end.
+
+gen_reply({To, Tag}, Reply) ->
+ catch To ! {Tag, Reply}.
+
+%%-----------------------------------------------------------------
+%% When a process is suspended, it can only respond to system
+%% messages.
+%%-----------------------------------------------------------------
+suspend_loop(SysState, Parent, Misc) ->
+ receive
+ {system, From, Msg} ->
+ handle_system_msg(SysState, Msg, From, Parent, Misc);
+ {'EXIT', Parent, Reason} ->
+ system_terminate(Reason, Parent, [], Misc)
+ end.
+
+do_sys_cmd(_, suspend, _Parent, Misc) ->
+ {suspended, ok, Misc};
+do_sys_cmd(_, resume, _Parent, Misc) ->
+ {running, ok, Misc};
+do_sys_cmd(SysState, get_status, Parent, Misc) ->
+ Status = {status, self(), {module, ?MODULE},
+ [get(), SysState, Parent, [], Misc]},
+ {SysState, Status, Misc};
+do_sys_cmd(SysState, {debug, _What}, _Parent, Misc) ->
+ {SysState,ok,Misc};
+do_sys_cmd(suspended, {change_code, Module, Vsn, Extra}, _Parent, Misc0) ->
+ {Res, Misc} =
+ case catch ?MODULE:system_code_change(Misc0, Module, Vsn, Extra) of
+ {ok, Misc1} -> {ok, Misc1};
+ Else -> {{error, Else}, Misc0}
+ end,
+ {suspended, Res, Misc};
+do_sys_cmd(SysState, Other, _Parent, Misc) ->
+ {SysState, {error, {unknown_system_msg, Other}}, Misc}.
+
+system_continue(_Parent, _Debug, State) ->
+ loop(State).
+
+system_terminate(_Reason, _Parent, _Debug, _State) ->
+% error_msg("~p terminating: ~p~n ",[?MODULE,Reason]),
+ exit(shutdown).
+
+system_code_change(State, _Module, _OldVsn, _Extra) ->
+ {ok, State}.
+
+%%
+%% The gen_server call back functions.
+%%
+
+handle_call({stick_dir,Dir}, {_From,_Tag}, S) ->
+ {reply,stick_dir(Dir, true, S),S};
+
+handle_call({unstick_dir,Dir}, {_From,_Tag}, S) ->
+ {reply,stick_dir(Dir, false, S),S};
+
+handle_call({stick_mod,Mod}, {_From,_Tag}, S) ->
+ {reply,stick_mod(Mod, true, S),S};
+
+handle_call({unstick_mod,Mod}, {_From,_Tag}, S) ->
+ {reply,stick_mod(Mod, false, S),S};
+
+handle_call({dir,Dir},{_From,_Tag}, S) ->
+ Root = S#state.root,
+ Resp = do_dir(Root,Dir,S#state.namedb),
+ {reply,Resp,S};
+
+handle_call({load_file,Mod}, Caller, St) ->
+ case modp(Mod) of
+ false ->
+ {reply,{error,badarg},St};
+ true ->
+ load_file(Mod, Caller, St)
+ end;
+
+handle_call({add_path,Where,Dir0}, {_From,_Tag}, S=#state{cache=Cache0}) ->
+ case Cache0 of
+ no_cache ->
+ {Resp,Path} = add_path(Where, Dir0, S#state.path, S#state.namedb),
+ {reply,Resp,S#state{path=Path}};
+ _ ->
+ Dir = absname(Dir0), %% Cache always expands the path
+ {Resp,Path} = add_path(Where, Dir, S#state.path, S#state.namedb),
+ Cache=update_cache([Dir],Where,Cache0),
+ {reply,Resp,S#state{path=Path,cache=Cache}}
+ end;
+
+handle_call({add_paths,Where,Dirs0}, {_From,_Tag}, S=#state{cache=Cache0}) ->
+ case Cache0 of
+ no_cache ->
+ {Resp,Path} = add_paths(Where,Dirs0,S#state.path,S#state.namedb),
+ {reply,Resp, S#state{path=Path}};
+ _ ->
+ %% Cache always expands the path
+ Dirs = [absname(Dir) || Dir <- Dirs0],
+ {Resp,Path} = add_paths(Where, Dirs, S#state.path, S#state.namedb),
+ Cache=update_cache(Dirs,Where,Cache0),
+ {reply,Resp,S#state{cache=Cache,path=Path}}
+ end;
+
+handle_call({set_path,PathList}, {_From,_Tag}, S) ->
+ Path = S#state.path,
+ {Resp, NewPath,NewDb} = set_path(PathList, Path, S#state.namedb),
+ {reply,Resp,rehash_cache(S#state{path = NewPath, namedb=NewDb})};
+
+handle_call({del_path,Name}, {_From,_Tag}, S) ->
+ {Resp,Path} = del_path(Name,S#state.path,S#state.namedb),
+ {reply,Resp,rehash_cache(S#state{path = Path})};
+
+handle_call({replace_path,Name,Dir}, {_From,_Tag}, S) ->
+ {Resp,Path} = replace_path(Name,Dir,S#state.path,S#state.namedb),
+ {reply,Resp,rehash_cache(S#state{path = Path})};
+
+handle_call(rehash, {_From,_Tag}, S0) ->
+ S = create_cache(S0),
+ {reply,ok,S};
+
+handle_call(get_path, {_From,_Tag}, S) ->
+ {reply,S#state.path,S};
+
+%% Messages to load, delete and purge modules/files.
+handle_call({load_abs,File,Mod}, Caller, S) ->
+ case modp(File) of
+ false ->
+ {reply,{error,badarg},S};
+ true ->
+ load_abs(File, Mod, Caller, S)
+ end;
+
+handle_call({load_binary,Mod,File,Bin}, Caller, S) ->
+ do_load_binary(Mod, File, Bin, Caller, S);
+
+handle_call({load_native_partial,Mod,Bin}, {_From,_Tag}, S) ->
+ Result = (catch hipe_unified_loader:load(Mod,Bin)),
+ Status = hipe_result_to_status(Result),
+ {reply,Status,S};
+
+handle_call({load_native_sticky,Mod,Bin,WholeModule}, {_From,_Tag}, S) ->
+ Result = (catch hipe_unified_loader:load_module(Mod,Bin,WholeModule)),
+ Status = hipe_result_to_status(Result),
+ {reply,Status,S};
+
+handle_call({ensure_loaded,Mod0}, Caller, St0) ->
+ Fun = fun (M, St) ->
+ case erlang:module_loaded(M) of
+ true ->
+ {reply,{module,M},St};
+ false when St#state.mode =:= interactive ->
+ load_file(M, Caller, St);
+ false ->
+ {reply,{error,embedded},St}
+ end
+ end,
+ do_mod_call(Fun, Mod0, {error,badarg}, St0);
+
+handle_call({delete,Mod0}, {_From,_Tag}, S) ->
+ Fun = fun (M, St) ->
+ case catch erlang:delete_module(M) of
+ true ->
+ ets:delete(St#state.moddb, M),
+ {reply,true,St};
+ _ ->
+ {reply,false,St}
+ end
+ end,
+ do_mod_call(Fun, Mod0, false, S);
+
+handle_call({purge,Mod0}, {_From,_Tag}, St0) ->
+ do_mod_call(fun (M, St) ->
+ {reply,do_purge(M),St}
+ end, Mod0, false, St0);
+
+handle_call({soft_purge,Mod0}, {_From,_Tag}, St0) ->
+ do_mod_call(fun (M, St) ->
+ {reply,do_soft_purge(M),St}
+ end, Mod0, true, St0);
+
+handle_call({is_loaded,Mod0}, {_From,_Tag}, St0) ->
+ do_mod_call(fun (M, St) ->
+ {reply,is_loaded(M, St#state.moddb),St}
+ end, Mod0, false, St0);
+
+handle_call(all_loaded, {_From,_Tag}, S) ->
+ Db = S#state.moddb,
+ {reply,all_loaded(Db),S};
+
+handle_call({get_object_code,Mod0}, {_From,_Tag}, St0) ->
+ Fun = fun(M, St) ->
+ Path = St#state.path,
+ case mod_to_bin(Path, atom_to_list(M)) of
+ {_,Bin,FName} -> {reply,{M,Bin,FName},St};
+ Error -> {reply,Error,St}
+ end
+ end,
+ do_mod_call(Fun, Mod0, error, St0);
+
+handle_call({is_sticky, Mod}, {_From,_Tag}, S) ->
+ Db = S#state.moddb,
+ {reply, is_sticky(Mod,Db), S};
+
+handle_call(stop,{_From,_Tag}, S) ->
+ {stop,normal,stopped,S};
+
+handle_call({is_cached,_File}, {_From,_Tag}, S=#state{cache=no_cache}) ->
+ {reply, no, S};
+
+handle_call({set_primary_archive, File, ArchiveBin}, {_From,_Tag}, S=#state{mode=Mode}) ->
+ case erl_prim_loader:set_primary_archive(File, ArchiveBin) of
+ {ok, Files} ->
+ {reply, {ok, Mode, Files}, S};
+ {error, Reason} ->
+ {reply, {error, Reason}, S}
+ end;
+
+handle_call({is_cached,File}, {_From,_Tag}, S=#state{cache=Cache}) ->
+ ObjExt = objfile_extension(),
+ Ext = filename:extension(File),
+ Type = case Ext of
+ ObjExt -> obj;
+ ".app" -> app;
+ _ -> undef
+ end,
+ if Type =:= undef ->
+ {reply, no, S};
+ true ->
+ Key = {Type,list_to_atom(filename:rootname(File, Ext))},
+ case ets:lookup(Cache, Key) of
+ [] ->
+ {reply, no, S};
+ [{Key,Dir}] ->
+ {reply, Dir, S}
+ end
+ end;
+
+handle_call(Other,{_From,_Tag}, S) ->
+ error_msg(" ** Codeserver*** ignoring ~w~n ",[Other]),
+ {noreply,S}.
+
+do_mod_call(Action, Module, _Error, St) when is_atom(Module) ->
+ Action(Module, St);
+do_mod_call(Action, Module, Error, St) ->
+ try list_to_atom(Module) of
+ Atom when is_atom(Atom) ->
+ Action(Atom, St)
+ catch
+ error:badarg ->
+ {reply,Error,St}
+ end.
+
+%% --------------------------------------------------------------
+%% Cache functions
+%% --------------------------------------------------------------
+
+create_cache(St = #state{cache = no_cache}) ->
+ Cache = ets:new(code_cache, [protected]),
+ rehash_cache(Cache, St);
+create_cache(St) ->
+ rehash_cache(St).
+
+rehash_cache(St = #state{cache = no_cache}) ->
+ St;
+rehash_cache(St = #state{cache = OldCache}) ->
+ ets:delete(OldCache),
+ Cache = ets:new(code_cache, [protected]),
+ rehash_cache(Cache, St).
+
+rehash_cache(Cache, St = #state{path = Path}) ->
+ Exts = [{obj,objfile_extension()}, {app,".app"}],
+ {Cache,NewPath} = locate_mods(lists:reverse(Path), first, Exts, Cache, []),
+ St#state{cache = Cache, path=NewPath}.
+
+update_cache(Dirs, Where, Cache0) ->
+ Exts = [{obj,objfile_extension()}, {app,".app"}],
+ {Cache, _} = locate_mods(Dirs, Where, Exts, Cache0, []),
+ Cache.
+
+locate_mods([Dir0|Path], Where, Exts, Cache, Acc) ->
+ Dir = absname(Dir0), %% Cache always expands the path
+ case erl_prim_loader:list_dir(Dir) of
+ {ok, Files} ->
+ Cache = filter_mods(Files, Where, Exts, Dir, Cache),
+ locate_mods(Path, Where, Exts, Cache, [Dir|Acc]);
+ error ->
+ locate_mods(Path, Where, Exts, Cache, Acc)
+ end;
+locate_mods([], _, _, Cache, Path) ->
+ {Cache,Path}.
+
+filter_mods([File|Rest], Where, Exts, Dir, Cache) ->
+ Ext = filename:extension(File),
+ Root = list_to_atom(filename:rootname(File, Ext)),
+ case lists:keysearch(Ext, 2, Exts) of
+ {value,{Type,_}} ->
+ Key = {Type,Root},
+ case Where of
+ first ->
+ true = ets:insert(Cache, {Key,Dir});
+ last ->
+ case ets:lookup(Cache, Key) of
+ [] ->
+ true = ets:insert(Cache, {Key,Dir});
+ _ ->
+ ignore
+ end
+ end;
+ false ->
+ ok
+ end,
+ filter_mods(Rest, Where, Exts, Dir, Cache);
+
+filter_mods([], _, _, _, Cache) ->
+ Cache.
+
+%% --------------------------------------------------------------
+%% Path handling functions.
+%% --------------------------------------------------------------
+
+%%
+%% Create the initial path.
+%%
+make_path(BundleDir,Bundles0) ->
+ Bundles = choose_bundles(Bundles0),
+ make_path(BundleDir,Bundles,[],[]).
+
+choose_bundles(Bundles) ->
+ ArchiveExt = archive_extension(),
+ Bs = lists:sort([create_bundle(B,ArchiveExt) || B <- Bundles]),
+ [FullName || {_Name,_NumVsn,FullName} <-
+ choose(lists:reverse(Bs), [], ArchiveExt)].
+
+create_bundle(FullName,ArchiveExt) ->
+ BaseName = filename:basename(FullName,ArchiveExt),
+ case split(BaseName, "-") of
+ Toks when length(Toks) > 1 ->
+ VsnStr = lists:last(Toks),
+ case vsn_to_num(VsnStr) of
+ {ok, VsnNum} ->
+ Name = join(lists:sublist(Toks,length(Toks)-1),"-"),
+ {Name,VsnNum,FullName};
+ false ->
+ {FullName, [0], FullName}
+ end;
+ _ ->
+ {FullName,[0],FullName}
+ end.
+
+%% Convert "X.Y.Z. ..." to [K, L, M| ...]
+vsn_to_num(Vsn) ->
+ case is_vsn(Vsn) of
+ true ->
+ {ok, [list_to_integer(S) || S <- split(Vsn, ".")]};
+ _ ->
+ false
+ end.
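+
+%% For example:
+%%
+%%   vsn_to_num("2.13.4") =:= {ok, [2, 13, 4]}
+%%   vsn_to_num("R13B")   =:= false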
+
+is_vsn(Str) when is_list(Str) ->
+ Vsns = split(Str, "."),
+ lists:all(fun is_numstr/1, Vsns).
+
+is_numstr(Cs) ->
+ lists:all(fun (C) when $0 =< C, C =< $9 -> true;
+ (_) -> false
+ end, Cs).
+
+split(Cs, S) ->
+ split1(Cs, S, []).
+
+split1([C|S], Seps, Toks) ->
+ case lists:member(C, Seps) of
+ true -> split1(S, Seps, Toks);
+ false -> split2(S, Seps, Toks, [C])
+ end;
+split1([], _Seps, Toks) ->
+ lists:reverse(Toks).
+
+split2([C|S], Seps, Toks, Cs) ->
+ case lists:member(C, Seps) of
+ true -> split1(S, Seps, [lists:reverse(Cs)|Toks]);
+ false -> split2(S, Seps, Toks, [C|Cs])
+ end;
+split2([], _Seps, Toks, Cs) ->
+ lists:reverse([lists:reverse(Cs)|Toks]).
+
+join([H1, H2| T], S) ->
+ H1 ++ S ++ join([H2| T], S);
+join([H], _) ->
+ H;
+join([], _) ->
+ [].
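+
+%% For example:
+%%
+%%   split("kernel-2.13.4", "-")     =:= ["kernel", "2.13.4"]
+%%   split("2.13.4", ".")            =:= ["2", "13", "4"]
+%%   join(["kernel", "2.13.4"], "-") =:= "kernel-2.13.4"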
+
+choose([{Name,NumVsn,NewFullName}=New|Bs], Acc, ArchiveExt) ->
+ case lists:keysearch(Name,1,Acc) of
+ {value, {_, NV, OldFullName}} when NV =:= NumVsn ->
+ case filename:extension(OldFullName) =:= ArchiveExt of
+ false ->
+ choose(Bs,Acc, ArchiveExt);
+ true ->
+ Acc2 = lists:keystore(Name, 1, Acc, New),
+ choose(Bs,Acc2, ArchiveExt)
+ end;
+ {value, {_, _, _}} ->
+ choose(Bs,Acc, ArchiveExt);
+ false ->
+ choose(Bs,[{Name,NumVsn,NewFullName}|Acc], ArchiveExt)
+ end;
+choose([],Acc, _ArchiveExt) ->
+ Acc.
+
+make_path(_,[],Res,Bs) ->
+ {Res,Bs};
+make_path(BundleDir,[Bundle|Tail],Res,Bs) ->
+ Dir = filename:append(BundleDir,Bundle),
+ Ebin = filename:append(Dir,"ebin"),
+ %% First try with /ebin
+ case erl_prim_loader:read_file_info(Ebin) of
+ {ok,#file_info{type=directory}} ->
+ make_path(BundleDir,Tail,[Ebin|Res],[Bundle|Bs]);
+ _ ->
+ %% Second try with archive
+ Ext = archive_extension(),
+ Base = filename:basename(Dir, Ext),
+ Ebin2 = filename:join([filename:dirname(Dir), Base ++ Ext, Base, "ebin"]),
+ Ebins =
+ case split(Base, "-") of
+ Toks when length(Toks) > 1 ->
+ AppName = join(lists:sublist(Toks,length(Toks)-1),"-"),
+ Ebin3 = filename:join([filename:dirname(Dir), Base ++ Ext, AppName, "ebin"]),
+ [Ebin3, Ebin2, Dir];
+ _ ->
+ [Ebin2, Dir]
+ end,
+ try_ebin_dirs(Ebins,BundleDir,Tail,Res,Bundle, Bs)
+ end.
+
+try_ebin_dirs([Ebin | Ebins],BundleDir,Tail,Res,Bundle,Bs) ->
+ case erl_prim_loader:read_file_info(Ebin) of
+ {ok,#file_info{type=directory}} ->
+ make_path(BundleDir,Tail,[Ebin|Res],[Bundle|Bs]);
+ _ ->
+ try_ebin_dirs(Ebins,BundleDir,Tail,Res,Bundle,Bs)
+ end;
+try_ebin_dirs([],BundleDir,Tail,Res,_Bundle,Bs) ->
+ make_path(BundleDir,Tail,Res,Bs).
+
+
+%%
+%% Add the erl_prim_loader path.
+%%
+%%
+add_loader_path(IPath0,Mode) ->
+ {ok,PrimP0} = erl_prim_loader:get_path(),
+ case Mode of
+ embedded ->
+ strip_path(PrimP0, Mode); % i.e. only normalize
+ _ ->
+ Pa0 = get_arg(pa),
+ Pz0 = get_arg(pz),
+
+ Pa = patch_path(Pa0),
+ Pz = patch_path(Pz0),
+ PrimP = patch_path(PrimP0),
+ IPath = patch_path(IPath0),
+
+ P = exclude_pa_pz(PrimP,Pa,Pz),
+ Path0 = strip_path(P, Mode),
+ Path = add(Path0, IPath, []),
+ add_pa_pz(Path,Pa,Pz)
+ end.
+
+patch_path(Path) ->
+ case check_path(Path) of
+ {ok, NewPath} -> NewPath;
+ {error, _Reason} -> Path
+ end.
+
+%% Since the erl_prim_loader path includes the -pa and -pz
+%% directories, they have to be removed first!
+exclude_pa_pz(P0,Pa,Pz) ->
+ P1 = excl(Pa, P0),
+ P = excl(Pz, lists:reverse(P1)),
+ lists:reverse(P).
+
+excl([], P) ->
+ P;
+excl([D|Ds], P) ->
+ excl(Ds, lists:delete(D, P)).
+
+%%
+%% Keep only 'valid' paths in the code server.
+%% This is done only in interactive mode; in an embedded
+%% system we cannot rely on the file system.
+%%
+
+strip_path([P0|Ps], Mode) ->
+ P = filename:join([P0]), % Normalize
+ case check_path([P]) of
+ {ok, [NewP]} ->
+ [NewP|strip_path(Ps, Mode)];
+ _ when Mode =:= embedded ->
+ [P|strip_path(Ps, Mode)];
+ _ ->
+ strip_path(Ps, Mode)
+ end;
+strip_path(_, _) ->
+ [].
+
+%%
+%% Add only non-existing paths.
+%% Also delete other versions of directories,
+%% e.g. .../test-3.2/ebin should exclude .../test-*/ebin (and .../test/ebin).
+%% Put the Path directories first in resulting path.
+%%
+add(Path,["."|IPath],Acc) ->
+ RPath = add1(Path,IPath,Acc),
+ ["."|lists:delete(".",RPath)];
+add(Path,IPath,Acc) ->
+ add1(Path,IPath,Acc).
+
+add1([P|Path],IPath,Acc) ->
+ case lists:member(P,Acc) of
+ true ->
+ add1(Path,IPath,Acc); % Already added
+ false ->
+ IPath1 = exclude(P,IPath),
+ add1(Path,IPath1,[P|Acc])
+ end;
+add1(_,IPath,Acc) ->
+ lists:reverse(Acc) ++ IPath.
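+
+%% For example (illustrative directories), adding a path that contains
+%% a newer version of an application shadows the older one in IPath:
+%%
+%%   add(["/p/test-3.2/ebin"],
+%%       [".", "/p/test-3.1/ebin", "/p/other-1.0/ebin"], [])
+%%     =:= [".", "/p/test-3.2/ebin", "/p/other-1.0/ebin"]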
+
+add_pa_pz(Path0, Patha, Pathz) ->
+ {_,Path1} = add_paths(first,Patha,Path0,false),
+ {_,Path2} = add_paths(first,Pathz,lists:reverse(Path1),false),
+ lists:reverse(Path2).
+
+get_arg(Arg) ->
+ case init:get_argument(Arg) of
+ {ok, Values} ->
+ lists:append(Values);
+ _ ->
+ []
+ end.
+
+%%
+%% Exclude other versions of Dir or duplicates.
+%% Return a new Path.
+%%
+exclude(Dir,Path) ->
+ Name = get_name(Dir),
+ [D || D <- Path,
+ D =/= Dir,
+ get_name(D) =/= Name].
+
+%%
+%% Get the "Name" of a directory. A directory in the code server path
+%% has the following form: .../Name-Vsn or .../Name
+%% where Vsn is any sortable term (the newest directory is sorted as
+%% the greatest term).
+%%
+%%
+get_name(Dir) ->
+ get_name2(get_name1(Dir), []).
+
+get_name1(Dir) ->
+ case lists:reverse(filename:split(Dir)) of
+ ["ebin",DirName|_] -> DirName;
+ [DirName|_] -> DirName;
+ _ -> "" % No name !
+ end.
+
+get_name2([$-|_],Acc) -> lists:reverse(Acc);
+get_name2([H|T],Acc) -> get_name2(T,[H|Acc]);
+get_name2(_,Acc) -> lists:reverse(Acc).
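+
+%% For example (illustrative directories):
+%%
+%%   get_name("/otp/lib/mnesia-4.4.12/ebin") =:= "mnesia"
+%%   get_name("/otp/lib/mnesia-4.4.12")      =:= "mnesia"
+%%   get_name("somedir")                     =:= "somedir"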
+
+check_path(Path) ->
+ PathChoice = init:code_path_choice(),
+ ArchiveExt = archive_extension(),
+ do_check_path(Path, PathChoice, ArchiveExt, []).
+
+do_check_path([], _PathChoice, _ArchiveExt, Acc) ->
+ {ok, lists:reverse(Acc)};
+do_check_path([Dir | Tail], PathChoice, ArchiveExt, Acc) ->
+ case catch erl_prim_loader:read_file_info(Dir) of
+ {ok, #file_info{type=directory}} ->
+ do_check_path(Tail, PathChoice, ArchiveExt, [Dir | Acc]);
+ _ when PathChoice =:= strict ->
+ %% Be strict. Only use dir as explicitly stated
+ {error, bad_directory};
+ _ when PathChoice =:= relaxed ->
+ %% Be relaxed
+ case catch lists:reverse(filename:split(Dir)) of
+ {'EXIT', _} ->
+ {error, bad_directory};
+ ["ebin", App] ->
+ Dir2 = filename:join([App ++ ArchiveExt, App, "ebin"]),
+ case erl_prim_loader:read_file_info(Dir2) of
+ {ok, #file_info{type = directory}} ->
+ do_check_path(Tail, PathChoice, ArchiveExt, [Dir2 | Acc]);
+ _ ->
+ {error, bad_directory}
+ end;
+ ["ebin", App, OptArchive | RevTop] ->
+ Ext = filename:extension(OptArchive),
+ Base = filename:basename(OptArchive, Ext),
+ Dir2 =
+ if
+ Ext =:= ArchiveExt, Base =:= App ->
+ %% .../app-vsn.ez/app-vsn/ebin
+ Top = lists:reverse(RevTop),
+ filename:join(Top ++ [App, "ebin"]);
+ Ext =:= ArchiveExt ->
+ %% .../app-vsn.ez/xxx/ebin
+ {error, bad_directory};
+ true ->
+ %% .../app-vsn/ebin
+ Top = lists:reverse([OptArchive | RevTop]),
+ filename:join(Top ++ [App ++ ArchiveExt, App, "ebin"])
+ end,
+ case erl_prim_loader:read_file_info(Dir2) of
+ {ok, #file_info{type = directory}} ->
+ do_check_path(Tail, PathChoice, ArchiveExt, [Dir2 | Acc]);
+ _ ->
+ {error, bad_directory}
+ end;
+ _ ->
+ {error, bad_directory}
+ end
+ end.
+
+%%
+%% Add new path(s).
+%%
+add_path(Where,Dir,Path,NameDb) when is_atom(Dir) ->
+ add_path(Where,atom_to_list(Dir),Path,NameDb);
+add_path(Where,Dir0,Path,NameDb) when is_list(Dir0) ->
+ case int_list(Dir0) of
+ true ->
+ Dir = filename:join([Dir0]), % Normalize
+ case check_path([Dir]) of
+ {ok, [NewDir]} ->
+ {true, do_add(Where,NewDir,Path,NameDb)};
+ Error ->
+ {Error, Path}
+ end;
+ false ->
+ {{error, bad_directory}, Path}
+ end;
+add_path(_,_,Path,_) ->
+ {{error, bad_directory}, Path}.
+
+
+%%
+%% If the new directory is added first, or if the directory did not
+%% already exist, the name-directory table must be updated.
+%% If NameDb is false we should NOT update it, as that is done later,
+%% when the table is created :-)
+%%
+do_add(first,Dir,Path,NameDb) ->
+ update(Dir,NameDb),
+ [Dir|lists:delete(Dir,Path)];
+do_add(last,Dir,Path,NameDb) ->
+ case lists:member(Dir,Path) of
+ true ->
+ Path;
+ false ->
+ maybe_update(Dir,NameDb),
+ Path ++ [Dir]
+ end.
+
+%% Do not update if the same name already exists !
+maybe_update(Dir,NameDb) ->
+ case lookup_name(get_name(Dir),NameDb) of
+ false -> update(Dir,NameDb);
+ _ -> false
+ end.
+
+update(_Dir, false) ->
+ ok;
+update(Dir,NameDb) ->
+ replace_name(Dir,NameDb).
+
+
+
+%%
+%% Set a completely new path.
+%%
+set_path(NewPath0, OldPath, NameDb) ->
+ NewPath = normalize(NewPath0),
+ case check_path(NewPath) of
+ {ok, NewPath2} ->
+ ets:delete(NameDb),
+ NewDb = init_namedb(NewPath2),
+ {true, NewPath2, NewDb};
+ Error ->
+ {Error, OldPath, NameDb}
+ end.
+
+%%
+%% Normalize the given path.
+%% The check_path function catches erroneous paths,
+%% so errors are ignored here.
+%%
+normalize([P|Path]) when is_atom(P) ->
+ normalize([atom_to_list(P)|Path]);
+normalize([P|Path]) when is_list(P) ->
+ case int_list(P) of
+ true -> [filename:join([P])|normalize(Path)];
+ false -> [P|normalize(Path)]
+ end;
+normalize([P|Path]) ->
+ [P|normalize(Path)];
+normalize([]) ->
+ [];
+normalize(Other) ->
+ Other.
+
+%% Handle a table of name-directory pairs.
+%% The priv_dir/1 and lib_dir/1 functions will have
+%% an O(1) lookup.
+init_namedb(Path) ->
+ Db = ets:new(code_names,[private]),
+ init_namedb(lists:reverse(Path), Db),
+ Db.
+
+init_namedb([P|Path], Db) ->
+ insert_name(P, Db),
+ init_namedb(Path, Db);
+init_namedb([], _) ->
+ ok.
+
+-ifdef(NOTUSED).
+clear_namedb([P|Path], Db) ->
+ delete_name_dir(P, Db),
+ clear_namedb(Path, Db);
+clear_namedb([], _) ->
+ ok.
+-endif.
+
+insert_name(Dir, Db) ->
+ case get_name(Dir) of
+ Dir -> false;
+ Name -> insert_name(Name, Dir, Db)
+ end.
+
+insert_name(Name, Dir, Db) ->
+ AppDir = del_ebin(Dir),
+ {Base, SubDirs} = archive_subdirs(AppDir),
+ ets:insert(Db, {Name, AppDir, Base, SubDirs}),
+ true.
+
+archive_subdirs(AppDir) ->
+ IsDir =
+ fun(RelFile) ->
+ File = filename:join([AppDir, RelFile]),
+ case erl_prim_loader:read_file_info(File) of
+ {ok, #file_info{type = directory}} ->
+ false;
+ _ ->
+ true
+ end
+ end,
+ {Base, ArchiveDirs} = all_archive_subdirs(AppDir),
+ {Base, lists:filter(IsDir, ArchiveDirs)}.
+
+all_archive_subdirs(AppDir) ->
+ Ext = archive_extension(),
+ Base = filename:basename(AppDir),
+ Dirs =
+ case split(Base, "-") of
+ Toks when length(Toks) > 1 ->
+ Base2 = join(lists:sublist(Toks,length(Toks)-1),"-"),
+ [Base2, Base];
+ _ ->
+ [Base]
+ end,
+ try_archive_subdirs(AppDir ++ Ext, Base, Dirs).
+
+try_archive_subdirs(Archive, Base, [Dir | Dirs]) ->
+ ArchiveDir = filename:join([Archive, Dir]),
+ case erl_prim_loader:list_dir(ArchiveDir) of
+ {ok, Files} ->
+ IsDir =
+ fun(RelFile) ->
+ File = filename:join([ArchiveDir, RelFile]),
+ case erl_prim_loader:read_file_info(File) of
+ {ok, #file_info{type = directory}} ->
+ true;
+ _ ->
+ false
+ end
+ end,
+ {Dir, lists:filter(IsDir, Files)};
+ _ ->
+ try_archive_subdirs(Archive, Base, Dirs)
+ end;
+try_archive_subdirs(_Archive, Base, []) ->
+ {Base, []}.
+
+%%
+%% Delete a directory from Path.
+%% Name can be either the name in .../Name[-*] or
+%% the complete directory name.
+%%
+del_path(Name0,Path,NameDb) ->
+ case catch to_list(Name0)of
+ {'EXIT',_} ->
+ {{error,bad_name},Path};
+ Name ->
+ case del_path1(Name,Path,NameDb) of
+ Path -> % Nothing has changed
+ {false,Path};
+ NewPath ->
+ {true,NewPath}
+ end
+ end.
+
+del_path1(Name,[P|Path],NameDb) ->
+ case get_name(P) of
+ Name ->
+ delete_name(Name, NameDb),
+ insert_old_shadowed(Name, Path, NameDb),
+ Path;
+ _ when Name =:= P ->
+ case delete_name_dir(Name, NameDb) of
+ true -> insert_old_shadowed(get_name(Name), Path, NameDb);
+ false -> ok
+ end,
+ Path;
+ _ ->
+ [P|del_path1(Name,Path,NameDb)]
+ end;
+del_path1(_,[],_) ->
+ [].
+
+insert_old_shadowed(Name, [P|Path], NameDb) ->
+ case get_name(P) of
+ Name -> insert_name(Name, P, NameDb);
+ _ -> insert_old_shadowed(Name, Path, NameDb)
+ end;
+insert_old_shadowed(_, [], _) ->
+ ok.
+
+%%
+%% Replace an old occurrence of a directory with name .../Name[-*].
+%% If it does not exist, put the new directory last in Path.
+%%
+replace_path(Name,Dir,Path,NameDb) ->
+ case catch check_pars(Name,Dir) of
+ {ok,N,D} ->
+ {true,replace_path1(N,D,Path,NameDb)};
+ {'EXIT',_} ->
+ {{error,{badarg,[Name,Dir]}},Path};
+ Error ->
+ {Error,Path}
+ end.
+
+replace_path1(Name,Dir,[P|Path],NameDb) ->
+ case get_name(P) of
+ Name ->
+ insert_name(Name, Dir, NameDb),
+ [Dir|Path];
+ _ ->
+ [P|replace_path1(Name,Dir,Path,NameDb)]
+ end;
+replace_path1(Name, Dir, [], NameDb) ->
+ insert_name(Name, Dir, NameDb),
+ [Dir].
+
+check_pars(Name,Dir) ->
+ N = to_list(Name),
+ D = filename:join([to_list(Dir)]), % Normalize
+ case get_name(Dir) of
+ N ->
+ case check_path([D]) of
+ {ok, [NewD]} ->
+ {ok,N,NewD};
+ Error ->
+ Error
+ end;
+ _ ->
+ {error,bad_name}
+ end.
+
+
+del_ebin(Dir) ->
+ case filename:basename(Dir) of
+ "ebin" ->
+ Dir2 = filename:dirname(Dir),
+ Dir3 = filename:dirname(Dir2),
+ Ext = archive_extension(),
+ case filename:extension(Dir3) of
+ E when E =:= Ext ->
+ %% Strip archive extension
+ filename:join([filename:dirname(Dir3),
+ filename:basename(Dir3, Ext)]);
+ _ ->
+ Dir2
+ end;
+ _ ->
+ Dir
+ end.
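+
+%% For example (illustrative paths, assuming the default ".ez" archive
+%% extension):
+%%
+%%   del_ebin("/otp/lib/mnesia-4.4.12/ebin")
+%%     =:= "/otp/lib/mnesia-4.4.12"
+%%   del_ebin("/otp/lib/mnesia-4.4.12.ez/mnesia-4.4.12/ebin")
+%%     =:= "/otp/lib/mnesia-4.4.12"
+%%   del_ebin("/otp/lib/mnesia-4.4.12")
+%%     =:= "/otp/lib/mnesia-4.4.12"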
+
+
+
+replace_name(Dir, Db) ->
+ case get_name(Dir) of
+ Dir ->
+ false;
+ Name ->
+ delete_name(Name, Db),
+ insert_name(Name, Dir, Db)
+ end.
+
+delete_name(Name, Db) ->
+ ets:delete(Db, Name).
+
+delete_name_dir(Dir, Db) ->
+ case get_name(Dir) of
+ Dir -> false;
+ Name ->
+ Dir0 = del_ebin(Dir),
+ case lookup_name(Name, Db) of
+ {ok, Dir0, _Base, _SubDirs} ->
+ ets:delete(Db, Name),
+ true;
+ _ -> false
+ end
+ end.
+
+lookup_name(Name, Db) ->
+ case ets:lookup(Db, Name) of
+ [{Name, Dir, Base, SubDirs}] -> {ok, Dir, Base, SubDirs};
+ _ -> false
+ end.
+
+
+%%
+%% Fetch a directory.
+%%
+do_dir(Root,lib_dir,_) ->
+ filename:append(Root, "lib");
+do_dir(Root,root_dir,_) ->
+ Root;
+do_dir(_Root,compiler_dir,NameDb) ->
+ case lookup_name("compiler", NameDb) of
+ {ok, Dir, _Base, _SubDirs} -> Dir;
+ _ -> ""
+ end;
+do_dir(_Root,{lib_dir,Name},NameDb) ->
+ case catch lookup_name(to_list(Name), NameDb) of
+ {ok, Dir, _Base, _SubDirs} -> Dir;
+ _ -> {error, bad_name}
+ end;
+do_dir(_Root,{lib_dir,Name,SubDir0},NameDb) ->
+ SubDir = atom_to_list(SubDir0),
+ case catch lookup_name(to_list(Name), NameDb) of
+ {ok, Dir, Base, SubDirs} ->
+ case lists:member(SubDir, SubDirs) of
+ true ->
+ %% Subdir is in archive
+ filename:join([Dir ++ archive_extension(),
+ Base,
+ SubDir]);
+ false ->
+ %% Subdir is regular directory
+ filename:join([Dir, SubDir])
+ end;
+ _ ->
+ {error, bad_name}
+ end;
+do_dir(_Root,{priv_dir,Name},NameDb) ->
+ do_dir(_Root,{lib_dir,Name,priv},NameDb);
+do_dir(_, _, _) ->
+ 'bad request to code'.
+
+stick_dir(Dir, Stick, St) ->
+ case erl_prim_loader:list_dir(Dir) of
+ {ok,Listing} ->
+ Mods = get_mods(Listing, objfile_extension()),
+ Db = St#state.moddb,
+ case Stick of
+ true ->
+ foreach(fun (M) -> ets:insert(Db, {{sticky,M},true}) end, Mods);
+ false ->
+ foreach(fun (M) -> ets:delete(Db, {sticky,M}) end, Mods)
+ end;
+ Error ->
+ Error
+ end.
+
+stick_mod(M, Stick, St) ->
+ Db = St#state.moddb,
+ case Stick of
+ true ->
+ ets:insert(Db, {{sticky,M},true});
+ false ->
+ ets:delete(Db, {sticky,M})
+ end.
+
+get_mods([File|Tail], Extension) ->
+ case filename:extension(File) of
+ Extension ->
+ [list_to_atom(filename:basename(File, Extension)) |
+ get_mods(Tail, Extension)];
+ _ ->
+ get_mods(Tail, Extension)
+ end;
+get_mods([], _) -> [].
+
+is_sticky(Mod, Db) ->
+ case erlang:module_loaded(Mod) of
+ true ->
+ case ets:lookup(Db, {sticky,Mod}) of
+ [] -> false;
+ _ -> true
+ end;
+ false ->
+ false
+ end.
+
+add_paths(Where,[Dir|Tail],Path,NameDb) ->
+ {_,NPath} = add_path(Where,Dir,Path,NameDb),
+ add_paths(Where,Tail,NPath,NameDb);
+add_paths(_,_,Path,_) ->
+ {ok,Path}.
+
+
+do_load_binary(Module, File, Binary, Caller, St) ->
+ case modp(Module) andalso modp(File) andalso is_binary(Binary) of
+ true ->
+ case erlang:module_loaded(to_atom(Module)) of
+ true -> do_purge(Module);
+ false -> ok
+ end,
+ try_load_module(File, Module, Binary, Caller, St);
+ false ->
+ {reply,{error,badarg},St}
+ end.
+
+modp(Atom) when is_atom(Atom) -> true;
+modp(List) when is_list(List) -> int_list(List);
+modp(_) -> false.
+
+
+load_abs(File, Mod0, Caller, St) ->
+ Ext = objfile_extension(),
+ FileName0 = lists:concat([File, Ext]),
+ FileName = absname(FileName0),
+ Mod = if Mod0 =:= [] ->
+ list_to_atom(filename:basename(FileName0, Ext));
+ true ->
+ Mod0
+ end,
+ case erl_prim_loader:get_file(FileName) of
+ {ok,Bin,_} ->
+ try_load_module(FileName, Mod, Bin, Caller, St);
+ error ->
+ {reply,{error,nofile},St}
+ end.
+
+try_load_module(Mod, Dir, Caller, St) ->
+ File = filename:append(Dir, to_path(Mod) ++
+ objfile_extension()),
+ case erl_prim_loader:get_file(File) of
+ error ->
+ {reply,error,St};
+ {ok,Binary,FName} ->
+ try_load_module(absname(FName), Mod, Binary, Caller, St)
+ end.
+
+try_load_module(File, Mod, Bin, {From,_}=Caller, St0) ->
+ M = to_atom(Mod),
+ case pending_on_load(M, From, St0) of
+ no ->
+ try_load_module_1(File, M, Bin, Caller, St0);
+ {yes,St} ->
+ {noreply,St}
+ end.
+
+try_load_module_1(File, Mod, Bin, Caller, #state{moddb=Db}=St) ->
+ case is_sticky(Mod, Db) of
+ true -> %% Sticky directory: reject the load.
+ error_msg("Can't load module that resides in sticky dir\n",[]),
+ {reply,{error,sticky_directory},St};
+ false ->
+ case catch load_native_code(Mod, Bin) of
+ {module,Mod} ->
+ ets:insert(Db, {Mod,File}),
+ {reply,{module,Mod},St};
+ no_native ->
+ case erlang:load_module(Mod, Bin) of
+ {module,Mod} ->
+ ets:insert(Db, {Mod,File}),
+ post_beam_load(Mod),
+ {reply,{module,Mod},St};
+ {error,on_load} ->
+ handle_on_load(Mod, File, Caller, St);
+ {error,What} ->
+ error_msg("Loading of ~s failed: ~p\n", [File, What]),
+ {reply,{error,What},St}
+ end;
+ Error ->
+ error_msg("Native loading of ~s failed: ~p\n",
+ [File,Error]),
+ {reply,ok,St}
+ end
+ end.
+
+load_native_code(Mod, Bin) ->
+ %% During bootstrapping of Open Source Erlang, we don't have any hipe
+ %% loader modules, but the Erlang emulator might be hipe enabled.
+ %% Therefore we must test that the loader modules are available
+ %% before trying to load native code.
+ case erlang:module_loaded(hipe_unified_loader) of
+ false -> no_native;
+ true -> hipe_unified_loader:load_native_code(Mod, Bin)
+ end.
+
+hipe_result_to_status(Result) ->
+ case Result of
+ {module,_} -> Result;
+ _ -> {error,Result}
+ end.
+
+post_beam_load(Mod) ->
+ case erlang:module_loaded(hipe_unified_loader) of
+ false -> ok;
+ true -> hipe_unified_loader:post_beam_load(Mod)
+ end.
+
+int_list([H|T]) when is_integer(H) -> int_list(T);
+int_list([_|_]) -> false;
+int_list([]) -> true.
+
+
+load_file(Mod, Caller, #state{path=Path,cache=no_cache}=St) ->
+ case mod_to_bin(Path, Mod) of
+ error ->
+ {reply,{error,nofile},St};
+ {Mod,Binary,File} ->
+ try_load_module(File, Mod, Binary, Caller, St)
+ end;
+load_file(Mod, Caller, #state{cache=Cache}=St0) ->
+ Key = {obj,Mod},
+ case ets:lookup(Cache, Key) of
+ [] ->
+ St = rehash_cache(St0),
+ case ets:lookup(St#state.cache, Key) of
+ [] ->
+ {reply,{error,nofile},St};
+ [{Key,Dir}] ->
+ try_load_module(Mod, Dir, Caller, St)
+ end;
+ [{Key,Dir}] ->
+ try_load_module(Mod, Dir, Caller, St0)
+ end.
+
+mod_to_bin([Dir|Tail], Mod) ->
+ File = filename:append(Dir, to_path(Mod) ++ objfile_extension()),
+ case erl_prim_loader:get_file(File) of
+ error ->
+ mod_to_bin(Tail, Mod);
+ {ok,Bin,FName} ->
+ {Mod,Bin,absname(FName)}
+ end;
+mod_to_bin([], Mod) ->
+ %% Finally, also try erl_prim_loader's own method.
+ File = to_path(Mod) ++ objfile_extension(),
+ case erl_prim_loader:get_file(File) of
+ error ->
+ error; % No more alternatives !
+ {ok,Bin,FName} ->
+ {Mod,Bin,absname(FName)}
+ end.
+
+absname(File) ->
+ case erl_prim_loader:get_cwd() of
+ {ok,Cwd} -> absname(File, Cwd);
+ _Error -> File
+ end.
+
+absname(Name, AbsBase) ->
+ case filename:pathtype(Name) of
+ relative ->
+ filename:absname_join(AbsBase, Name);
+ absolute ->
+ %% We must flatten the filename before passing it into join/1,
+ %% or we will get slashes inserted into the wrong places.
+ filename:join([filename:flatten(Name)]);
+ volumerelative ->
+ absname_vr(filename:split(Name), filename:split(AbsBase), AbsBase)
+ end.
+
+%% Handles volumerelative names (on Windows only).
+
+absname_vr(["/"|Rest1], [Volume|_], _AbsBase) ->
+ %% Absolute path on current drive.
+ filename:join([Volume|Rest1]);
+absname_vr([[X, $:]|Rest1], [[X|_]|_], AbsBase) ->
+ %% Relative to current directory on current drive.
+ absname(filename:join(Rest1), AbsBase);
+absname_vr([[X, $:]|Name], _, _AbsBase) ->
+ %% Relative to current directory on another drive.
+ Dcwd =
+ case erl_prim_loader:get_cwd([X, $:]) of
+ {ok, Dir} -> Dir;
+ error -> [X, $:, $/]
+ end,
+ absname(filename:join(Name), Dcwd).
+
+
+%% do_purge(Module)
+%% Kill all processes running code from *old* Module, and then purge the
+%% module. Return true if any processes were killed, otherwise false.
+
+do_purge(Mod) ->
+ do_purge(processes(), to_atom(Mod), false).
+
+do_purge([P|Ps], Mod, Purged) ->
+ case erlang:check_process_code(P, Mod) of
+ true ->
+ Ref = erlang:monitor(process, P),
+ exit(P, kill),
+ receive
+ {'DOWN',Ref,process,_Pid,_} -> ok
+ end,
+ do_purge(Ps, Mod, true);
+ false ->
+ do_purge(Ps, Mod, Purged)
+ end;
+do_purge([], Mod, Purged) ->
+ catch erlang:purge_module(Mod),
+ Purged.
+
+%% do_soft_purge(Module)
+%% Purge old code only if no processes remain that run the old code.
+%% Return true in that case; return false if such processes remain
+%% (in which case the old code is not purged).
+
+do_soft_purge(Mod) ->
+ catch do_soft_purge(processes(), Mod).
+
+do_soft_purge([P|Ps], Mod) ->
+ case erlang:check_process_code(P, Mod) of
+ true -> throw(false);
+ false -> do_soft_purge(Ps, Mod)
+ end;
+do_soft_purge([], Mod) ->
+ catch erlang:purge_module(Mod),
+ true.
+
+is_loaded(M, Db) ->
+ case ets:lookup(Db, M) of
+ [{M,File}] -> {file,File};
+ [] -> false
+ end.
+
+%% -------------------------------------------------------
+%% The on_load functionality.
+%% -------------------------------------------------------
+
+handle_on_load(Mod, File, {From,_}, #state{on_load=OnLoad0}=St0) ->
+ Fun = fun() ->
+ Res = erlang:call_on_load_function(Mod),
+ exit(Res)
+ end,
+ {_,Ref} = spawn_monitor(Fun),
+ OnLoad = [{Ref,Mod,File,[From]}|OnLoad0],
+ St = St0#state{on_load=OnLoad},
+ {noreply,St}.
+
+pending_on_load(_, _, #state{on_load=[]}) ->
+ no;
+pending_on_load(Mod, From, #state{on_load=OnLoad0}=St) ->
+ case lists:keymember(Mod, 2, OnLoad0) of
+ false ->
+ no;
+ true ->
+ OnLoad = pending_on_load_1(Mod, From, OnLoad0),
+ {yes,St#state{on_load=OnLoad}}
+ end.
+
+pending_on_load_1(Mod, From, [{Ref,Mod,File,Pids}|T]) ->
+ [{Ref,Mod,File,[From|Pids]}|T];
+pending_on_load_1(Mod, From, [H|T]) ->
+ [H|pending_on_load_1(Mod, From, T)];
+pending_on_load_1(_, _, []) -> [].
+
+finish_on_load(Ref, OnLoadRes, #state{on_load=OnLoad0,moddb=Db}=State) ->
+ case lists:keyfind(Ref, 1, OnLoad0) of
+ false ->
+ %% Since this process in general silently ignores messages
+ %% it doesn't understand, it should also ignore a 'DOWN'
+ %% message with an unknown reference.
+ State;
+ {Ref,Mod,File,WaitingPids} ->
+ finish_on_load_1(Mod, File, OnLoadRes, WaitingPids, Db),
+ OnLoad = [E || {R,_,_,_}=E <- OnLoad0, R =/= Ref],
+ State#state{on_load=OnLoad}
+ end.
+
+finish_on_load_1(Mod, File, OnLoadRes, WaitingPids, Db) ->
+ Keep = if
+ is_boolean(OnLoadRes) -> OnLoadRes;
+ true -> false
+ end,
+ erlang:finish_after_on_load(Mod, Keep),
+ Res = case Keep of
+ false -> {error,on_load_failure};
+ true ->
+ ets:insert(Db, {Mod,File}),
+ {module,Mod}
+ end,
+ [reply(Pid, Res) || Pid <- WaitingPids],
+ ok.
+
+%% -------------------------------------------------------
+%% Internal functions.
+%% -------------------------------------------------------
+
+all_loaded(Db) ->
+ all_l(Db, ets:slot(Db, 0), 1, []).
+
+all_l(_Db, '$end_of_table', _, Acc) ->
+ Acc;
+all_l(Db, ModInfo, N, Acc) ->
+ NewAcc = strip_mod_info(ModInfo,Acc),
+ all_l(Db, ets:slot(Db, N), N + 1, NewAcc).
+
+
+strip_mod_info([{{sticky,_},_}|T], Acc) -> strip_mod_info(T, Acc);
+strip_mod_info([H|T], Acc) -> strip_mod_info(T, [H|Acc]);
+strip_mod_info([], Acc) -> Acc.
+
+% error_msg(Format) ->
+% error_msg(Format,[]).
+error_msg(Format, Args) ->
+ Msg = {notify,{error, group_leader(), {self(), Format, Args}}},
+ error_logger ! Msg,
+ ok.
+
+info_msg(Format, Args) ->
+ Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}},
+ error_logger ! Msg,
+ ok.
+
+objfile_extension() ->
+ init:objfile_extension().
+
+archive_extension() ->
+ init:archive_extension().
+
+to_list(X) when is_list(X) -> X;
+to_list(X) when is_atom(X) -> atom_to_list(X).
+
+to_atom(X) when is_atom(X) -> X;
+to_atom(X) when is_list(X) -> list_to_atom(X).
+
+to_path(X) ->
+ filename:join(packages:split(X)).
diff --git a/lib/kernel/src/disk_log.erl b/lib/kernel/src/disk_log.erl
new file mode 100644
index 0000000000..7f1b5f9ec6
--- /dev/null
+++ b/lib/kernel/src/disk_log.erl
@@ -0,0 +1,1899 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(disk_log).
+
+%% Efficient file based log - process part
+
+-export([start/0, istart_link/1,
+ log/2, log_terms/2, blog/2, blog_terms/2,
+ alog/2, alog_terms/2, balog/2, balog_terms/2,
+ close/1, lclose/1, lclose/2, sync/1, open/1,
+ truncate/1, truncate/2, btruncate/2,
+ reopen/2, reopen/3, breopen/3, inc_wrap_file/1, change_size/2,
+ change_notify/3, change_header/2,
+ chunk/2, chunk/3, bchunk/2, bchunk/3, chunk_step/3, chunk_info/1,
+ block/1, block/2, unblock/1, info/1, format_error/1,
+ accessible_logs/0]).
+
+%% Internal exports
+-export([init/2, internal_open/2,
+ system_continue/3, system_terminate/4, system_code_change/4]).
+
+%% To be used by disk_log_h.erl (not (yet) in Erlang/OTP) only.
+-export([ll_open/1, ll_close/1, do_log/2, do_sync/1, do_info/2]).
+
+%% To be used by wrap_log_reader only.
+-export([ichunk_end/2]).
+
+%% To be used for debugging only:
+-export([pid2name/1]).
+
+-type dlog_state_error() :: 'ok' | {'error', term()}.
+
+-record(state, {queue = [],
+ messages = [],
+ parent,
+ server,
+ cnt = 0 :: non_neg_integer(),
+ args,
+ error_status = ok :: dlog_state_error(),
+ cache_error = ok %% cache write error after timeout
+ }).
+
+-include("disk_log.hrl").
+
+-define(failure(Error, Function, Arg),
+ {{failed, Error}, [{?MODULE, Function, Arg}]}).
+
+%%-define(PROFILE(C), C).
+-define(PROFILE(C), void).
+
+-compile({inline,[{log_loop,4},{log_end_sync,2},{replies,2},{rflat,1}]}).
+
+%%%----------------------------------------------------------------------
+%%% Contract type specifications
+%%%----------------------------------------------------------------------
+
+-type bytes() :: binary() | [byte()].
+
+-type log() :: term(). % XXX: refine
+-type file_error() :: term(). % XXX: refine
+-type invalid_header() :: term(). % XXX: refine
+
+%%%----------------------------------------------------------------------
+%%% API
+%%%----------------------------------------------------------------------
+
+%%-----------------------------------------------------------------
+%% This module implements the API, and the processes for each log.
+%% There is one process per log.
+%%-----------------------------------------------------------------
+
+-type open_error_rsn() :: 'no_such_log'
+ | {'badarg', term()}
+ | {'size_mismatch', dlog_size(), dlog_size()}
+ | {'arg_mismatch', dlog_optattr(), term(), term()}
+ | {'name_already_open', log()}
+ | {'open_read_write', log()}
+ | {'open_read_only', log()}
+ | {'need_repair', log()}
+ | {'not_a_log_file', string()}
+ | {'invalid_index_file', string()}
+ | {'invalid_header', invalid_header()}
+ | {'file_error', file:filename(), file_error()}
+ | {'node_already_open', log()}.
+-type dist_error_rsn() :: 'nodedown' | open_error_rsn().
+-type ret() :: {'ok', log()}
+ | {'repaired', log(), {'recovered', non_neg_integer()},
+ {'badbytes', non_neg_integer()}}.
+-type open_ret() :: ret() | {'error', open_error_rsn()}.
+-type dist_open_ret() :: {[{node(), ret()}],
+ [{node(), {'error', dist_error_rsn()}}]}.
+-type all_open_ret() :: open_ret() | dist_open_ret().
+
+-spec open(Args :: dlog_options()) -> all_open_ret().
+open(A) ->
+ disk_log_server:open(check_arg(A, #arg{options = A})).
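+
+%% A typical wrap log is opened, written to and closed like this
+%% (illustrative name, file and size; a wrap log of four 1 MB files):
+%%
+%%   {ok, my_log} = disk_log:open([{name, my_log},
+%%                                 {file, "/tmp/my_log.LOG"},
+%%                                 {type, wrap},
+%%                                 {size, {1024*1024, 4}}]),
+%%   ok = disk_log:log(my_log, {event, 1}),
+%%   ok = disk_log:close(my_log).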
+
+-type log_error_rsn() :: 'no_such_log' | 'nonode' | {'read_only_mode', log()}
+ | {'format_external', log()} | {'blocked_log', log()}
+ | {'full', log()} | {'invalid_header', invalid_header()}
+ | {'file_error', file:filename(), file_error()}.
+
+-spec log(Log :: log(), Term :: term()) -> 'ok' | {'error', log_error_rsn()}.
+log(Log, Term) ->
+ req(Log, {log, term_to_binary(Term)}).
+
+-spec blog(Log :: log(), Bytes :: bytes()) -> 'ok' | {'error', log_error_rsn()}.
+blog(Log, Bytes) ->
+ req(Log, {blog, check_bytes(Bytes)}).
+
+-spec log_terms(Log :: log(), Terms :: [term()]) -> 'ok' | {'error', term()}.
+log_terms(Log, Terms) ->
+ Bs = terms2bins(Terms),
+ req(Log, {log, Bs}).
+
+-spec blog_terms(Log :: log(), Bytes :: [bytes()]) -> 'ok' | {'error', term()}.
+blog_terms(Log, Bytess) ->
+ Bs = check_bytes_list(Bytess, Bytess),
+ req(Log, {blog, Bs}).
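+
+%% Sketch: log/2 and log_terms/2 are for internally formatted logs and
+%% accept arbitrary terms, while blog/2 and blog_terms/2 take bytes and
+%% work with both formats (log names hypothetical):
+%%   ok = disk_log:log(my_log, {event, node(), 42}),
+%%   ok = disk_log:blog(my_ext_log, <<"a line of bytes\n">>).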
+
+-type notify_ret() :: 'ok' | {'error', 'no_such_log'}.
+
+-spec alog(Log :: log(), Term :: term()) -> notify_ret().
+alog(Log, Term) ->
+ notify(Log, {alog, term_to_binary(Term)}).
+
+-spec alog_terms(Log :: log(), Terms :: [term()]) -> notify_ret().
+alog_terms(Log, Terms) ->
+ Bs = terms2bins(Terms),
+ notify(Log, {alog, Bs}).
+
+-spec balog(Log :: log(), Bytes :: bytes()) -> notify_ret().
+balog(Log, Bytes) ->
+ notify(Log, {balog, check_bytes(Bytes)}).
+
+-spec balog_terms(Log :: log(), Bytes :: [bytes()]) -> notify_ret().
+balog_terms(Log, Bytess) ->
+ Bs = check_bytes_list(Bytess, Bytess),
+ notify(Log, {balog, Bs}).
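+
+%% The a*-variants are asynchronous: 'ok' only means that the request has
+%% been sent to the log process, not that the item has been written.
+%% Sketch (log name hypothetical):
+%%   ok = disk_log:alog(my_log, {sample, 1}).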
+
+-type close_error_rsn() ::'no_such_log' | 'nonode'
+ | {'file_error', file:filename(), file_error()}.
+
+-spec close(Log :: log()) -> 'ok' | {'error', close_error_rsn()}.
+close(Log) ->
+ req(Log, close).
+
+-type lclose_error_rsn() :: 'no_such_log'
+ | {'file_error', file:filename(), file_error()}.
+
+-spec lclose(Log :: log()) -> 'ok' | {'error', lclose_error_rsn()}.
+lclose(Log) ->
+ lclose(Log, node()).
+
+-spec lclose(Log :: log(), Node :: node()) -> 'ok' | {'error', lclose_error_rsn()}.
+lclose(Log, Node) ->
+ lreq(Log, close, Node).
+
+-type trunc_error_rsn() :: 'no_such_log' | 'nonode'
+ | {'read_only_mode', log()}
+ | {'blocked_log', log()}
+ | {'invalid_header', invalid_header()}
+ | {'file_error', file:filename(), file_error()}.
+
+-spec truncate(Log :: log()) -> 'ok' | {'error', trunc_error_rsn()}.
+truncate(Log) ->
+ req(Log, {truncate, none, truncate, 1}).
+
+-spec truncate(Log :: log(), Head :: term()) -> 'ok' | {'error', trunc_error_rsn()}.
+truncate(Log, Head) ->
+ req(Log, {truncate, {ok, term_to_binary(Head)}, truncate, 2}).
+
+-spec btruncate(Log :: log(), Head :: bytes()) -> 'ok' | {'error', trunc_error_rsn()}.
+btruncate(Log, Head) ->
+ req(Log, {truncate, {ok, check_bytes(Head)}, btruncate, 2}).
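+
+%% Sketch: truncate/1,2 remove all logged items (truncate/2 also writes a
+%% new header term, internal format only); btruncate/2 is the bytes
+%% counterpart for externally formatted logs. Log name hypothetical:
+%%   ok = disk_log:truncate(my_log, {header, restarted}).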
+
+-spec reopen(Log :: log(), Filename :: file:filename()) -> 'ok' | {'error', term()}.
+reopen(Log, NewFile) ->
+ req(Log, {reopen, NewFile, none, reopen, 2}).
+
+-spec reopen(Log :: log(), Filename :: file:filename(), Head :: term()) ->
+ 'ok' | {'error', term()}.
+reopen(Log, NewFile, NewHead) ->
+ req(Log, {reopen, NewFile, {ok, term_to_binary(NewHead)}, reopen, 3}).
+
+-spec breopen(Log :: log(), Filename :: file:filename(), Head :: bytes()) ->
+ 'ok' | {'error', term()}.
+breopen(Log, NewFile, NewHead) ->
+ req(Log, {reopen, NewFile, {ok, check_bytes(NewHead)}, breopen, 3}).
+
+-type inc_wrap_error_rsn() :: 'no_such_log' | 'nonode'
+ | {'read_only_mode', log()}
+ | {'blocked_log', log()} | {'halt_log', log()}
+ | {'invalid_header', invalid_header()}
+ | {'file_error', file:filename(), file_error()}.
+
+-spec inc_wrap_file(Log :: log()) -> 'ok' | {'error', inc_wrap_error_rsn()}.
+inc_wrap_file(Log) ->
+ req(Log, inc_wrap_file).
+
+-spec change_size(Log :: log(), Size :: dlog_size()) -> 'ok' | {'error', term()}.
+change_size(Log, NewSize) ->
+ req(Log, {change_size, NewSize}).
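+
+%% Sketch (log name hypothetical): grow a wrap log to 10 files of 1 MB each:
+%%   ok = disk_log:change_size(my_wrap_log, {1024*1024, 10}).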
+
+-spec change_notify(Log :: log(), Pid :: pid(), Notify :: boolean()) ->
+ 'ok' | {'error', term()}.
+change_notify(Log, Pid, NewNotify) ->
+ req(Log, {change_notify, Pid, NewNotify}).
+
+-spec change_header(Log :: log(), Head :: {atom(), term()}) ->
+ 'ok' | {'error', term()}.
+change_header(Log, NewHead) ->
+ req(Log, {change_header, NewHead}).
+
+-type sync_error_rsn() :: 'no_such_log' | 'nonode' | {'read_only_mode', log()}
+ | {'blocked_log', log()}
+ | {'file_error', file:filename(), file_error()}.
+
+-spec sync(Log :: log()) -> 'ok' | {'error', sync_error_rsn()}.
+sync(Log) ->
+ req(Log, sync).
+
+-type block_error_rsn() :: 'no_such_log' | 'nonode' | {'blocked_log', log()}.
+
+-spec block(Log :: log()) -> 'ok' | {'error', block_error_rsn()}.
+block(Log) ->
+ block(Log, true).
+
+-spec block(Log :: log(), QueueLogRecords :: boolean()) -> 'ok' | {'error', term()}.
+block(Log, QueueLogRecords) ->
+ req(Log, {block, QueueLogRecords}).
+
+-type unblock_error_rsn() :: 'no_such_log' | 'nonode'
+ | {'not_blocked', log()}
+ | {'not_blocked_by_pid', log()}.
+
+-spec unblock(Log :: log()) -> 'ok' | {'error', unblock_error_rsn()}.
+unblock(Log) ->
+ req(Log, unblock).
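+
+%% Sketch: the blocking process gets exclusive access, e.g. while reading
+%% the log, and must unblock it afterwards (log name hypothetical):
+%%   ok = disk_log:block(my_log),
+%%   Chunk = disk_log:chunk(my_log, start),
+%%   ok = disk_log:unblock(my_log).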
+
+-spec format_error(Error :: term()) -> string().
+format_error(Error) ->
+ do_format_error(Error).
+
+-spec info(Log :: log()) -> [{atom(), any()}] | {'error', term()}.
+info(Log) ->
+ sreq(Log, info).
+
+-spec pid2name(Pid :: pid()) -> {'ok', log()} | 'undefined'.
+pid2name(Pid) ->
+ disk_log_server:start(),
+ case ets:lookup(?DISK_LOG_PID_TABLE, Pid) of
+ [] -> undefined;
+ [{_Pid, Log}] -> {ok, Log}
+ end.
+
+%% chunk/2 and chunk/3 take a Log, a Continuation and, optionally, N.
+%% They return {Cont2, ObjList} | eof | {error, Reason}.
+%% The initial continuation is the atom 'start'.
+
+-spec chunk(Log :: log(), Cont :: any()) ->
+ {'error', term()} | 'eof' | {any(), [any()]} | {any(), [any()], integer()}.
+chunk(Log, Cont) ->
+ chunk(Log, Cont, infinity).
+
+-spec chunk(Log :: log(), Cont :: any(), N :: pos_integer() | 'infinity') ->
+ {'error', term()} | 'eof' | {any(), [any()]} | {any(), [any()], integer()}.
+chunk(Log, Cont, infinity) ->
+ %% There cannot be more than ?MAX_CHUNK_SIZE terms in a chunk.
+ ichunk(Log, Cont, ?MAX_CHUNK_SIZE);
+chunk(Log, Cont, N) when is_integer(N), N > 0 ->
+ ichunk(Log, Cont, N).
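+
+%% Sketch: reading every item of a log (function and log names are
+%% hypothetical); a read-only log may also return {Cont2, Terms, BadBytes}:
+%%   read_all(Log) -> read_all(Log, start, []).
+%%   read_all(Log, Cont, Acc) ->
+%%       case disk_log:chunk(Log, Cont) of
+%%           eof -> lists:reverse(Acc);
+%%           {error, _} = E -> E;
+%%           {Cont2, Terms} -> read_all(Log, Cont2, lists:reverse(Terms, Acc))
+%%       end.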
+
+ichunk(Log, start, N) ->
+ R = sreq(Log, {chunk, 0, [], N}),
+ ichunk_end(R, Log);
+ichunk(Log, More, N) when is_record(More, continuation) ->
+ R = req2(More#continuation.pid,
+ {chunk, More#continuation.pos, More#continuation.b, N}),
+ ichunk_end(R, Log);
+ichunk(_Log, _, _) ->
+ {error, {badarg, continuation}}.
+
+ichunk_end({C, R}, Log) when is_record(C, continuation) ->
+ ichunk_end(R, read_write, Log, C, 0);
+ichunk_end({C, R, Bad}, Log) when is_record(C, continuation) ->
+ ichunk_end(R, read_only, Log, C, Bad);
+ichunk_end(R, _Log) ->
+ R.
+
+%% Create the terms on the client's heap, not the server's.
+%% The list of binaries is reversed.
+ichunk_end(R, Mode, Log, C, Bad) ->
+ case catch bins2terms(R, []) of
+ {'EXIT', _} ->
+ RR = lists:reverse(R),
+ ichunk_bad_end(RR, Mode, Log, C, Bad, []);
+ Ts when Bad > 0 ->
+ {C, Ts, Bad};
+ Ts when Bad =:= 0 ->
+ {C, Ts}
+ end.
+
+bins2terms([], L) ->
+ L;
+bins2terms([B | Bs], L) ->
+ bins2terms(Bs, [binary_to_term(B) | L]).
+
+ichunk_bad_end([B | Bs], Mode, Log, C, Bad, A) ->
+ case catch binary_to_term(B) of
+ {'EXIT', _} when read_write =:= Mode ->
+ InfoList = info(Log),
+ {value, {file, FileName}} = lists:keysearch(file, 1, InfoList),
+ File = case C#continuation.pos of
+ Pos when is_integer(Pos) -> FileName; % halt log
+ {FileNo, _} -> add_ext(FileName, FileNo) % wrap log
+ end,
+ {error, {corrupt_log_file, File}};
+ {'EXIT', _} when read_only =:= Mode ->
+ Reread = lists:foldl(fun(Bin, Sz) ->
+ Sz + byte_size(Bin) + ?HEADERSZ
+ end, 0, Bs),
+ NewPos = case C#continuation.pos of
+ Pos when is_integer(Pos) -> Pos-Reread;
+ {FileNo, Pos} -> {FileNo, Pos-Reread}
+ end,
+ NewBad = Bad + byte_size(B),
+ {C#continuation{pos = NewPos, b = []}, lists:reverse(A), NewBad};
+ T ->
+ ichunk_bad_end(Bs, Mode, Log, C, Bad, [T | A])
+ end.
+
+-spec bchunk(Log :: log(), Cont :: any()) ->
+ {'error', any()} | 'eof' | {any(), [binary()]} | {any(), [binary()], integer()}.
+bchunk(Log, Cont) ->
+ bchunk(Log, Cont, infinity).
+
+-spec bchunk(Log :: log(), Cont :: any(), N :: 'infinity' | pos_integer()) ->
+ {'error', any()} | 'eof' | {any(), [binary()]} | {any(), [binary()], integer()}.
+bchunk(Log, Cont, infinity) ->
+ %% There cannot be more than ?MAX_CHUNK_SIZE terms in a chunk.
+ bichunk(Log, Cont, ?MAX_CHUNK_SIZE);
+bchunk(Log, Cont, N) when is_integer(N), N > 0 ->
+ bichunk(Log, Cont, N).
+
+bichunk(Log, start, N) ->
+ R = sreq(Log, {chunk, 0, [], N}),
+ bichunk_end(R);
+bichunk(_Log, #continuation{pid = Pid, pos = Pos, b = B}, N) ->
+ R = req2(Pid, {chunk, Pos, B, N}),
+ bichunk_end(R);
+bichunk(_Log, _, _) ->
+ {error, {badarg, continuation}}.
+
+bichunk_end({C = #continuation{}, R}) ->
+ {C, lists:reverse(R)};
+bichunk_end({C = #continuation{}, R, Bad}) ->
+ {C, lists:reverse(R), Bad};
+bichunk_end(R) ->
+ R.
+
+-spec chunk_step(Log :: log(), Cont :: any(), N :: integer()) ->
+ {'ok', any()} | {'error', term()}.
+chunk_step(Log, Cont, N) when is_integer(N) ->
+ ichunk_step(Log, Cont, N).
+
+ichunk_step(Log, start, N) ->
+ sreq(Log, {chunk_step, 0, N});
+ichunk_step(_Log, More, N) when is_record(More, continuation) ->
+ req2(More#continuation.pid, {chunk_step, More#continuation.pos, N});
+ichunk_step(_Log, _, _) ->
+ {error, {badarg, continuation}}.
+
+-spec chunk_info(More :: any()) ->
+ [{'node', node()},...] | {'error', {'no_continuation', any()}}.
+chunk_info(More = #continuation{}) ->
+ [{node, node(More#continuation.pid)}];
+chunk_info(BadCont) ->
+ {error, {no_continuation, BadCont}}.
+
+-spec accessible_logs() -> {[_], [_]}.
+accessible_logs() ->
+ disk_log_server:accessible_logs().
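+
+%% The reply is {LocalLogs, DistributedLogs}, e.g. {[my_log], []} when a
+%% single local log is open (log name hypothetical).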
+
+istart_link(Server) ->
+ {ok, proc_lib:spawn_link(disk_log, init, [self(), Server])}.
+
+%% Only for backwards compatibility, could probably be removed.
+-spec start() -> 'ok'.
+start() ->
+ disk_log_server:start().
+
+internal_open(Pid, A) ->
+ req2(Pid, {internal_open, A}).
+
+%%% ll_open() and ll_close() are used by disk_log_h.erl, a module not
+%%% (yet) in Erlang/OTP.
+
+%% -spec ll_open(dlog_options()) -> {'ok', Res :: _, #log{}, Cnt :: _} | Error.
+ll_open(A) ->
+ case check_arg(A, #arg{options = A}) of
+ {ok, L} -> do_open(L);
+ Error -> Error
+ end.
+
+%% -> closed | throw(Error)
+ll_close(Log) ->
+ close_disk_log2(Log).
+
+check_arg([], Res) ->
+ Ret = case Res#arg.head of
+ none ->
+ {ok, Res};
+ _ ->
+ case check_head(Res#arg.head, Res#arg.format) of
+ {ok, Head} ->
+ {ok, Res#arg{head = Head}};
+ Error ->
+ Error
+ end
+ end,
+
+ if %% check result
+ Res#arg.name =:= 0 ->
+ {error, {badarg, name}};
+ Res#arg.file =:= none ->
+ case catch lists:concat([Res#arg.name, ".LOG"]) of
+ {'EXIT',_} -> {error, {badarg, file}};
+ FName -> check_arg([], Res#arg{file = FName})
+ end;
+ Res#arg.repair =:= truncate, Res#arg.mode =:= read_only ->
+ {error, {badarg, repair_read_only}};
+ Res#arg.type =:= halt, is_tuple(Res#arg.size) ->
+ {error, {badarg, size}};
+ Res#arg.type =:= wrap ->
+ {OldSize, Version} =
+ disk_log_1:read_size_file_version(Res#arg.file),
+ check_wrap_arg(Ret, OldSize, Version);
+ true ->
+ Ret
+ end;
+check_arg([{file, F} | Tail], Res) when is_list(F) ->
+ check_arg(Tail, Res#arg{file = F});
+check_arg([{file, F} | Tail], Res) when is_atom(F) ->
+ check_arg(Tail, Res#arg{file = F});
+check_arg([{linkto, Pid} |Tail], Res) when is_pid(Pid) ->
+ check_arg(Tail, Res#arg{linkto = Pid});
+check_arg([{linkto, none} |Tail], Res) ->
+ check_arg(Tail, Res#arg{linkto = none});
+check_arg([{name, Name}|Tail], Res) ->
+ check_arg(Tail, Res#arg{name = Name});
+check_arg([{repair, true}|Tail], Res) ->
+ check_arg(Tail, Res#arg{repair = true});
+check_arg([{repair, false}|Tail], Res) ->
+ check_arg(Tail, Res#arg{repair = false});
+check_arg([{repair, truncate}|Tail], Res) ->
+ check_arg(Tail, Res#arg{repair = truncate});
+check_arg([{size, Int}|Tail], Res) when is_integer(Int), Int > 0 ->
+ check_arg(Tail, Res#arg{size = Int});
+check_arg([{size, infinity}|Tail], Res) ->
+ check_arg(Tail, Res#arg{size = infinity});
+check_arg([{size, {MaxB,MaxF}}|Tail], Res) when is_integer(MaxB),
+ is_integer(MaxF),
+ MaxB > 0, MaxB =< ?MAX_BYTES,
+ MaxF > 0, MaxF < ?MAX_FILES ->
+ check_arg(Tail, Res#arg{size = {MaxB, MaxF}});
+check_arg([{type, wrap}|Tail], Res) ->
+ check_arg(Tail, Res#arg{type = wrap});
+check_arg([{type, halt}|Tail], Res) ->
+ check_arg(Tail, Res#arg{type = halt});
+check_arg([{format, internal}|Tail], Res) ->
+ check_arg(Tail, Res#arg{format = internal});
+check_arg([{format, external}|Tail], Res) ->
+ check_arg(Tail, Res#arg{format = external});
+check_arg([{distributed, []}|Tail], Res) ->
+ check_arg(Tail, Res#arg{distributed = false});
+check_arg([{distributed, Nodes}|Tail], Res) when is_list(Nodes) ->
+ check_arg(Tail, Res#arg{distributed = {true, Nodes}});
+check_arg([{notify, true}|Tail], Res) ->
+ check_arg(Tail, Res#arg{notify = true});
+check_arg([{notify, false}|Tail], Res) ->
+ check_arg(Tail, Res#arg{notify = false});
+check_arg([{head_func, HeadFunc}|Tail], Res) ->
+ check_arg(Tail, Res#arg{head = {head_func, HeadFunc}});
+check_arg([{head, Term}|Tail], Res) ->
+ check_arg(Tail, Res#arg{head = {head, Term}});
+check_arg([{mode, read_only}|Tail], Res) ->
+ check_arg(Tail, Res#arg{mode = read_only});
+check_arg([{mode, read_write}|Tail], Res) ->
+ check_arg(Tail, Res#arg{mode = read_write});
+check_arg(Arg, _) ->
+ {error, {badarg, Arg}}.
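+
+%% For example, disk_log:open([{name, my_log}]) (name hypothetical) keeps
+%% the #arg defaults: a halt log in internal format, infinite size,
+%% read_write mode, repair enabled, with the file name "my_log.LOG".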
+
+check_wrap_arg({ok, Res}, {0,0}, _Version) when Res#arg.size =:= infinity ->
+ {error, {badarg, size}};
+check_wrap_arg({ok, Res}, OldSize, Version) when Res#arg.size =:= infinity ->
+ NewRes = Res#arg{size = OldSize},
+ check_wrap_arg({ok, NewRes}, OldSize, Version);
+check_wrap_arg({ok, Res}, {0,0}, Version) ->
+ {ok, Res#arg{version = Version}};
+check_wrap_arg({ok, Res}, OldSize, Version) when OldSize =:= Res#arg.size ->
+ {ok, Res#arg{version = Version}};
+check_wrap_arg({ok, Res}, _OldSize, Version) when Res#arg.repair =:= truncate,
+ is_tuple(Res#arg.size) ->
+ {ok, Res#arg{version = Version}};
+check_wrap_arg({ok, Res}, OldSize, _Version) when is_tuple(Res#arg.size) ->
+ {error, {size_mismatch, OldSize, Res#arg.size}};
+check_wrap_arg({ok, _Res}, _OldSize, _Version) ->
+ {error, {badarg, size}};
+check_wrap_arg(Ret, _OldSize, _Version) ->
+ Ret.
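+
+%% For example, opening an existing wrap log with {size, {1000, 4}} when
+%% the size file on disk says {2000, 4} gives
+%% {error, {size_mismatch, {2000,4}, {1000,4}}}, unless repair is truncate.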
+
+%%%-----------------------------------------------------------------
+%%% Server functions
+%%%-----------------------------------------------------------------
+init(Parent, Server) ->
+ ?PROFILE(ep:do()),
+ process_flag(trap_exit, true),
+ loop(#state{parent = Parent, server = Server}).
+
+loop(State) when State#state.messages =:= [] ->
+ receive
+ Message ->
+ handle(Message, State)
+ end;
+loop(State) ->
+ [M | Ms] = State#state.messages,
+ handle(M, State#state{messages = Ms}).
+
+handle({From, write_cache}, S) when From =:= self() ->
+ case catch do_write_cache(get(log)) of
+ ok ->
+ loop(S);
+ Error ->
+ loop(S#state{cache_error = Error})
+ end;
+handle({From, {log, B}}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok, L#log.format =:= internal ->
+ log_loop(S, From, [B], []);
+ L when L#log.status =:= ok, L#log.format =:= external ->
+ reply(From, {error, {format_external, L#log.name}}, S);
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {log, B}} | S#state.queue]})
+ end;
+handle({From, {blog, B}}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok ->
+ log_loop(S, From, [B], []);
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {blog, B}} | S#state.queue]})
+ end;
+handle({alog, B}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ notify_owners({read_only,B}),
+ loop(S);
+ L when L#log.status =:= ok, L#log.format =:= internal ->
+ log_loop(S, [], [B], []);
+ L when L#log.status =:= ok ->
+ notify_owners({format_external, B}),
+ loop(S);
+ L when L#log.status =:= {blocked, false} ->
+ notify_owners({blocked_log, B}),
+ loop(S);
+ _ ->
+ loop(S#state{queue = [{alog, B} | S#state.queue]})
+ end;
+handle({balog, B}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ notify_owners({read_only,B}),
+ loop(S);
+ L when L#log.status =:= ok ->
+ log_loop(S, [], [B], []);
+ L when L#log.status =:= {blocked, false} ->
+ notify_owners({blocked_log, B}),
+ loop(S);
+ _ ->
+ loop(S#state{queue = [{balog, B} | S#state.queue]})
+ end;
+handle({From, {block, QueueLogRecs}}, S) ->
+ case get(log) of
+ L when L#log.status =:= ok ->
+ do_block(From, QueueLogRecs, L),
+ reply(From, ok, S);
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {block, QueueLogRecs}} |
+ S#state.queue]})
+ end;
+handle({From, unblock}, S) ->
+ case get(log) of
+ L when L#log.status =:= ok ->
+ reply(From, {error, {not_blocked, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ S2 = do_unblock(L, S),
+ reply(From, ok, S2);
+ L ->
+ reply(From, {error, {not_blocked_by_pid, L#log.name}}, S)
+ end;
+handle({From, sync}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok ->
+ sync_loop([From], S);
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, sync} | S#state.queue]})
+ end;
+handle({From, {truncate, Head, F, A}}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ loop(cache_error(S, [From]));
+ L when L#log.status =:= ok ->
+ H = merge_head(Head, L#log.head),
+ case catch do_trunc(L, H) of
+ ok ->
+ erase(is_full),
+ notify_owners({truncated, S#state.cnt}),
+ N = if Head =:= none -> 0; true -> 1 end,
+ reply(From, ok, (state_ok(S))#state{cnt = N});
+ Error ->
+ do_exit(S, From, Error, ?failure(Error, F, A))
+ end;
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {truncate, Head, F, A}}
+ | S#state.queue]})
+ end;
+handle({From, {chunk, Pos, B, N}}, S) ->
+ case get(log) of
+ L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ loop(cache_error(S, [From]));
+ L when L#log.status =:= ok ->
+ R = do_chunk(L, Pos, B, N),
+ reply(From, R, S);
+ L when L#log.blocked_by =:= From ->
+ R = do_chunk(L, Pos, B, N),
+ reply(From, R, S);
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _L ->
+ loop(S#state{queue = [{From, {chunk, Pos, B, N}} | S#state.queue]})
+ end;
+handle({From, {chunk_step, Pos, N}}, S) ->
+ case get(log) of
+ L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ loop(cache_error(S, [From]));
+ L when L#log.status =:= ok ->
+ R = do_chunk_step(L, Pos, N),
+ reply(From, R, S);
+ L when L#log.blocked_by =:= From ->
+ R = do_chunk_step(L, Pos, N),
+ reply(From, R, S);
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {chunk_step, Pos, N}}
+ | S#state.queue]})
+ end;
+handle({From, {change_notify, Pid, NewNotify}}, S) ->
+ case get(log) of
+ L when L#log.status =:= ok ->
+ case do_change_notify(L, Pid, NewNotify) of
+ {ok, L1} ->
+ put(log, L1),
+ reply(From, ok, S);
+ Error ->
+ reply(From, Error, S)
+ end;
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {change_notify, Pid, NewNotify}}
+ | S#state.queue]})
+ end;
+handle({From, {change_header, NewHead}}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok ->
+ case check_head(NewHead, L#log.format) of
+ {ok, Head} ->
+ put(log, L#log{head = mk_head(Head, L#log.format)}),
+ reply(From, ok, S);
+ Error ->
+ reply(From, Error, S)
+ end;
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {change_header, NewHead}}
+ | S#state.queue]})
+ end;
+handle({From, {change_size, NewSize}}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok ->
+ case check_size(L#log.type, NewSize) of
+ ok ->
+ case catch do_change_size(L, NewSize) of % does the put
+ ok ->
+ reply(From, ok, S);
+ {big, CurSize} ->
+ reply(From,
+ {error,
+ {new_size_too_small, L#log.name, CurSize}},
+ S);
+ Else ->
+ reply(From, Else, state_err(S, Else))
+ end;
+ not_ok ->
+ reply(From, {error, {badarg, size}}, S)
+ end;
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, {change_size, NewSize}}
+ | S#state.queue]})
+ end;
+handle({From, inc_wrap_file}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.type =:= halt ->
+ reply(From, {error, {halt_log, L#log.name}}, S);
+ L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ loop(cache_error(S, [From]));
+ L when L#log.status =:= ok ->
+ case catch do_inc_wrap_file(L) of
+ {ok, L2, Lost} ->
+ put(log, L2),
+ notify_owners({wrap, Lost}),
+ reply(From, ok, S#state{cnt = S#state.cnt-Lost});
+ {error, Error, L2} ->
+ put(log, L2),
+ reply(From, Error, state_err(S, Error))
+ end;
+ L when L#log.status =:= {blocked, false} ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ L when L#log.blocked_by =:= From ->
+ reply(From, {error, {blocked_log, L#log.name}}, S);
+ _ ->
+ loop(S#state{queue = [{From, inc_wrap_file} | S#state.queue]})
+ end;
+handle({From, {reopen, NewFile, Head, F, A}}, S) ->
+ case get(log) of
+ L when L#log.mode =:= read_only ->
+ reply(From, {error, {read_only_mode, L#log.name}}, S);
+ L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ loop(cache_error(S, [From]));
+ L when L#log.status =:= ok, L#log.filename =/= NewFile ->
+ case catch close_disk_log2(L) of
+ closed ->
+ File = L#log.filename,
+ case catch rename_file(File, NewFile, L#log.type) of
+ ok ->
+ H = merge_head(Head, L#log.head),
+ case do_open((S#state.args)#arg{name = L#log.name,
+ repair = truncate,
+ head = H,
+ file = File}) of
+ {ok, Res, L2, Cnt} ->
+ put(log, L2#log{owners = L#log.owners,
+ head = L#log.head,
+ users = L#log.users}),
+ notify_owners({truncated, S#state.cnt}),
+ erase(is_full),
+ case Res of
+ {error, _} ->
+ do_exit(S, From, Res,
+ ?failure(Res, F, A));
+ _ ->
+ reply(From, ok, S#state{cnt = Cnt})
+ end;
+ Res ->
+ do_exit(S, From, Res, ?failure(Res, F, A))
+ end;
+ Error ->
+ do_exit(S, From, Error, ?failure(Error, reopen, 2))
+ end;
+ Error ->
+ do_exit(S, From, Error, ?failure(Error, F, A))
+ end;
+ L when L#log.status =:= ok ->
+ reply(From, {error, {same_file_name, L#log.name}}, S);
+ L ->
+ reply(From, {error, {blocked_log, L#log.name}}, S)
+ end;
+handle({Server, {internal_open, A}}, S) ->
+ case get(log) of
+ undefined ->
+ case do_open(A) of % does the put
+ {ok, Res, L, Cnt} ->
+ put(log, opening_pid(A#arg.linkto, A#arg.notify, L)),
+ reply(Server, Res, S#state{args=A, cnt=Cnt});
+ Res ->
+ do_fast_exit(S, Server, Res)
+ end;
+ L ->
+ TestH = mk_head(A#arg.head, A#arg.format),
+ case compare_arg(A#arg.options, S#state.args, TestH, L#log.head) of
+ ok ->
+ case add_pid(A#arg.linkto, A#arg.notify, L) of
+ {ok, L1} ->
+ put(log, L1),
+ reply(Server, {ok, L#log.name}, S);
+ Error ->
+ reply(Server, Error, S)
+ end;
+ Error ->
+ reply(Server, Error, S)
+ end
+ end;
+handle({From, close}, S) ->
+ case do_close(From, S) of
+ {stop, S1} ->
+ do_exit(S1, From, ok, normal);
+ {continue, S1} ->
+ reply(From, ok, S1)
+ end;
+handle({From, info}, S) ->
+ reply(From, do_info(get(log), S#state.cnt), S);
+handle({'EXIT', From, Reason}, S) when From =:= S#state.parent ->
+ %% Parent orders shutdown.
+ _ = do_stop(S),
+ exit(Reason);
+handle({'EXIT', From, Reason}, S) when From =:= S#state.server ->
+ %% The server is gone.
+ _ = do_stop(S),
+ exit(Reason);
+handle({'EXIT', From, _Reason}, S) ->
+ L = get(log),
+ case is_owner(From, L) of
+ {true, _Notify} ->
+ case close_owner(From, L, S) of
+ {stop, S1} ->
+ _ = do_stop(S1),
+ exit(normal);
+ {continue, S1} ->
+ loop(S1)
+ end;
+ false ->
+ %% 'users' is not decremented.
+ S1 = do_unblock(From, get(log), S),
+ loop(S1)
+ end;
+handle({system, From, Req}, S) ->
+ sys:handle_system_msg(Req, From, S#state.parent, ?MODULE, [], S);
+handle(_, S) ->
+ loop(S).
+
+sync_loop(From, S) ->
+ log_loop(S, [], [], From).
+
+%% Inlined.
+log_loop(S, Pids, _Bins, _Sync) when S#state.cache_error =/= ok ->
+ loop(cache_error(S, Pids));
+log_loop(S, Pids, Bins, Sync) when S#state.messages =:= [] ->
+ receive
+ Message ->
+ log_loop(Message, Pids, Bins, Sync, S, get(log))
+ after 0 ->
+ loop(log_end(S, Pids, Bins, Sync))
+ end;
+log_loop(S, Pids, Bins, Sync) ->
+ [M | Ms] = S#state.messages,
+ S1 = S#state{messages = Ms},
+ log_loop(M, Pids, Bins, Sync, S1, get(log)).
+
+%% Items logged after the last sync request found are synced as well.
+log_loop({alog,B}, Pids, Bins, Sync, S, L) when L#log.format =:= internal ->
+ %% {alog, _} allowed for the internal format only.
+ log_loop(S, Pids, [B | Bins], Sync);
+log_loop({balog, B}, Pids, Bins, Sync, S, _L) ->
+ log_loop(S, Pids, [B | Bins], Sync);
+log_loop({From, {log, B}}, Pids, Bins, Sync, S, L)
+ when L#log.format =:= internal ->
+ %% {log, _} allowed for the internal format only.
+ log_loop(S, [From | Pids], [B | Bins], Sync);
+log_loop({From, {blog, B}}, Pids, Bins, Sync, S, _L) ->
+ log_loop(S, [From | Pids], [B | Bins], Sync);
+log_loop({From, sync}, Pids, Bins, Sync, S, _L) ->
+ log_loop(S, Pids, Bins, [From | Sync]);
+log_loop(Message, Pids, Bins, Sync, S, _L) ->
+ NS = log_end(S, Pids, Bins, Sync),
+ handle(Message, NS).
+
+log_end(S, [], [], Sync) ->
+ log_end_sync(S, Sync);
+log_end(S, Pids, Bins, Sync) ->
+ case do_log(get(log), rflat(Bins)) of
+ N when is_integer(N) ->
+ replies(Pids, ok),
+ S1 = (state_ok(S))#state{cnt = S#state.cnt+N},
+ log_end_sync(S1, Sync);
+ {error, {error, {full, _Name}}, N} when Pids =:= [] ->
+ log_end_sync(state_ok(S#state{cnt = S#state.cnt + N}), Sync);
+ {error, Error, N} ->
+ replies(Pids, Error),
+ state_err(S#state{cnt = S#state.cnt + N}, Error)
+ end.
+
+%% Inlined.
+log_end_sync(S, []) ->
+ S;
+log_end_sync(S, Sync) ->
+ Res = do_sync(get(log)),
+ replies(Sync, Res),
+ state_err(S, Res).
+
+%% Inlined.
+rflat([B]=L) when is_binary(B) -> L;
+rflat([B]) -> B;
+rflat(B) -> rflat(B, []).
+
+rflat([B | Bs], L) when is_binary(B) ->
+ rflat(Bs, [B | L]);
+rflat([B | Bs], L) ->
+ rflat(Bs, B ++ L);
+rflat([], L) -> L.
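+
+%% The accumulated Bins hold the newest item first and may mix binaries
+%% with lists of binaries; rflat/1 returns a flat list in chronological
+%% order, e.g. rflat([[B3,B4], B2, [B1]]) -> [B1,B2,B3,B4].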
+
+%% -> {ok, Log} | {error, Error}
+do_change_notify(L, Pid, Notify) ->
+ case is_owner(Pid, L) of
+ {true, Notify} ->
+ {ok, L};
+ {true, _OldNotify} when Notify =/= true, Notify =/= false ->
+ {error, {badarg, notify}};
+ {true, _OldNotify} ->
+ Owners = lists:keydelete(Pid, 1, L#log.owners),
+ L1 = L#log{owners = [{Pid, Notify} | Owners]},
+ {ok, L1};
+ false ->
+ {error, {not_owner, Pid}}
+ end.
+
+%% -> {stop, S} | {continue, S}
+do_close(Pid, S) ->
+ L = get(log),
+ case is_owner(Pid, L) of
+ {true, _Notify} ->
+ close_owner(Pid, L, S);
+ false ->
+ close_user(Pid, L, S)
+ end.
+
+%% -> {stop, S} | {continue, S}
+close_owner(Pid, L, S) ->
+ L1 = L#log{owners = lists:keydelete(Pid, 1, L#log.owners)},
+ put(log, L1),
+ S2 = do_unblock(Pid, get(log), S),
+ unlink(Pid),
+ do_close2(L1, S2).
+
+%% -> {stop, S} | {continue, S}
+close_user(Pid, L, S) when L#log.users > 0 ->
+ L1 = L#log{users = L#log.users - 1},
+ put(log, L1),
+ S2 = do_unblock(Pid, get(log), S),
+ do_close2(L1, S2);
+close_user(_Pid, _L, S) ->
+ {continue, S}.
+
+do_close2(L, S) when L#log.users =:= 0, L#log.owners =:= [] ->
+ {stop, S};
+do_close2(_L, S) ->
+ {continue, S}.
+
+%%-----------------------------------------------------------------
+%% Callback functions for system messages handling.
+%%-----------------------------------------------------------------
+system_continue(_Parent, _, State) ->
+ loop(State).
+
+-spec system_terminate(_, _, _, #state{}) -> no_return().
+system_terminate(Reason, _Parent, _, State) ->
+ _ = do_stop(State),
+ exit(Reason).
+
+%%-----------------------------------------------------------------
+%% Temporary code for upgrade.
+%%-----------------------------------------------------------------
+system_code_change(State, _Module, _OldVsn, _Extra) ->
+ {ok, State}.
+
+
+%%%----------------------------------------------------------------------
+%%% Internal functions
+%%%----------------------------------------------------------------------
+-spec do_exit(#state{}, pid(), _, _) -> no_return().
+do_exit(S, From, Message0, Reason) ->
+ R = do_stop(S),
+ Message = case S#state.cache_error of
+ Err when Err =/= ok -> Err;
+ _ when R =:= closed -> Message0;
+ _ when Message0 =:= ok -> R;
+ _ -> Message0
+ end,
+ _ = disk_log_server:close(self()),
+ replies(From, Message),
+ ?PROFILE(ep:done()),
+ exit(Reason).
+
+-spec do_fast_exit(#state{}, pid(), _) -> no_return().
+do_fast_exit(S, Server, Message) ->
+ _ = do_stop(S),
+ Server ! {disk_log, self(), Message},
+ exit(normal).
+
+%% -> closed | Error
+do_stop(S) ->
+ proc_q(S#state.queue ++ S#state.messages),
+ close_disk_log(get(log)).
+
+proc_q([{From, _R}|Tail]) when is_pid(From) ->
+ From ! {disk_log, self(), {error, disk_log_stopped}},
+ proc_q(Tail);
+proc_q([_|T]) -> %% async stuff
+ proc_q(T);
+proc_q([]) ->
+ ok.
+
+%% -> log()
+opening_pid(Pid, Notify, L) ->
+ {ok, L1} = add_pid(Pid, Notify, L),
+ L1.
+
+%% -> {ok, log()} | Error
+add_pid(Pid, Notify, L) when is_pid(Pid) ->
+ case is_owner(Pid, L) of
+ false ->
+ link(Pid),
+ {ok, L#log{owners = [{Pid, Notify} | L#log.owners]}};
+ {true, Notify} ->
+%% {error, {pid_already_connected, L#log.name}};
+ {ok, L};
+ {true, CurNotify} when Notify =/= CurNotify ->
+ {error, {arg_mismatch, notify, CurNotify, Notify}}
+ end;
+add_pid(_NotAPid, _Notify, L) ->
+ {ok, L#log{users = L#log.users + 1}}.
+
+unblock_pid(L) when L#log.blocked_by =:= none ->
+ ok;
+unblock_pid(L) ->
+ case is_owner(L#log.blocked_by, L) of
+ {true, _Notify} ->
+ ok;
+ false ->
+ unlink(L#log.blocked_by)
+ end.
+
+%% -> {true, Notify} | false
+is_owner(Pid, L) ->
+ case lists:keysearch(Pid, 1, L#log.owners) of
+ {value, {_Pid, Notify}} ->
+ {true, Notify};
+ false ->
+ false
+ end.
+
+%% ok | throw(Error)
+rename_file(File, NewFile, halt) ->
+ file:rename(File, NewFile);
+rename_file(File, NewFile, wrap) ->
+ rename_file(wrap_file_extensions(File), File, NewFile, ok).
+
+rename_file([Ext|Exts], File, NewFile, Res) ->
+ NRes = case file:rename(add_ext(File, Ext), add_ext(NewFile, Ext)) of
+ ok ->
+ Res;
+ Else ->
+ Else
+ end,
+ rename_file(Exts, File, NewFile, NRes);
+rename_file([], _File, _NewFiles, Res) -> Res.
+
+%% "Old" error messages have been kept, arg_mismatch has been added.
+%%-spec compare_arg(dlog_options(), #arg{},
+compare_arg([], _A, none, _OrigHead) ->
+ % no header option given
+ ok;
+compare_arg([], _A, Head, OrigHead) when Head =/= OrigHead ->
+ {error, {arg_mismatch, head, OrigHead, Head}};
+compare_arg([], _A, _Head, _OrigHead) ->
+ ok;
+compare_arg([{Attr, Val} | Tail], A, Head, OrigHead) ->
+ case compare_arg(Attr, Val, A) of
+ {not_ok, OrigVal} ->
+ {error, {arg_mismatch, Attr, OrigVal, Val}};
+ ok ->
+ compare_arg(Tail, A, Head, OrigHead);
+ Error ->
+ Error
+ end.
+
+-spec compare_arg(atom(), _, #arg{}) ->
+ 'ok' | {'not_ok', _} | {'error', {atom(), _}}.
+compare_arg(file, F, A) when F =/= A#arg.file ->
+ {error, {name_already_open, A#arg.name}};
+compare_arg(mode, read_only, A) when A#arg.mode =:= read_write ->
+ {error, {open_read_write, A#arg.name}};
+compare_arg(mode, read_write, A) when A#arg.mode =:= read_only ->
+ {error, {open_read_only, A#arg.name}};
+compare_arg(type, T, A) when T =/= A#arg.type ->
+ {not_ok, A#arg.type};
+compare_arg(format, F, A) when F =/= A#arg.format ->
+ {not_ok, A#arg.format};
+compare_arg(repair, R, A) when R =/= A#arg.repair ->
+ %% not used, but check it anyway...
+ {not_ok, A#arg.repair};
+compare_arg(_Attr, _Val, _A) ->
+ ok.
+
+%% -> {ok, Res, log(), Cnt} | Error
+do_open(A) ->
+ L = #log{name = A#arg.name,
+ filename = A#arg.file,
+ size = A#arg.size,
+ head = mk_head(A#arg.head, A#arg.format),
+ mode = A#arg.mode,
+ version = A#arg.version},
+ do_open2(L, A).
+
+mk_head({head, Term}, internal) -> {ok, term_to_binary(Term)};
+mk_head({head, Bytes}, external) -> {ok, check_bytes(Bytes)};
+mk_head(H, _) -> H.
+
+terms2bins([T | Ts]) ->
+ [term_to_binary(T) | terms2bins(Ts)];
+terms2bins([]) ->
+ [].
+
+check_bytes_list([B | Bs], Bs0) when is_binary(B) ->
+ check_bytes_list(Bs, Bs0);
+check_bytes_list([], Bs0) ->
+ Bs0;
+check_bytes_list(_, Bs0) ->
+ check_bytes_list(Bs0).
+
+check_bytes_list([B | Bs]) when is_binary(B) ->
+ [B | check_bytes_list(Bs)];
+check_bytes_list([B | Bs]) ->
+ [list_to_binary(B) | check_bytes_list(Bs)];
+check_bytes_list([]) ->
+ [].
+
+check_bytes(Binary) when is_binary(Binary) ->
+ Binary;
+check_bytes(Bytes) ->
+ list_to_binary(Bytes).
+
+%%-----------------------------------------------------------------
+%% Change size of the logs in runtime.
+%%-----------------------------------------------------------------
+%% -> ok | {big, CurSize} | throw(Error)
+do_change_size(L, NewSize) when L#log.type =:= halt ->
+ Halt = L#log.extra,
+ CurB = Halt#halt.curB,
+ NewLog = L#log{extra = Halt#halt{size = NewSize}},
+ if
+ NewSize =:= infinity ->
+ erase(is_full),
+ put(log, NewLog),
+ ok;
+ CurB =< NewSize ->
+ erase(is_full),
+ put(log, NewLog),
+ ok;
+ true ->
+ {big, CurB}
+ end;
+do_change_size(L, NewSize) when L#log.type =:= wrap ->
+ #log{extra = Extra, version = Version} = L,
+ {ok, Handle} = disk_log_1:change_size_wrap(Extra, NewSize, Version),
+ erase(is_full),
+ put(log, L#log{extra = Handle}),
+ ok.
+
+%% -> {ok, Head} | Error; Head = none | {head, H} | {M,F,A}
+check_head({head, none}, _Format) ->
+ {ok, none};
+check_head({head_func, {M, F, A}}, _Format) when is_atom(M),
+ is_atom(F),
+ is_list(A) ->
+ {ok, {M, F, A}};
+check_head({head, Head}, external) ->
+ case catch check_bytes(Head) of
+ {'EXIT', _} ->
+ {error, {badarg, head}};
+ _ ->
+ {ok, {head, Head}}
+ end;
+check_head({head, Term}, internal) ->
+ {ok, {head, Term}};
+check_head(_Head, _Format) ->
+ {error, {badarg, head}}.
+
+check_size(wrap, {NewMaxB,NewMaxF}) when
+ is_integer(NewMaxB), is_integer(NewMaxF),
+ NewMaxB > 0, NewMaxB =< ?MAX_BYTES, NewMaxF > 0, NewMaxF < ?MAX_FILES ->
+ ok;
+check_size(halt, NewSize) when is_integer(NewSize), NewSize > 0 ->
+ ok;
+check_size(halt, infinity) ->
+ ok;
+check_size(_, _) ->
+ not_ok.
+
+%%-----------------------------------------------------------------
+%% Increment a wrap log.
+%%-----------------------------------------------------------------
+%% -> {ok, log(), Lost} | {error, Error, log()}
+do_inc_wrap_file(L) ->
+ #log{format = Format, extra = Handle} = L,
+ case Format of
+ internal ->
+ case disk_log_1:mf_int_inc(Handle, L#log.head) of
+ {ok, Handle2, Lost} ->
+ {ok, L#log{extra = Handle2}, Lost};
+ {error, Error, Handle2} ->
+ {error, Error, L#log{extra = Handle2}}
+ end;
+ external ->
+ case disk_log_1:mf_ext_inc(Handle, L#log.head) of
+ {ok, Handle2, Lost} ->
+ {ok, L#log{extra = Handle2}, Lost};
+ {error, Error, Handle2} ->
+ {error, Error, L#log{extra = Handle2}}
+ end
+ end.
+
+
+%%-----------------------------------------------------------------
+%% Open a log file.
+%%-----------------------------------------------------------------
+%% -> {ok, Reply, log(), Cnt} | Error
+%% Note: the header is always written, even if the log size is too small.
+do_open2(L, #arg{type = halt, format = internal, name = Name,
+ file = FName, repair = Repair, size = Size, mode = Mode}) ->
+ case catch disk_log_1:int_open(FName, Repair, Mode, L#log.head) of
+ {ok, {_Alloc, FdC, {NoItems, _NoBytes}, FileSize}} ->
+ Halt = #halt{fdc = FdC, curB = FileSize, size = Size},
+ {ok, {ok, Name}, L#log{format_type = halt_int, extra = Halt},
+ NoItems};
+ {repaired, FdC, Rec, Bad, FileSize} ->
+ Halt = #halt{fdc = FdC, curB = FileSize, size = Size},
+ {ok, {repaired, Name, {recovered, Rec}, {badbytes, Bad}},
+ L#log{format_type = halt_int, extra = Halt},
+ Rec};
+ Error ->
+ Error
+ end;
+do_open2(L, #arg{type = wrap, format = internal, size = {MaxB, MaxF},
+ name = Name, repair = Repair, file = FName, mode = Mode,
+ version = V}) ->
+ case catch
+ disk_log_1:mf_int_open(FName, MaxB, MaxF, Repair, Mode, L#log.head, V) of
+ {ok, Handle, Cnt} ->
+ {ok, {ok, Name}, L#log{type = wrap,
+ format_type = wrap_int,
+ extra = Handle}, Cnt};
+ {repaired, Handle, Rec, Bad, Cnt} ->
+ {ok, {repaired, Name, {recovered, Rec}, {badbytes, Bad}},
+ L#log{type = wrap, format_type = wrap_int, extra = Handle}, Cnt};
+ Error ->
+ Error
+ end;
+do_open2(L, #arg{type = halt, format = external, file = FName, name = Name,
+ size = Size, repair = Repair, mode = Mode}) ->
+ case catch disk_log_1:ext_open(FName, Repair, Mode, L#log.head) of
+ {ok, {_Alloc, FdC, {NoItems, _NoBytes}, FileSize}} ->
+ Halt = #halt{fdc = FdC, curB = FileSize, size = Size},
+ {ok, {ok, Name},
+ L#log{format_type = halt_ext, format = external, extra = Halt},
+ NoItems};
+ Error ->
+ Error
+ end;
+do_open2(L, #arg{type = wrap, format = external, size = {MaxB, MaxF},
+ name = Name, file = FName, repair = Repair, mode = Mode,
+ version = V}) ->
+ case catch
+ disk_log_1:mf_ext_open(FName, MaxB, MaxF, Repair, Mode, L#log.head, V) of
+ {ok, Handle, Cnt} ->
+ {ok, {ok, Name}, L#log{type = wrap,
+ format_type = wrap_ext,
+ extra = Handle,
+ format = external}, Cnt};
+ Error ->
+ Error
+ end.
+
+%% -> closed | Error
+close_disk_log(undefined) ->
+ closed;
+close_disk_log(L) ->
+ unblock_pid(L),
+ F = fun({Pid, _}) ->
+ unlink(Pid)
+ end,
+ lists:foreach(F, L#log.owners),
+ R = (catch close_disk_log2(L)),
+ erase(log),
+ R.
+
+-spec close_disk_log2(#log{}) -> 'closed'. % | throw(Error)
+
+close_disk_log2(L) ->
+ case L of
+ #log{format_type = halt_int, mode = Mode, extra = Halt} ->
+ disk_log_1:close(Halt#halt.fdc, L#log.filename, Mode);
+ #log{format_type = wrap_int, mode = Mode, extra = Handle} ->
+ disk_log_1:mf_int_close(Handle, Mode);
+ #log{format_type = halt_ext, extra = Halt} ->
+ disk_log_1:fclose(Halt#halt.fdc, L#log.filename);
+ #log{format_type = wrap_ext, mode = Mode, extra = Handle} ->
+ disk_log_1:mf_ext_close(Handle, Mode)
+ end,
+ closed.
+
+do_format_error({error, Module, Error}) ->
+ Module:format_error(Error);
+do_format_error({error, Reason}) ->
+ do_format_error(Reason);
+do_format_error({Node, Error = {error, _Reason}}) ->
+ lists:append(io_lib:format("~p: ", [Node]), do_format_error(Error));
+do_format_error({badarg, Arg}) ->
+ io_lib:format("The argument ~p is missing, not recognized or "
+ "not wellformed~n", [Arg]);
+do_format_error({size_mismatch, OldSize, ArgSize}) ->
+ io_lib:format("The given size ~p does not match the size ~p found on "
+ "the disk log size file~n", [ArgSize, OldSize]);
+do_format_error({read_only_mode, Log}) ->
+ io_lib:format("The disk log ~p has been opened read-only, but the "
+ "requested operation needs read-write access~n", [Log]);
+do_format_error({format_external, Log}) ->
+ io_lib:format("The requested operation can only be applied on internally "
+ "formatted disk logs, but ~p is externally formatted~n",
+ [Log]);
+do_format_error({blocked_log, Log}) ->
+ io_lib:format("The blocked disk log ~p does not queue requests, or "
+ "the log has been blocked by the calling process~n", [Log]);
+do_format_error({full, Log}) ->
+ io_lib:format("The halt log ~p is full~n", [Log]);
+do_format_error({not_blocked, Log}) ->
+ io_lib:format("The disk log ~p is not blocked~n", [Log]);
+do_format_error({not_owner, Pid}) ->
+ io_lib:format("The pid ~p is not an owner of the disk log~n", [Pid]);
+do_format_error({not_blocked_by_pid, Log}) ->
+ io_lib:format("The disk log ~p is blocked, but only the blocking pid "
+ "can unblock a disk log~n", [Log]);
+do_format_error({new_size_too_small, Log, CurrentSize}) ->
+ io_lib:format("The current size ~p of the halt log ~p is greater than the "
+ "requested new size~n", [CurrentSize, Log]);
+do_format_error({halt_log, Log}) ->
+ io_lib:format("The halt log ~p cannot be wrapped~n", [Log]);
+do_format_error({same_file_name, Log}) ->
+ io_lib:format("Current and new file name of the disk log ~p "
+ "are the same~n", [Log]);
+do_format_error({arg_mismatch, Option, FirstValue, ArgValue}) ->
+ io_lib:format("The value ~p of the disk log option ~p does not match "
+ "the current value ~p~n", [ArgValue, Option, FirstValue]);
+do_format_error({name_already_open, Log}) ->
+ io_lib:format("The disk log ~p has already opened another file~n", [Log]);
+do_format_error({node_already_open, Log}) ->
+ io_lib:format("The distribution option of the disk log ~p does not match "
+ "already open log~n", [Log]);
+do_format_error({open_read_write, Log}) ->
+ io_lib:format("The disk log ~p has already been opened read-write~n",
+ [Log]);
+do_format_error({open_read_only, Log}) ->
+ io_lib:format("The disk log ~p has already been opened read-only~n",
+ [Log]);
+do_format_error({not_internal_wrap, Log}) ->
+ io_lib:format("The requested operation cannot be applied since ~p is not "
+ "an internally formatted disk log~n", [Log]);
+do_format_error(no_such_log) ->
+ io_lib:format("There is no disk log with the given name~n", []);
+do_format_error(nonode) ->
+ io_lib:format("There seems to be no node up that can handle "
+ "the request~n", []);
+do_format_error(nodedown) ->
+ io_lib:format("There seems to be no node up that can handle "
+ "the request~n", []);
+do_format_error({corrupt_log_file, FileName}) ->
+ io_lib:format("The disk log file \"~s\" contains corrupt data~n",
+ [FileName]);
+do_format_error({need_repair, FileName}) ->
+ io_lib:format("The disk log file \"~s\" has not been closed properly and "
+ "needs repair~n", [FileName]);
+do_format_error({not_a_log_file, FileName}) ->
+ io_lib:format("The file \"~s\" is not a wrap log file~n", [FileName]);
+do_format_error({invalid_header, InvalidHeader}) ->
+ io_lib:format("The disk log header is not wellformed: ~p~n",
+ [InvalidHeader]);
+do_format_error(end_of_log) ->
+ io_lib:format("An attempt was made to step outside a not yet "
+ "full wrap log~n", []);
+do_format_error({invalid_index_file, FileName}) ->
+ io_lib:format("The wrap log index file \"~s\" cannot be used~n",
+ [FileName]);
+do_format_error({no_continuation, BadCont}) ->
+ io_lib:format("The term ~p is not a chunk continuation~n", [BadCont]);
+do_format_error({file_error, FileName, Reason}) ->
+ io_lib:format("\"~s\": ~p~n", [FileName, file:format_error(Reason)]);
+do_format_error(E) ->
+ io_lib:format("~p~n", [E]).
+
+do_info(L, Cnt) ->
+ #log{name = Name, type = Type, mode = Mode, filename = File,
+ extra = Extra, status = Status, owners = Owners, users = Users,
+ format = Format, head = Head} = L,
+ Size = case Type of
+ wrap ->
+ disk_log_1:get_wrap_size(Extra);
+ halt ->
+ Extra#halt.size
+ end,
+ Distribution =
+ case disk_log_server:get_log_pids(Name) of
+ {local, _Pid} ->
+ local;
+ {distributed, Pids} ->
+ [node(P) || P <- Pids];
+ undefined -> % "cannot happen"
+ []
+ end,
+ RW = case Type of
+ wrap when Mode =:= read_write ->
+ #handle{curB = CurB, curF = CurF,
+ cur_cnt = CurCnt, acc_cnt = AccCnt,
+ noFull = NoFull, accFull = AccFull} = Extra,
+ NewAccFull = AccFull + NoFull,
+ NewExtra = Extra#handle{noFull = 0, accFull = NewAccFull},
+ put(log, L#log{extra = NewExtra}),
+ [{no_current_bytes, CurB},
+ {no_current_items, CurCnt},
+ {no_items, Cnt},
+ {no_written_items, CurCnt + AccCnt},
+ {current_file, CurF},
+ {no_overflows, {NewAccFull, NoFull}}
+ ];
+ halt when Mode =:= read_write ->
+ IsFull = case get(is_full) of
+ undefined -> false;
+ _ -> true
+ end,
+ [{full, IsFull},
+ {no_written_items, Cnt}
+ ];
+ _ when Mode =:= read_only ->
+ []
+ end,
+ HeadL = case Mode of
+ read_write ->
+ [{head, Head}];
+ read_only ->
+ []
+ end,
+ Common = [{name, Name},
+ {file, File},
+ {type, Type},
+ {format, Format},
+ {size, Size},
+ {items, Cnt}, % kept for "backward compatibility" (undocumented)
+ {owners, Owners},
+ {users, Users}] ++
+ HeadL ++
+ [{mode, Mode},
+ {status, Status},
+ {node, node()},
+ {distributed, Distribution}
+ ],
+ Common ++ RW.
+
+do_block(Pid, QueueLogRecs, L) ->
+ L2 = L#log{status = {blocked, QueueLogRecs}, blocked_by = Pid},
+ put(log, L2),
+ case is_owner(Pid, L2) of
+ {true, _Notify} ->
+ ok;
+ false ->
+ link(Pid)
+ end.
+
+do_unblock(Pid, L, S) when L#log.blocked_by =:= Pid ->
+ do_unblock(L, S);
+do_unblock(_Pid, _L, S) ->
+ S.
+
+do_unblock(L, S) ->
+ unblock_pid(L),
+ L2 = L#log{blocked_by = none, status = ok},
+ put(log, L2),
+ %% Since the block request is synchronous, and the blocking
+ %% process is the only process that can unblock, all requests in
+ %% 'messages' will have been put in 'queue' before the unblock
+ %% request is granted.
+ [] = S#state.messages, % assertion
+ S#state{queue = [], messages = lists:reverse(S#state.queue)}.
+
+-spec do_log(#log{}, [binary()]) -> integer() | {'error', _, integer()}.
+
+do_log(L, B) when L#log.type =:= halt ->
+ #log{format = Format, extra = Halt} = L,
+ #halt{curB = CurSize, size = Sz} = Halt,
+ {Bs, BSize} = bsize(B, Format),
+ case get(is_full) of
+ true ->
+ {error, {error, {full, L#log.name}}, 0};
+ undefined when Sz =:= infinity; CurSize + BSize =< Sz ->
+ halt_write(Halt, L, B, Bs, BSize);
+ undefined ->
+ halt_write_full(L, B, Format, 0)
+ end;
+do_log(L, B) when L#log.format_type =:= wrap_int ->
+ case disk_log_1:mf_int_log(L#log.extra, B, L#log.head) of
+ {ok, Handle, Logged, Lost, Wraps} ->
+ notify_owners_wrap(Wraps),
+ put(log, L#log{extra = Handle}),
+ Logged - Lost;
+ {ok, Handle, Logged} ->
+ put(log, L#log{extra = Handle}),
+ Logged;
+ {error, Error, Handle, Logged, Lost} ->
+ put(log, L#log{extra = Handle}),
+ {error, Error, Logged - Lost}
+ end;
+do_log(L, B) when L#log.format_type =:= wrap_ext ->
+ case disk_log_1:mf_ext_log(L#log.extra, B, L#log.head) of
+ {ok, Handle, Logged, Lost, Wraps} ->
+ notify_owners_wrap(Wraps),
+ put(log, L#log{extra = Handle}),
+ Logged - Lost;
+ {ok, Handle, Logged} ->
+ put(log, L#log{extra = Handle}),
+ Logged;
+ {error, Error, Handle, Logged, Lost} ->
+ put(log, L#log{extra = Handle}),
+ {error, Error, Logged - Lost}
+ end.
+
+bsize(B, external) ->
+ {B, xsz(B, 0)};
+bsize(B, internal) ->
+ disk_log_1:logl(B).
+
+xsz([B|T], Sz) -> xsz(T, byte_size(B) + Sz);
+xsz([], Sz) -> Sz.
+
+halt_write_full(L, [Bin | Bins], Format, N) ->
+ B = [Bin],
+ {Bs, BSize} = bsize(B, Format),
+ Halt = L#log.extra,
+ #halt{curB = CurSize, size = Sz} = Halt,
+ if
+ CurSize + BSize =< Sz ->
+ case halt_write(Halt, L, B, Bs, BSize) of
+ N1 when is_integer(N1) ->
+ halt_write_full(get(log), Bins, Format, N+N1);
+ Error ->
+ Error
+ end;
+ true ->
+ halt_write_full(L, [], Format, N)
+ end;
+halt_write_full(L, _Bs, _Format, N) ->
+ put(is_full, true),
+ notify_owners(full),
+ {error, {error, {full, L#log.name}}, N}.
+
+halt_write(Halt, L, B, Bs, BSize) ->
+ case disk_log_1:fwrite(Halt#halt.fdc, L#log.filename, Bs, BSize) of
+ {ok, NewFdC} ->
+ NCurB = Halt#halt.curB + BSize,
+ NewHalt = Halt#halt{fdc = NewFdC, curB = NCurB},
+ put(log, L#log{extra = NewHalt}),
+ length(B);
+ {Error, NewFdC} ->
+ put(log, L#log{extra = Halt#halt{fdc = NewFdC}}),
+ {error, Error, 0}
+ end.
+
+%% -> ok | Error
+do_write_cache(#log{filename = FName, type = halt, extra = Halt} = Log) ->
+ {Reply, NewFdC} = disk_log_1:write_cache(Halt#halt.fdc, FName),
+ put(log, Log#log{extra = Halt#halt{fdc = NewFdC}}),
+ Reply;
+do_write_cache(#log{type = wrap, extra = Handle} = Log) ->
+ {Reply, NewHandle} = disk_log_1:mf_write_cache(Handle),
+ put(log, Log#log{extra = NewHandle}),
+ Reply.
+
+%% -> ok | Error
+do_sync(#log{filename = FName, type = halt, extra = Halt} = Log) ->
+ {Reply, NewFdC} = disk_log_1:sync(Halt#halt.fdc, FName),
+ put(log, Log#log{extra = Halt#halt{fdc = NewFdC}}),
+ Reply;
+do_sync(#log{type = wrap, extra = Handle} = Log) ->
+ {Reply, NewHandle} = disk_log_1:mf_sync(Handle),
+ put(log, Log#log{extra = NewHandle}),
+ Reply.
+
+%% -> ok | Error | throw(Error)
+do_trunc(L, Head) when L#log.type =:= halt ->
+ #log{filename = FName, extra = Halt} = L,
+ FdC = Halt#halt.fdc,
+ {Reply1, FdC2} =
+ case L#log.format of
+ internal ->
+ disk_log_1:truncate(FdC, FName, Head);
+ external ->
+ case disk_log_1:truncate_at(FdC, FName, bof) of
+ {ok, NFdC} when Head =:= none ->
+ {ok, NFdC};
+ {ok, NFdC} ->
+ {ok, H} = Head,
+ disk_log_1:fwrite(NFdC, FName, H, byte_size(H));
+ R ->
+ R
+ end
+ end,
+ {Reply, NewHalt} =
+ case disk_log_1:position(FdC2, FName, cur) of
+ {ok, NewFdC, FileSize} when Reply1 =:= ok ->
+ {ok, Halt#halt{fdc = NewFdC, curB = FileSize}};
+ {Reply2, NewFdC} ->
+ {Reply2, Halt#halt{fdc = NewFdC}};
+ {ok, NewFdC, _} ->
+ {Reply1, Halt#halt{fdc = NewFdC}}
+ end,
+ put(log, L#log{extra = NewHalt}),
+ Reply;
+do_trunc(L, Head) when L#log.type =:= wrap ->
+ Handle = L#log.extra,
+ OldHead = L#log.head,
+ {MaxB, MaxF} = disk_log_1:get_wrap_size(Handle),
+ ok = do_change_size(L, {MaxB, 1}),
+ NewLog = trunc_wrap((get(log))#log{head = Head}),
+ %% Just to remove all files with suffix > 1:
+ NewLog2 = trunc_wrap(NewLog),
+ NewHandle = (NewLog2#log.extra)#handle{noFull = 0, accFull = 0},
+ do_change_size(NewLog2#log{extra = NewHandle, head = OldHead},
+ {MaxB, MaxF}).
+
+trunc_wrap(L) ->
+ case do_inc_wrap_file(L) of
+ {ok, L2, _Lost} ->
+ L2;
+ {error, Error, _L2} ->
+ throw(Error)
+ end.
+
+do_chunk(#log{format_type = halt_int, extra = Halt} = L, Pos, B, N) ->
+ FdC = Halt#halt.fdc,
+ {NewFdC, Reply} =
+ case L#log.mode of
+ read_only ->
+ disk_log_1:chunk_read_only(FdC, L#log.filename, Pos, B, N);
+ read_write ->
+ disk_log_1:chunk(FdC, L#log.filename, Pos, B, N)
+ end,
+ put(log, L#log{extra = Halt#halt{fdc = NewFdC}}),
+ Reply;
+do_chunk(#log{format_type = wrap_int, mode = read_only,
+ extra = Handle} = Log, Pos, B, N) ->
+ {NewHandle, Reply} = disk_log_1:mf_int_chunk_read_only(Handle, Pos, B, N),
+ put(log, Log#log{extra = NewHandle}),
+ Reply;
+do_chunk(#log{format_type = wrap_int, extra = Handle} = Log, Pos, B, N) ->
+ {NewHandle, Reply} = disk_log_1:mf_int_chunk(Handle, Pos, B, N),
+ put(log, Log#log{extra = NewHandle}),
+ Reply;
+do_chunk(Log, _Pos, _B, _) ->
+ {error, {format_external, Log#log.name}}.
+
+do_chunk_step(#log{format_type = wrap_int, extra = Handle}, Pos, N) ->
+ disk_log_1:mf_int_chunk_step(Handle, Pos, N);
+do_chunk_step(Log, _Pos, _N) ->
+ {error, {not_internal_wrap, Log#log.name}}.
+
+%% Inlined.
+replies(Pids, Reply) ->
+ M = {disk_log, self(), Reply},
+ send_reply(Pids, M).
+
+send_reply(Pid, M) when is_pid(Pid) ->
+ Pid ! M;
+send_reply([Pid | Pids], M) ->
+ Pid ! M,
+ send_reply(Pids, M);
+send_reply([], _M) ->
+ ok.
+
+reply(To, Reply, S) ->
+ To ! {disk_log, self(), Reply},
+ loop(S).
+
+req(Log, R) ->
+ case disk_log_server:get_log_pids(Log) of
+ {local, Pid} ->
+ monitor_request(Pid, R);
+ undefined ->
+ {error, no_such_log};
+ {distributed, Pids} ->
+ multi_req({self(), R}, Pids)
+ end.
+
+multi_req(Msg, Pids) ->
+ Refs =
+ lists:map(fun(Pid) ->
+ Ref = erlang:monitor(process, Pid),
+ Pid ! Msg,
+ {Pid, Ref}
+ end, Pids),
+ lists:foldl(fun({Pid, Ref}, Reply) ->
+ receive
+ {'DOWN', Ref, process, Pid, _Info} ->
+ Reply;
+ {disk_log, Pid, _Reply} ->
+ erlang:demonitor(Ref),
+ receive
+ {'DOWN', Ref, process, Pid, _Reason} ->
+ ok
+ after 0 ->
+ ok
+ end
+ end
+ end, {error, nonode}, Refs).
+
+sreq(Log, R) ->
+ case nearby_pid(Log, node()) of
+ undefined ->
+ {error, no_such_log};
+ Pid ->
+ monitor_request(Pid, R)
+ end.
+
+%% Local req - always talk to log on Node
+lreq(Log, R, Node) ->
+ case nearby_pid(Log, Node) of
+ Pid when is_pid(Pid), node(Pid) =:= Node ->
+ monitor_request(Pid, R);
+ _Else ->
+ {error, no_such_log}
+ end.
+
+nearby_pid(Log, Node) ->
+ case disk_log_server:get_log_pids(Log) of
+ undefined ->
+ undefined;
+ {local, Pid} ->
+ Pid;
+ {distributed, Pids} ->
+ get_near_pid(Pids, Node)
+ end.
+
+-spec get_near_pid([pid(),...], node()) -> pid().
+
+get_near_pid([Pid | _], Node) when node(Pid) =:= Node -> Pid;
+get_near_pid([Pid], _ ) -> Pid;
+get_near_pid([_ | T], Node) -> get_near_pid(T, Node).
+
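+%% Monitor the log process while waiting for its reply so that a dead or
+%% non-existing log is detected; the 'after 0' clause flushes a 'DOWN'
+%% message that may already have been delivered before the demonitor.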
+monitor_request(Pid, Req) ->
+ Ref = erlang:monitor(process, Pid),
+ Pid ! {self(), Req},
+ receive
+ {'DOWN', Ref, process, Pid, _Info} ->
+ {error, no_such_log};
+ {disk_log, Pid, Reply} ->
+ erlang:demonitor(Ref),
+ receive
+ {'DOWN', Ref, process, Pid, _Reason} ->
+ Reply
+ after 0 ->
+ Reply
+ end
+ end.
+
+req2(Pid, R) ->
+ monitor_request(Pid, R).
+
+merge_head(none, Head) ->
+ Head;
+merge_head(Head, _) ->
+ Head.
+
+%% -> List of extensions of existing files (no dot included) | throw(FileError)
+wrap_file_extensions(File) ->
+ {_CurF, _CurFSz, _TotSz, NoOfFiles} =
+ disk_log_1:read_index_file(File),
+ Fs = if
+ NoOfFiles >= 1 ->
+ lists:seq(1, NoOfFiles);
+ NoOfFiles =:= 0 ->
+ []
+ end,
+ Fun = fun(Ext) ->
+ case file:read_file_info(add_ext(File, Ext)) of
+ {ok, _} ->
+ true;
+ _Else ->
+ false
+ end
+ end,
+ lists:filter(Fun, ["idx", "siz" | Fs]).
+
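+%% Examples (file names hypothetical):
+%%   add_ext("my_log", "idx") -> "my_log.idx"
+%%   add_ext("my_log", 2)     -> "my_log.2"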
+add_ext(File, Ext) ->
+ lists:concat([File, ".", Ext]).
+
+notify(Log, R) ->
+ case disk_log_server:get_log_pids(Log) of
+ undefined ->
+ {error, no_such_log};
+ {local, Pid} ->
+ Pid ! R,
+ ok;
+ {distributed, Pids} ->
+ lists:foreach(fun(Pid) -> Pid ! R end, Pids),
+ ok
+ end.
+
+notify_owners_wrap([]) ->
+ ok;
+notify_owners_wrap([N | Wraps]) ->
+ notify_owners({wrap, N}),
+ notify_owners_wrap(Wraps).
+
+notify_owners(Note) ->
+ L = get(log),
+ Msg = {disk_log, node(), L#log.name, Note},
+ lists:foreach(fun({Pid, true}) -> Pid ! Msg;
+ (_) -> ok
+ end, L#log.owners).
+
+cache_error(S, Pids) ->
+ Error = S#state.cache_error,
+ replies(Pids, Error),
+ state_err(S#state{cache_error = ok}, Error).
+
+state_ok(S) ->
+ state_err(S, ok).
+
+-spec state_err(#state{}, dlog_state_error()) -> #state{}.
+
+state_err(S, Err) when S#state.error_status =:= Err -> S;
+state_err(S, Err) ->
+ notify_owners({error_status, Err}),
+ S#state{error_status = Err}.
diff --git a/lib/kernel/src/disk_log.hrl b/lib/kernel/src/disk_log.hrl
new file mode 100644
index 0000000000..b0849145ca
--- /dev/null
+++ b/lib/kernel/src/disk_log.hrl
@@ -0,0 +1,161 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-define(DISK_LOG_NAME_TABLE, disk_log_names).
+-define(DISK_LOG_PID_TABLE, disk_log_pids).
+
+%% File format version
+-define(VERSION, 2).
+
+%% HEADSZ is the size of the file header,
+%% HEADERSZ is the size of the item header ( = ?SIZESZ + ?MAGICSZ).
+-define(HEADSZ, 8).
+-define(SIZESZ, 4).
+-define(MAGICSZ, 4).
+-define(HEADERSZ, 8).
+-define(MAGICHEAD, <<12,33,44,55>>).
+-define(MAGICINT, 203500599). %% ?MAGICHEAD = <<?MAGICINT:32>>
+-define(BIGMAGICHEAD, <<98,87,76,65>>).
+-define(BIGMAGICINT, 1649888321). %% ?BIGMAGICHEAD = <<?BIGMAGICINT:32>>
+-define(MIN_MD5_TERM, 65528).% (?MAX_CHUNK_SIZE - ?HEADERSZ)
+
+-define(MAX_FILES, 65000).
+-define(MAX_BYTES, ((1 bsl 64) - 1)).
+-define(MAX_CHUNK_SIZE, 65536).
+
+%% Object defines
+-define(LOGMAGIC, <<1,2,3,4>>).
+-define(OPENED, <<6,7,8,9>>).
+-define(CLOSED, <<99,88,77,11>>).
+
+%% Needed for the definition of fd()
+%% Must use include_lib() so that we always can be sure to find
+%% file.hrl. A relative path will not work in an installed system.
+-include_lib("kernel/include/file.hrl").
+
+%% Ugly workaround. If we are building the bootstrap compiler,
+%% file.hrl does not define the fd() type.
+-ifndef(FILE_HRL_).
+-type fd() :: pid() | #file_descriptor{}.
+-endif.
+
+%%------------------------------------------------------------------------
+%% Types -- alphabetically
+%%------------------------------------------------------------------------
+
+-type dlog_format() :: 'external' | 'internal'.
+-type dlog_format_type() :: 'halt_ext' | 'halt_int' | 'wrap_ext' | 'wrap_int'.
+-type dlog_head() :: 'none' | {'ok', binary()} | mfa().
+-type dlog_mode() :: 'read_only' | 'read_write'.
+-type dlog_name() :: atom() | string().
+-type dlog_optattr() :: 'name' | 'file' | 'linkto' | 'repair' | 'type'
+ | 'format' | 'size' | 'distributed' | 'notify'
+ | 'head' | 'head_func' | 'mode'.
+-type dlog_options() :: [{dlog_optattr(), any()}].
+-type dlog_repair() :: 'truncate' | boolean().
+-type dlog_size() :: 'infinity' | pos_integer()
+ | {pos_integer(), pos_integer()}.
+-type dlog_status() :: 'ok' | {'blocked', 'false' | [_]}. %QueueLogRecords
+-type dlog_type() :: 'halt' | 'wrap'.
+
+%%------------------------------------------------------------------------
+%% Records
+%%------------------------------------------------------------------------
+
+%% record of args for open
+-record(arg, {name = 0,
+ version = undefined,
+ file = none :: 'none' | string(),
+ repair = true :: dlog_repair(),
+ size = infinity :: dlog_size(),
+ type = halt :: dlog_type(),
+ distributed = false :: 'false' | {'true', [node()]},
+ format = internal :: dlog_format(),
+ linkto = self() :: 'none' | pid(),
+ head = none,
+ mode = read_write :: dlog_mode(),
+ notify = false :: boolean(),
+ options = [] :: dlog_options()}).
+
+-record(cache, %% Cache for logged terms (per file descriptor).
+ {fd :: fd(), %% File descriptor.
+ sz = 0 :: non_neg_integer(), %% Number of bytes in the cache.
+ c = [] :: iodata()} %% The cache.
+ ).
+
+-record(halt, %% For a halt log.
+ {fdc :: #cache{}, %% A cache record.
+ curB :: non_neg_integer(), %% Number of bytes on the file.
+ size :: dlog_size()}
+ ).
+
+-record(handle, %% For a wrap log.
+ {filename :: file:filename(), %% Same as log.filename
+ maxB :: pos_integer(), %% Max size of the files.
+ maxF :: pos_integer() | {pos_integer(),pos_integer()},
+ %% When pos_integer(), maximum number of files.
+ %% The form {NewMaxF, OldMaxF} is used when the
+ %% number of wrap log files is decreased. The files
+ %% are not removed when the size is changed, but
+ %% the next time they are to be used, i.e. the next
+ %% time the wrap log has filled the
+ %% Dir/Name.NewMaxF file.
+ curB :: non_neg_integer(), %% Number of bytes on current file.
+ curF :: integer(), %% Current file number.
+ cur_fdc :: #cache{}, %% Current file descriptor.
+ cur_name :: file:filename(), %% Current file name for error reports.
+ cur_cnt :: non_neg_integer(), %% Number of items on current file,
+ %% header inclusive.
+ acc_cnt :: non_neg_integer(), %% acc_cnt+cur_cnt is number of items
+ %% written since the log was opened.
+ firstPos :: non_neg_integer(), %% Start position for first item
+ %% (after header).
+ noFull :: non_neg_integer(), %% Number of overflows since last
+ %% use of info/1 on this log, or
+ %% since log was opened if info/1
+ %% has not yet been used on this log.
+ accFull :: non_neg_integer()} %% noFull+accFull is number of
+ %% overflows since the log was opened.
+ ).
+
+-record(log,
+ {status = ok :: dlog_status(),
+ name :: dlog_name(), %% the key leading to this structure
+ blocked_by = none :: 'none' | pid(), %% pid of blocker
+ users = 0 :: non_neg_integer(), %% non-linked users
+ filename :: file:filename(), %% real name of the file
+ owners = [] :: [{pid(), boolean()}],%% [{pid, notify}]
+ type = halt :: dlog_type(),
+ format = internal :: dlog_format(),
+ format_type :: dlog_format_type(),
+ head = none, %% none | {head, H} | {M,F,A}
+ %% called when wraplog wraps
+ mode :: dlog_mode(),
+ size, %% value of open/1 option 'size' (never changed)
+ extra :: #halt{} | #handle{}, %% type of the log
+ version :: integer()} %% if wrap log file
+ ).
+
+-record(continuation, %% Chunk continuation.
+ {pid = self() :: pid(),
+ pos :: non_neg_integer() | {integer(), non_neg_integer()},
+ b :: binary() | [] | pos_integer()}
+ ).
+
+-type dlog_cont() :: 'start' | #continuation{}.
diff --git a/lib/kernel/src/disk_log_1.erl b/lib/kernel/src/disk_log_1.erl
new file mode 100644
index 0000000000..7103417149
--- /dev/null
+++ b/lib/kernel/src/disk_log_1.erl
@@ -0,0 +1,1551 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(disk_log_1).
+
+%% Efficient file based log - implementation part
+
+-export([int_open/4, ext_open/4, logl/1, close/3, truncate/3, chunk/5,
+ sync/2, write_cache/2]).
+-export([mf_int_open/7, mf_int_log/3, mf_int_close/2, mf_int_inc/2,
+ mf_ext_inc/2, mf_int_chunk/4, mf_int_chunk_step/3,
+ mf_sync/1, mf_write_cache/1]).
+-export([mf_ext_open/7, mf_ext_log/3, mf_ext_close/2]).
+
+-export([print_index_file/1]).
+-export([read_index_file/1]).
+-export([read_size_file/1, read_size_file_version/1]).
+-export([chunk_read_only/5]).
+-export([mf_int_chunk_read_only/4]).
+-export([change_size_wrap/3]).
+-export([get_wrap_size/1]).
+-export([is_head/1]).
+-export([position/3, truncate_at/3, fwrite/4, fclose/2]).
+
+-compile({inline,[{scan_f2,7}]}).
+
+-import(lists, [concat/1, reverse/1, sum/1]).
+
+-include("disk_log.hrl").
+
+%%% At the head of a LOG file we have [?LOGMAGIC, ?OPENED | ?CLOSED].
+%%% Otherwise it is not a LOG file. Following that comes the head, and
+%%% then the logged items.
+%%%
+%%% There are four formats of wrap log files (so far). Among the first
+%%% three versions, only the size file and the index file differ. The
+%%% fourth version, 2(a), adds some protection against damaged item
+%%% sizes.
+%%% Version 0: no "siz" file
+%%% Version 1: "siz" file, 4 byte sizes
+%%% Version 2: 8 byte sizes (support for large files)
+%%% Version 2(a): Change of the format of logged items:
+%%% if the size of a term binary is greater than or equal to
+%%% ?MIN_MD5_TERM, a logged item looks like
+%%% <<Size:32, ?BIGMAGICHEAD:32, MD5:128, Term/binary>>,
+%%% otherwise <<Size:32, ?BIGMAGICHEAD:32, Term/binary>>.
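+%%%
+%%% Editor's illustration (a sketch, not part of the original source):
+%%% a hypothetical encode_item/1, mirroring logl/3 below, shows how a
+%%% single version 2(a) item is laid out:
+%%%
+%%%   encode_item(Bin) when is_binary(Bin) ->
+%%%       Sz = byte_size(Bin),
+%%%       BSz = <<Sz:?SIZESZ/unit:8>>,
+%%%       case Sz < ?MIN_MD5_TERM of
+%%%           true  -> [BSz, ?BIGMAGICHEAD, Bin];
+%%%           false -> [BSz, ?BIGMAGICHEAD, erlang:md5(BSz), Bin]
+%%%       end.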
+
+%%%----------------------------------------------------------------------
+%%% API
+%%%----------------------------------------------------------------------
+
+%% -> {ok, NoBytes, NewFdC} | {Error, NewFdC}
+log(FdC, FileName, X) ->
+ {Bs, Size} = logl(X, [], 0),
+ case fwrite(FdC, FileName, Bs, Size) of
+ {ok, NewFdC} ->
+ {ok, Size, NewFdC};
+ Error ->
+ Error
+ end.
+
+-spec logl([binary()]) -> {iolist(), non_neg_integer()}.
+logl(X) ->
+ logl(X, [], 0).
+
+logl([X | T], Bs, Size) ->
+ Sz = byte_size(X),
+ BSz = <<Sz:?SIZESZ/unit:8>>,
+ NBs = case Sz < ?MIN_MD5_TERM of
+ true ->
+ [Bs, BSz, ?BIGMAGICHEAD | X];
+ false ->
+ MD5 = erlang:md5(BSz),
+ [Bs, BSz, ?BIGMAGICHEAD, MD5 | X]
+ end,
+ logl(T, NBs, Size + ?HEADERSZ + Sz);
+logl([], Bs, Size) ->
+ {Bs, Size}.
+
+%% -> {ok, NewFdC} | {Error, NewFdC}
+write_cache(#cache{fd = Fd, c = C}, FName) ->
+ erase(write_cache_timer_is_running),
+ write_cache(Fd, FName, C).
+
+%% -> {Reply, NewFdC}; Reply = ok | Error
+sync(FdC, FName) ->
+ fsync(FdC, FName).
+
+%% -> {Reply, NewFdC}; Reply = ok | Error
+truncate(FdC, FileName, Head) ->
+ Reply = truncate_at(FdC, FileName, ?HEADSZ),
+ case Reply of
+ {ok, _} when Head =:= none ->
+ Reply;
+ {ok, FdC1} ->
+ {ok, B} = Head,
+ case log(FdC1, FileName, [B]) of
+ {ok, _NoBytes, NewFdC} ->
+ {ok, NewFdC};
+ Reply2 ->
+ Reply2
+ end;
+ _ ->
+ Reply
+ end.
+
+%% -> {NewFdC, Reply}, Reply = {Cont, Binaries} | {error, Reason} | eof
+chunk(FdC, FileName, Pos, B, N) when is_binary(B) ->
+ true = byte_size(B) >= ?HEADERSZ,
+ do_handle_chunk(FdC, FileName, Pos, B, N);
+chunk(FdC, FileName, Pos, NoBytes, N) ->
+ MaxNoBytes = case NoBytes of
+ [] -> ?MAX_CHUNK_SIZE;
+ _ -> erlang:max(NoBytes, ?MAX_CHUNK_SIZE)
+ end,
+ case read_chunk(FdC, FileName, Pos, MaxNoBytes) of
+ {NewFdC, {ok, Bin}} when byte_size(Bin) < ?HEADERSZ ->
+ {NewFdC, {error, {corrupt_log_file, FileName}}};
+ {NewFdC, {ok, Bin}} when NoBytes =:= []; byte_size(Bin) >= NoBytes ->
+ NewPos = Pos + byte_size(Bin),
+ do_handle_chunk(NewFdC, FileName, NewPos, Bin, N);
+ {NewFdC, {ok, _Bin}} ->
+ {NewFdC, {error, {corrupt_log_file, FileName}}};
+ {NewFdC, eof} when is_integer(NoBytes) -> % "cannot happen"
+ {NewFdC, {error, {corrupt_log_file, FileName}}};
+ Other -> % eof or error
+ Other
+ end.
+
+do_handle_chunk(FdC, FileName, Pos, B, N) ->
+ case handle_chunk(B, Pos, N, []) of
+ corrupt ->
+ {FdC, {error, {corrupt_log_file, FileName}}};
+ {C, []} ->
+ chunk(FdC, FileName, C#continuation.pos, C#continuation.b, N);
+ C_Ack ->
+ {FdC, C_Ack}
+ end.
+
+handle_chunk(B, Pos, 0, Ack) when byte_size(B) >= ?HEADERSZ ->
+ {#continuation{pos = Pos, b = B}, Ack};
+handle_chunk(B= <<Size:?SIZESZ/unit:8, ?BIGMAGICINT:?MAGICSZ/unit:8,
+ Tail/binary>>, Pos, N, Ack) when Size < ?MIN_MD5_TERM ->
+ case Tail of
+ <<BinTerm:Size/binary, Tail2/binary>> ->
+ %% The client calls binary_to_term/1.
+ handle_chunk(Tail2, Pos, N-1, [BinTerm | Ack]);
+ _ ->
+ BytesToRead = Size + ?HEADERSZ,
+ {#continuation{pos = Pos - byte_size(B), b = BytesToRead}, Ack}
+ end;
+handle_chunk(B= <<Size:?SIZESZ/unit:8, ?BIGMAGICINT:?MAGICSZ/unit:8,
+ Tail/binary>>, Pos, _N, Ack) -> % when Size >= ?MIN_MD5_TERM
+ MD5 = erlang:md5(<<Size:?SIZESZ/unit:8>>),
+ case Tail of
+ %% The requested object is always bigger than a chunk.
+ <<MD5:16/binary, Bin:Size/binary>> ->
+ {#continuation{pos = Pos, b = []}, [Bin | Ack]};
+ <<MD5:16/binary, _/binary>> ->
+ BytesToRead = Size + ?HEADERSZ + 16,
+ {#continuation{pos = Pos - byte_size(B), b = BytesToRead}, Ack};
+ _ when byte_size(Tail) >= 16 ->
+ corrupt;
+ _ ->
+ {#continuation{pos = Pos - byte_size(B), b = []}, Ack}
+ end;
+handle_chunk(B= <<Size:?SIZESZ/unit:8, ?MAGICINT:?MAGICSZ/unit:8, Tail/binary>>,
+ Pos, N, Ack) ->
+ %% Version 2, before 2(a).
+ case Tail of
+ <<BinTerm:Size/binary, Tail2/binary>> ->
+ handle_chunk(Tail2, Pos, N-1, [BinTerm | Ack]);
+ _ ->
+ %% We read the whole thing into one binary, even if Size is huge.
+ BytesToRead = Size + ?HEADERSZ,
+ {#continuation{pos = Pos - byte_size(B), b = BytesToRead}, Ack}
+ end;
+handle_chunk(B, _Pos, _N, _Ack) when byte_size(B) >= ?HEADERSZ ->
+ corrupt;
+handle_chunk(B, Pos, _N, Ack) ->
+ {#continuation{pos = Pos-byte_size(B), b = []}, Ack}.
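+
+%% Editor's note: in handle_chunk/4 above the second header word selects
+%% the clause -- ?BIGMAGICINT marks a version 2(a) item (carrying an MD5
+%% of the size field when Size >= ?MIN_MD5_TERM), ?MAGICINT marks an
+%% item written before version 2(a), and any other value is reported as
+%% corruption.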
+
+read_chunk(FdC, FileName, Pos, MaxBytes) ->
+ {FdC1, R} = pread(FdC, FileName, Pos + ?HEADSZ, MaxBytes),
+ case position(FdC1, FileName, eof) of
+ {ok, NewFdC, _Pos} ->
+ {NewFdC, R};
+ {Error, NewFdC} ->
+ {NewFdC, Error}
+ end.
+
+%% Used by wrap_log_reader.
+%% -> {NewFdC, Reply},
+%% Reply = {Cont, Binaries, Bad} (Bad >= 0) | {error, Reason} | eof
+chunk_read_only(FdC = #cache{}, FileName, Pos, B, N) ->
+ do_chunk_read_only(FdC, FileName, Pos, B, N);
+chunk_read_only(Fd, FileName, Pos, B, N) ->
+ %% wrap_log_reader calling...
+ FdC = #cache{fd = Fd},
+ {_NFdC, Reply} = do_chunk_read_only(FdC, FileName, Pos, B, N),
+ Reply.
+
+do_chunk_read_only(FdC, FileName, Pos, B, N) when is_binary(B) ->
+ true = byte_size(B) >= ?HEADERSZ,
+ do_handle_chunk_ro(FdC, FileName, Pos, B, N);
+do_chunk_read_only(FdC, FileName, Pos, NoBytes, N) ->
+ MaxNoBytes = case NoBytes of
+ [] -> ?MAX_CHUNK_SIZE;
+ _ -> erlang:max(NoBytes, ?MAX_CHUNK_SIZE)
+ end,
+ case read_chunk_ro(FdC, FileName, Pos, MaxNoBytes) of
+ {NewFdC, {ok, Bin}} when byte_size(Bin) < ?HEADERSZ ->
+ NewCont = #continuation{pos = Pos+byte_size(Bin), b = []},
+ {NewFdC, {NewCont, [], byte_size(Bin)}};
+ {NewFdC, {ok, Bin}} when NoBytes =:= []; byte_size(Bin) >= NoBytes ->
+ NewPos = Pos + byte_size(Bin),
+ do_handle_chunk_ro(NewFdC, FileName, NewPos, Bin, N);
+ {NewFdC, {ok, Bin}} ->
+ NewCont = #continuation{pos = Pos+byte_size(Bin), b = []},
+ {NewFdC, {NewCont, [], byte_size(Bin)-?HEADERSZ}};
+ {NewFdC, eof} when is_integer(NoBytes) -> % "cannot happen"
+ {NewFdC, eof}; % what else?
+ Other ->
+ Other
+ end.
+
+do_handle_chunk_ro(FdC, FileName, Pos, B, N) ->
+ case handle_chunk_ro(B, Pos, N, [], 0) of
+ {C, [], 0} ->
+ #continuation{pos = NewPos, b = NoBytes} = C,
+ do_chunk_read_only(FdC, FileName, NewPos, NoBytes, N);
+ C_Ack_Bad ->
+ {FdC, C_Ack_Bad}
+ end.
+
+handle_chunk_ro(B, Pos, 0, Ack, Bad) when byte_size(B) >= ?HEADERSZ ->
+ {#continuation{pos = Pos, b = B}, Ack, Bad};
+handle_chunk_ro(B= <<Size:?SIZESZ/unit:8, ?BIGMAGICINT:?MAGICSZ/unit:8,
+ Tail/binary>>, Pos, N, Ack, Bad) when Size < ?MIN_MD5_TERM ->
+ case Tail of
+ <<BinTerm:Size/binary, Tail2/binary>> ->
+ handle_chunk_ro(Tail2, Pos, N-1, [BinTerm | Ack], Bad);
+ _ ->
+ BytesToRead = Size + ?HEADERSZ,
+ {#continuation{pos = Pos - byte_size(B), b = BytesToRead}, Ack, Bad}
+ end;
+handle_chunk_ro(B= <<Size:?SIZESZ/unit:8, ?BIGMAGICINT:?MAGICSZ/unit:8,
+ Tail/binary>>, Pos, N, Ack, Bad) -> % when Size>=?MIN_MD5_TERM
+ MD5 = erlang:md5(<<Size:?SIZESZ/unit:8>>),
+ case Tail of
+ <<MD5:16/binary, Bin:Size/binary>> ->
+ %% The requested object is always bigger than a chunk.
+ {#continuation{pos = Pos, b = []}, [Bin | Ack], Bad};
+ <<MD5:16/binary, _/binary>> ->
+ BytesToRead = Size + ?HEADERSZ + 16,
+ {#continuation{pos = Pos - byte_size(B), b = BytesToRead}, Ack, Bad};
+ <<_BadMD5:16/binary, _:1/unit:8, Tail2/binary>> ->
+ handle_chunk_ro(Tail2, Pos, N-1, Ack, Bad+1);
+ _ ->
+ {#continuation{pos = Pos - byte_size(B), b = []}, Ack, Bad}
+ end;
+handle_chunk_ro(B= <<Size:?SIZESZ/unit:8, ?MAGICINT:?MAGICSZ/unit:8,
+ Tail/binary>>, Pos, N, Ack, Bad) ->
+ %% Version 2, before 2(a).
+ case Tail of
+ <<BinTerm:Size/binary, Tail2/binary>> ->
+ handle_chunk_ro(Tail2, Pos, N-1, [BinTerm | Ack], Bad);
+ _ ->
+ %% We read the whole thing into one binary, even if Size is huge.
+ BytesToRead = Size + ?HEADERSZ,
+ {#continuation{pos = Pos - byte_size(B), b = BytesToRead}, Ack, Bad}
+ end;
+handle_chunk_ro(B, Pos, N, Ack, Bad) when byte_size(B) >= ?HEADERSZ ->
+ <<_:1/unit:8, B2/binary>> = B,
+ handle_chunk_ro(B2, Pos, N-1, Ack, Bad+1);
+handle_chunk_ro(B, Pos, _N, Ack, Bad) ->
+ {#continuation{pos = Pos-byte_size(B), b = []}, Ack, Bad}.
+
+read_chunk_ro(FdC, FileName, Pos, MaxBytes) ->
+ pread(FdC, FileName, Pos + ?HEADSZ, MaxBytes).
+
+%% -> ok | throw(Error)
+close(#cache{fd = Fd, c = []}, _FileName, read_only) ->
+ file:close(Fd);
+close(#cache{fd = Fd, c = C}, FileName, read_write) ->
+ {Reply, _NewFdC} = write_cache(Fd, FileName, C),
+ mark(Fd, FileName, ?CLOSED),
+ file:close(Fd),
+ if Reply =:= ok -> ok; true -> throw(Reply) end.
+
+%% Open an internal file. Head is ignored if Mode is read_only.
+%% int_open(FileName, Repair, Mode, Head) ->
+%% {ok, {Alloc, FdC, HeadSize, FileSize}}
+%% | {repaired, FdC, Terms, BadBytes, FileSize}
+%% | throw(Error)
+%% Alloc = new | existed
+%% HeadSize = {NumberOfItemsWritten, NumberOfBytesWritten}
+%% (HeadSize equals {0, 0} if Alloc =:= existed, or if no header was written.)
+int_open(FName, truncate, read_write, Head) ->
+ new_int_file(FName, Head);
+int_open(FName, Repair, read_write, Head) ->
+ case open_read(FName) of
+ {ok, Fd} -> %% File exists
+ case file:read(Fd, ?HEADSZ) of
+ {ok, FileHead} ->
+ case is_head(FileHead) of
+ yes ->
+ file:close(Fd),
+ case open_update(FName) of
+ {ok, Fd2} ->
+ mark(Fd2, FName, ?OPENED),
+ FdC1 = #cache{fd = Fd2},
+ {FdC, P} = position_close(FdC1, FName,eof),
+ {ok, {existed, FdC, {0, 0}, P}};
+ Error ->
+ file_error(FName, Error)
+ end;
+ yes_not_closed when Repair ->
+ repair(Fd, FName);
+ yes_not_closed when not Repair ->
+ file:close(Fd),
+ throw({error, {need_repair, FName}});
+ no ->
+ file:close(Fd),
+ throw({error, {not_a_log_file, FName}})
+ end;
+ eof ->
+ file:close(Fd),
+ throw({error, {not_a_log_file, FName}});
+ Error ->
+ file_error_close(Fd, FName, Error)
+ end;
+ _Other ->
+ new_int_file(FName, Head)
+ end;
+int_open(FName, _Repair, read_only, _Head) ->
+ case open_read(FName) of
+ {ok, Fd} -> %% File exists
+ case file:read(Fd, ?HEADSZ) of
+ {ok, Head} ->
+ case is_head(Head) of
+ yes ->
+ {ok, P} = position_close2(Fd, FName, eof),
+ FdC = #cache{fd = Fd},
+ {ok, {existed, FdC, {0, 0}, P}};
+ yes_not_closed ->
+ {ok, P} = position_close2(Fd, FName, eof),
+ FdC = #cache{fd = Fd},
+ {ok, {existed, FdC, {0, 0}, P}};
+ no ->
+ file:close(Fd),
+ throw({error, {not_a_log_file, FName}})
+ end;
+ eof ->
+ file:close(Fd),
+ throw({error, {not_a_log_file, FName}});
+ Error ->
+ file_error_close(Fd, FName, Error)
+ end;
+ Error ->
+ file_error(FName, Error)
+ end.
+
+new_int_file(FName, Head) ->
+ case open_update(FName) of
+ {ok, Fd} ->
+ ok = truncate_at_close2(Fd, FName, bof),
+ fwrite_close2(Fd, FName, [?LOGMAGIC, ?OPENED]),
+ {FdC1, Nh, HeadSz} = int_log_head(Fd, Head),
+ {FdC, FileSize} = position_close(FdC1, FName, cur),
+ {ok, {new, FdC, {Nh, ?HEADERSZ + HeadSz}, FileSize}};
+ Error ->
+ file_error(FName, Error)
+ end.
+
+%% -> {FdC, NoItemsWritten, NoBytesWritten} | throw(Error)
+int_log_head(Fd, Head) ->
+ case lh(Head, internal) of
+ {ok, BinHead} ->
+ {Bs, Size} = logl([BinHead]),
+ {ok, FdC} = fwrite_header(Fd, Bs, Size),
+ {FdC, 1, Size};
+ none ->
+ {#cache{fd = Fd}, 0, 0};
+ Error ->
+ file:close(Fd),
+ throw(Error)
+ end.
+
+%% Open an external file.
+%% -> {ok, {Alloc, FdC, HeadSize}, FileSize} | throw(Error)
+ext_open(FName, truncate, read_write, Head) ->
+ new_ext_file(FName, Head);
+ext_open(FName, _Repair, read_write, Head) ->
+ case file:read_file_info(FName) of
+ {ok, _FileInfo} ->
+ case open_update(FName) of
+ {ok, Fd} ->
+ {ok, P} = position_close2(Fd, FName, eof),
+ FdC = #cache{fd = Fd},
+ {ok, {existed, FdC, {0, 0}, P}};
+ Error ->
+ file_error(FName, Error)
+ end;
+ _Other ->
+ new_ext_file(FName, Head)
+ end;
+ext_open(FName, _Repair, read_only, _Head) ->
+ case open_read(FName) of
+ {ok, Fd} ->
+ {ok, P} = position_close2(Fd, FName, eof),
+ FdC = #cache{fd = Fd},
+ {ok, {existed, FdC, {0, 0}, P}};
+ Error ->
+ file_error(FName, Error)
+ end.
+
+new_ext_file(FName, Head) ->
+ case open_truncate(FName) of
+ {ok, Fd} ->
+ {FdC1, HeadSize} = ext_log_head(Fd, Head),
+ {FdC, FileSize} = position_close(FdC1, FName, cur),
+ {ok, {new, FdC, HeadSize, FileSize}};
+ Error ->
+ file_error(FName, Error)
+ end.
+
+%% -> {FdC, {NoItemsWritten, NoBytesWritten}} | throw(Error)
+ext_log_head(Fd, Head) ->
+ case lh(Head, external) of
+ {ok, BinHead} ->
+ Size = byte_size(BinHead),
+ {ok, FdC} = fwrite_header(Fd, BinHead, Size),
+ {FdC, {1, Size}};
+ none ->
+ {#cache{fd = Fd}, {0, 0}};
+ Error ->
+ file:close(Fd),
+ throw(Error)
+ end.
+
+%% -> _Any | throw()
+mark(Fd, FileName, What) ->
+ position_close2(Fd, FileName, 4),
+ fwrite_close2(Fd, FileName, What).
+
+%% -> {ok, Bin} | Error
+lh({ok, Bin}, _Format) ->
+ {ok, Bin};
+lh({M, F, A}, Format) when is_list(A) ->
+ case catch apply(M, F, A) of
+ {ok, Head} when Format =:= internal ->
+ {ok, term_to_binary(Head)};
+ {ok, Bin} when is_binary(Bin) ->
+ {ok, Bin};
+ {ok, Bytes} ->
+ case catch list_to_binary(Bytes) of
+ {'EXIT', _} ->
+ {error, {invalid_header, {{M,F,A}, {ok, Bytes}}}};
+ Bin ->
+ {ok, Bin}
+ end;
+ {'EXIT', Error} ->
+ {error, {invalid_header, {{M,F,A}, Error}}};
+ Error ->
+ {error, {invalid_header, {{M,F,A}, Error}}}
+ end;
+lh({M, F, A}, _Format) -> % cannot happen
+ {error, {invalid_header, {M, F, A}}};
+lh(none, _Format) ->
+ none;
+lh(H, _F) -> % cannot happen
+ {error, {invalid_header, H}}.
+
+repair(In, File) ->
+ FSz = file_size(File),
+ error_logger:info_msg("disk_log: repairing ~p ...\n", [File]),
+ Tmp = add_ext(File, "TMP"),
+ {ok, {_Alloc, Out, {0, _}, _FileSize}} = new_int_file(Tmp, none),
+ scan_f_read(<<>>, In, Out, File, FSz, Tmp, ?MAX_CHUNK_SIZE, 0, 0).
+
+scan_f_read(B, In, Out, File, FSz, Tmp, MaxBytes, No, Bad) ->
+ case file:read(In, MaxBytes) of
+ eof ->
+ done_scan(In, Out, Tmp, File, No, Bad+byte_size(B));
+ {ok, Bin} ->
+ NewBin = list_to_binary([B, Bin]),
+ {NB, NMax, Ack, NNo, NBad} =
+ scan_f(NewBin, FSz, [], No, Bad),
+ case log(Out, Tmp, lists:reverse(Ack)) of
+ {ok, _Size, NewOut} ->
+ scan_f_read(NB, In, NewOut, File, FSz, Tmp, NMax,NNo,NBad);
+ {{error, {file_error, _Filename, Error}}, NewOut} ->
+ repair_err(In, NewOut, Tmp, File, {error, Error})
+ end;
+ Error ->
+ repair_err(In, Out, Tmp, File, Error)
+ end.
+
+scan_f(B = <<Size:?SIZESZ/unit:8, ?BIGMAGICINT:?MAGICSZ/unit:8, Tail/binary>>,
+ FSz, Ack, No, Bad) when Size < ?MIN_MD5_TERM ->
+ scan_f2(B, FSz, Ack, No, Bad, Size, Tail);
+scan_f(B = <<Size:?SIZESZ/unit:8, ?BIGMAGICINT:?MAGICSZ/unit:8, Tail/binary>>,
+ FSz, Ack, No, Bad) -> % when Size >= ?MIN_MD5_TERM
+ MD5 = erlang:md5(<<Size:?SIZESZ/unit:8>>),
+ case Tail of
+ <<MD5:16/binary, BinTerm:Size/binary, Tail2/binary>> ->
+ case catch binary_to_term(BinTerm) of
+ {'EXIT', _} ->
+ scan_f(Tail2, FSz, Ack, No, Bad+Size);
+ _Term ->
+ scan_f(Tail2, FSz, [BinTerm | Ack], No+1, Bad)
+ end;
+ <<MD5:16/binary, _/binary>> ->
+ {B, Size-byte_size(Tail)+16, Ack, No, Bad};
+ _ when byte_size(Tail) < 16 ->
+ {B, Size-byte_size(Tail)+16, Ack, No, Bad};
+ _ ->
+ <<_:8, B2/binary>> = B,
+ scan_f(B2, FSz, Ack, No, Bad+1)
+ end;
+scan_f(B = <<Size:?SIZESZ/unit:8, ?MAGICINT:?MAGICSZ/unit:8, Tail/binary>>,
+ FSz, Ack, No, Bad) when Size =< FSz ->
+ %% Since the file is not compressed, the item size cannot exceed
+ %% the file size.
+ scan_f2(B, FSz, Ack, No, Bad, Size, Tail);
+scan_f(B = <<_:?HEADERSZ/unit:8, _/binary>>, FSz, Ack, No, Bad) ->
+ <<_:8, B2/binary>> = B,
+ scan_f(B2, FSz, Ack, No, Bad + 1);
+scan_f(B, _FSz, Ack, No, Bad) ->
+ {B, ?MAX_CHUNK_SIZE, Ack, No, Bad}.
+
+scan_f2(B, FSz, Ack, No, Bad, Size, Tail) ->
+ case Tail of
+ <<BinTerm:Size/binary, Tail2/binary>> ->
+ case catch binary_to_term(BinTerm) of
+ {'EXIT', _} ->
+ <<_:8, B2/binary>> = B,
+ scan_f(B2, FSz, Ack, No, Bad+1);
+ _Term ->
+ scan_f(Tail2, FSz, [BinTerm | Ack], No+1, Bad)
+ end;
+ _ ->
+ {B, Size-byte_size(Tail), Ack, No, Bad}
+ end.
+
+done_scan(In, Out, OutName, FName, RecoveredTerms, BadChars) ->
+ file:close(In),
+ case catch fclose(Out, OutName) of
+ ok ->
+ case file:rename(OutName, FName) of
+ ok ->
+ case open_update(FName) of
+ {ok, New} ->
+ {ok, P} = position_close2(New, FName, eof),
+ FdC = #cache{fd = New},
+ {repaired, FdC, RecoveredTerms, BadChars, P};
+ Error ->
+ file_error(FName, Error)
+ end;
+ Error ->
+ file:delete(OutName),
+ file_error(FName, Error)
+ end;
+ Error ->
+ file:delete(OutName),
+ throw(Error)
+ end.
+
+repair_err(In, Out, OutName, ErrFileName, Error) ->
+ file:close(In),
+ catch fclose(Out, OutName),
+ % OutName is often the culprit; try to remove it anyway...
+ file:delete(OutName),
+ file_error(ErrFileName, Error).
+
+%% Used by wrap_log_reader.
+-spec is_head(binary()) -> 'yes' | 'yes_not_closed' | 'no'.
+is_head(<<M:4/binary, S:4/binary>>) when ?LOGMAGIC =:= M, ?CLOSED =:= S ->
+ yes;
+is_head(<<M:4/binary, S:4/binary>>) when ?LOGMAGIC =:= M, ?OPENED =:= S ->
+ yes_not_closed;
+is_head(Bin) when is_binary(Bin) ->
+ no.
+
+%%-----------------------------------------------------------------
+%% Func: mf_int_open/7, mf_ext_open/7
+%% Args: FName = file:filename()
+%% MaxB = integer()
+%% MaxF = integer()
+%% Repair = truncate | true | false
+%% Mode = read_write | read_only
+%% Head = none | {ok, Bin} | {M, F, A}
+%% Version = integer()
+%% Purpose: An ADT for wrapping logs. mf_int_ writes binaries (mf_ext_
+%% writes bytes)
+%% to files called FName.1, FName.2, ..., FName.MaxF.
+%% Writes at most MaxB bytes to each file.
+%% Creates a file called Name.idx in the Dir. This file contains
+%% the number of the last written file as one byte, and following
+%% that, the size of each file (the size is the number of items
+%% on the file).
+%% On startup, this file is read, and the next available
+%% filename is used as the first log file.
+%% Reports can be browsed with Report Browser Tool (rb), or
+%% read with disk_log.
+%%-----------------------------------------------------------------
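+%%
+%% Editor's illustration (a sketch with a hypothetical path): a wrap log
+%% opened as mf_int_open("/tmp/events", MaxB, 3, ...) keeps its data in
+%%
+%%   /tmp/events.1  /tmp/events.2  /tmp/events.3   (the wrap files)
+%%   /tmp/events.idx  (last written file number and per-file item counts)
+%%   /tmp/events.siz  (the configured MaxB and MaxF)
+%%
+%% where the file names are produced by add_ext/2 below.
+%%-----------------------------------------------------------------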
+-spec mf_int_open(FName :: file:filename(),
+ MaxB :: integer(),
+ MaxF :: integer(),
+ Repair :: dlog_repair(),
+ Mode :: dlog_mode(),
+ Head :: dlog_head(),
+ Version :: integer())
+ -> {'ok', #handle{}, integer()}
+ | {'repaired', #handle{},
+ non_neg_integer(), non_neg_integer(), non_neg_integer()}.
+%% | throw(FileError)
+mf_int_open(FName, MaxB, MaxF, Repair, Mode, Head, Version) ->
+ {First, Sz, TotSz, NFiles} = read_index_file(Repair, FName, MaxF),
+ write_size_file(Mode, FName, MaxB, MaxF, Version),
+ NewMaxF = if
+ NFiles > MaxF ->
+ {MaxF, NFiles};
+ true ->
+ MaxF
+ end,
+ case int_file_open(FName, First, 0, 0, Head, Repair, Mode) of
+ {ok, FdC, FileName, Lost, {NoItems, NoBytes}, FSz} ->
+ % firstPos = NoBytes is not always correct when the file
+ % existed, but it will have to do since we don't know
+ % where the header ends.
+ CurCnt = Sz + NoItems - Lost,
+ {ok, #handle{filename = FName, maxB = MaxB,
+ maxF = NewMaxF, curF = First, cur_fdc = FdC,
+ cur_name = FileName, cur_cnt = CurCnt,
+ acc_cnt = -Sz, curB = FSz,
+ firstPos = NoBytes, noFull = 0, accFull = 0},
+ TotSz + CurCnt};
+ {repaired, FdC, FileName, Rec, Bad, FSz} ->
+ {repaired,
+ #handle{filename = FName, maxB = MaxB, cur_name = FileName,
+ maxF = NewMaxF, curF = First, cur_fdc = FdC,
+ cur_cnt = Rec, acc_cnt = -Rec, curB = FSz,
+ firstPos = 0, noFull = 0, accFull = 0},
+ Rec, Bad, TotSz + Rec}
+ end.
+
+%% -> {ok, handle(), Lost} | {error, Error, handle()}
+mf_int_inc(Handle, Head) ->
+ #handle{filename = FName, cur_cnt = CurCnt, acc_cnt = AccCnt,
+ cur_name = FileName, curF = CurF, maxF = MaxF,
+ cur_fdc = CurFdC, noFull = NoFull} = Handle,
+ case catch wrap_int_log(FName, CurF, MaxF, CurCnt, Head) of
+ {NewF, NewMaxF, NewFdC, NewFileName, Nh, FirstPos, Lost} ->
+ Handle1 = Handle#handle{cur_fdc = NewFdC, curF = NewF,
+ cur_name = NewFileName,
+ cur_cnt = Nh, acc_cnt = AccCnt + CurCnt,
+ maxF = NewMaxF, firstPos = FirstPos,
+ curB = FirstPos, noFull = NoFull + 1},
+ case catch close(CurFdC, FileName, read_write) of
+ ok ->
+ {ok, Handle1, Lost};
+ Error -> % Error in the last file, new file opened.
+ {error, Error, Handle1}
+ end;
+ Error ->
+ {error, Error, Handle}
+ end.
+
+%% -> {ok, handle(), Logged, Lost, NoWraps} | {ok, handle(), Logged}
+%% | {error, Error, handle(), Logged, Lost}
+%% The returned handle is not always valid - something may
+%% have been written before things went wrong.
+mf_int_log(Handle, Bins, Head) ->
+ mf_int_log(Handle, Bins, Head, 0, []).
+
+mf_int_log(Handle, [], _Head, No, []) ->
+ {ok, Handle, No};
+mf_int_log(Handle, [], _Head, No, Wraps0) ->
+ Wraps = reverse(Wraps0),
+ {ok, Handle, No, sum(Wraps), Wraps};
+mf_int_log(Handle, Bins, Head, No0, Wraps) ->
+ #handle{curB = CurB, maxB = MaxB, cur_name = FileName, cur_fdc = CurFdC,
+ firstPos = FirstPos0, cur_cnt = CurCnt} = Handle,
+ {FirstBins, LastBins, NoBytes, N} =
+ int_split_bins(CurB, MaxB, FirstPos0, Bins),
+ case FirstBins of
+ [] ->
+ #handle{filename = FName, curF = CurF, maxF = MaxF,
+ acc_cnt = AccCnt, noFull = NoFull} = Handle,
+ case catch wrap_int_log(FName, CurF, MaxF, CurCnt, Head) of
+ {NewF, NewMaxF, NewFdC, NewFileName, Nh, FirstPos, Lost} ->
+ Handle1 = Handle#handle{cur_fdc = NewFdC, curF = NewF,
+ cur_cnt = Nh,
+ cur_name = NewFileName,
+ acc_cnt = AccCnt + CurCnt,
+ maxF = NewMaxF,
+ curB = FirstPos,
+ firstPos = FirstPos,
+ noFull = NoFull + 1},
+ case catch close(CurFdC, FileName, read_write) of
+ ok ->
+ mf_int_log(Handle1, Bins, Head, No0 + Nh,
+ [Lost | Wraps]);
+ Error ->
+ Lost1 = Lost + sum(Wraps),
+ {error, Error, Handle1, No0 + Nh, Lost1}
+ end;
+ Error ->
+ {error, Error, Handle, No0, sum(Wraps)}
+ end;
+ _ ->
+ case fwrite(CurFdC, FileName, FirstBins, NoBytes) of
+ {ok, NewCurFdC} ->
+ Handle1 = Handle#handle{cur_fdc = NewCurFdC,
+ curB = CurB + NoBytes,
+ cur_cnt = CurCnt + N},
+ mf_int_log(Handle1, LastBins, Head, No0 + N, Wraps);
+ {Error, NewCurFdC} ->
+ Handle1 = Handle#handle{cur_fdc = NewCurFdC},
+ {error, Error, Handle1, No0, sum(Wraps)}
+ end
+ end.
+
+wrap_int_log(FName, CurF, MaxF, CurCnt, Head) ->
+ {NewF, NewMaxF} = inc_wrap(FName, CurF, MaxF),
+ {ok, NewFdC, NewFileName, Lost, {Nh, FirstPos}, _FileSize} =
+ int_file_open(FName, NewF, CurF, CurCnt, Head),
+ {NewF, NewMaxF, NewFdC, NewFileName, Nh, FirstPos, Lost}.
+
+%% -> {NewHandle, Reply}, Reply = {Cont, Binaries} | {error, Reason} | eof
+mf_int_chunk(Handle, 0, Bin, N) ->
+ FirstF = find_first_file(Handle),
+ mf_int_chunk(Handle, {FirstF, 0}, Bin, N);
+mf_int_chunk(#handle{curF = FileNo, cur_fdc = FdC, cur_name = FileName}
+ = Handle, {FileNo, Pos}, Bin, N) ->
+ {NewFdC, Reply} = chunk(FdC, FileName, Pos, Bin, N),
+ {Handle#handle{cur_fdc = NewFdC}, conv(Reply, FileNo)};
+mf_int_chunk(Handle, {FileNo, Pos}, Bin, N) ->
+ FName = add_ext(Handle#handle.filename, FileNo),
+ NFileNo = inc(FileNo, Handle#handle.maxF),
+ case catch int_open(FName, true, read_only, any) of
+ {error, _Reason} ->
+ error_logger:info_msg("disk_log: chunk error. File ~p missing.\n\n",
+ [FName]),
+ mf_int_chunk(Handle, {NFileNo, 0}, [], N);
+ {ok, {_Alloc, FdC, _HeadSize, _FileSize}} ->
+ case chunk(FdC, FName, Pos, Bin, N) of
+ {NewFdC, eof} ->
+ file:close(NewFdC#cache.fd),
+ mf_int_chunk(Handle, {NFileNo, 0}, [], N);
+ {NewFdC, Other} ->
+ file:close(NewFdC#cache.fd),
+ {Handle, conv(Other, FileNo)}
+ end
+ end.
+
+%% -> {NewHandle, Reply},
+%% Reply = {Cont, Binaries, Bad} (Bad >= 0) | {error, Reason} | eof
+mf_int_chunk_read_only(Handle, 0, Bin, N) ->
+ FirstF = find_first_file(Handle),
+ mf_int_chunk_read_only(Handle, {FirstF, 0}, Bin, N);
+mf_int_chunk_read_only(#handle{curF = FileNo, cur_fdc = FdC, cur_name=FileName}
+ = Handle, {FileNo, Pos}, Bin, N) ->
+ {NewFdC, Reply} = do_chunk_read_only(FdC, FileName, Pos, Bin, N),
+ {Handle#handle{cur_fdc = NewFdC}, conv(Reply, FileNo)};
+mf_int_chunk_read_only(Handle, {FileNo, Pos}, Bin, N) ->
+ FName = add_ext(Handle#handle.filename, FileNo),
+ NFileNo = inc(FileNo, Handle#handle.maxF),
+ case catch int_open(FName, true, read_only, any) of
+ {error, _Reason} ->
+ error_logger:info_msg("disk_log: chunk error. File ~p missing.\n\n",
+ [FName]),
+ mf_int_chunk_read_only(Handle, {NFileNo, 0}, [], N);
+ {ok, {_Alloc, FdC, _HeadSize, _FileSize}} ->
+ case do_chunk_read_only(FdC, FName, Pos, Bin, N) of
+ {NewFdC, eof} ->
+ file:close(NewFdC#cache.fd),
+ mf_int_chunk_read_only(Handle, {NFileNo,0}, [], N);
+ {NewFdC, Other} ->
+ file:close(NewFdC#cache.fd),
+ {Handle, conv(Other, FileNo)}
+ end
+ end.
+
+%% -> {ok, Cont} | Error
+mf_int_chunk_step(Handle, 0, Step) ->
+ FirstF = find_first_file(Handle),
+ mf_int_chunk_step(Handle, {FirstF, 0}, Step);
+mf_int_chunk_step(Handle, {FileNo, _Pos}, Step) ->
+ NFileNo = inc(FileNo, Handle#handle.maxF, Step),
+ FileName = add_ext(Handle#handle.filename, NFileNo),
+ case file:read_file_info(FileName) of
+ {ok, _FileInfo} ->
+ {ok, #continuation{pos = {NFileNo, 0}, b = []}};
+ _Error ->
+ {error, end_of_log}
+ end.
+
+%% -> {Reply, handle()}; Reply = ok | Error
+mf_write_cache(#handle{filename = FName, cur_fdc = FdC} = Handle) ->
+ erase(write_cache_timer_is_running),
+ #cache{fd = Fd, c = C} = FdC,
+ {Reply, NewFdC} = write_cache(Fd, FName, C),
+ {Reply, Handle#handle{cur_fdc = NewFdC}}.
+
+%% -> {Reply, handle()}; Reply = ok | Error
+mf_sync(#handle{filename = FName, cur_fdc = FdC} = Handle) ->
+ {Reply, NewFdC} = fsync(FdC, FName),
+ {Reply, Handle#handle{cur_fdc = NewFdC}}.
+
+%% -> ok | throw(FileError)
+mf_int_close(#handle{filename = FName, curF = CurF, cur_name = FileName,
+ cur_fdc = CurFdC, cur_cnt = CurCnt}, Mode) ->
+ close(CurFdC, FileName, Mode),
+ write_index_file(Mode, FName, CurF, CurF, CurCnt),
+ ok.
+
+%% -> {ok, handle(), Cnt} | throw(FileError)
+mf_ext_open(FName, MaxB, MaxF, Repair, Mode, Head, Version) ->
+ {First, Sz, TotSz, NFiles} = read_index_file(Repair, FName, MaxF),
+ write_size_file(Mode, FName, MaxB, MaxF, Version),
+ NewMaxF = if
+ NFiles > MaxF ->
+ {MaxF, NFiles};
+ true ->
+ MaxF
+ end,
+ {ok, FdC, FileName, Lost, {NoItems, NoBytes}, CurB} =
+ ext_file_open(FName, First, 0, 0, Head, Repair, Mode),
+ CurCnt = Sz + NoItems - Lost,
+ {ok, #handle{filename = FName, maxB = MaxB, cur_name = FileName,
+ maxF = NewMaxF, cur_cnt = CurCnt, acc_cnt = -Sz,
+ curF = First, cur_fdc = FdC, firstPos = NoBytes,
+ curB = CurB, noFull = 0, accFull = 0},
+ TotSz + CurCnt}.
+
+%% -> {ok, handle(), Lost}
+%% | {error, Error, handle()}
+%% | throw(FatalError)
+%% Fatal errors should always terminate the log.
+mf_ext_inc(Handle, Head) ->
+ #handle{filename = FName, cur_cnt = CurCnt, cur_name = FileName,
+ acc_cnt = AccCnt, curF = CurF, maxF = MaxF, cur_fdc = CurFdC,
+ noFull = NoFull} = Handle,
+ case catch wrap_ext_log(FName, CurF, MaxF, CurCnt, Head) of
+ {NewF, NewMaxF, NewFdC, NewFileName, Nh, FirstPos, Lost} ->
+ Handle1 = Handle#handle{cur_fdc = NewFdC, curF = NewF,
+ cur_name = NewFileName,
+ cur_cnt = Nh, acc_cnt = AccCnt + CurCnt,
+ maxF = NewMaxF, firstPos = FirstPos,
+ curB = FirstPos, noFull = NoFull + 1},
+ case catch fclose(CurFdC, FileName) of
+ ok ->
+ {ok, Handle1, Lost};
+ Error -> % Error in the last file, new file opened.
+ {error, Error, Handle1}
+ end;
+ Error ->
+ {error, Error, Handle}
+ end.
+
+%% -> {ok, handle(), Logged, Lost, NoWraps} | {ok, handle(), Logged}
+%% | {error, Error, handle(), Logged, Lost}
+
+%% The returned handle is not always valid -
+%% something may have been written before things went wrong.
+mf_ext_log(Handle, Bins, Head) ->
+ mf_ext_log(Handle, Bins, Head, 0, []).
+
+mf_ext_log(Handle, [], _Head, No, []) ->
+ {ok, Handle, No};
+mf_ext_log(Handle, [], _Head, No, Wraps0) ->
+ Wraps = reverse(Wraps0),
+ {ok, Handle, No, sum(Wraps), Wraps};
+mf_ext_log(Handle, Bins, Head, No0, Wraps) ->
+ #handle{curB = CurB, maxB = MaxB, cur_name = FileName, cur_fdc = CurFdC,
+ firstPos = FirstPos0, cur_cnt = CurCnt} = Handle,
+ {FirstBins, LastBins, NoBytes, N} =
+ ext_split_bins(CurB, MaxB, FirstPos0, Bins),
+ case FirstBins of
+ [] ->
+ #handle{filename = FName, curF = CurF, maxF = MaxF,
+ acc_cnt = AccCnt, noFull = NoFull} = Handle,
+ case catch wrap_ext_log(FName, CurF, MaxF, CurCnt, Head) of
+ {NewF, NewMaxF, NewFdC, NewFileName, Nh, FirstPos, Lost} ->
+ Handle1 = Handle#handle{cur_fdc = NewFdC, curF = NewF,
+ cur_cnt = Nh,
+ cur_name = NewFileName,
+ acc_cnt = AccCnt + CurCnt,
+ maxF = NewMaxF,
+ curB = FirstPos,
+ firstPos = FirstPos,
+ noFull = NoFull + 1},
+ case catch fclose(CurFdC, FileName) of
+ ok ->
+ mf_ext_log(Handle1, Bins, Head, No0 + Nh,
+ [Lost | Wraps]);
+ Error ->
+ Lost1 = Lost + sum(Wraps),
+ {error, Error, Handle1, No0 + Nh, Lost1}
+ end;
+ Error ->
+ {error, Error, Handle, No0, sum(Wraps)}
+ end;
+ _ ->
+ case fwrite(CurFdC, FileName, FirstBins, NoBytes) of
+ {ok, NewCurFdC} ->
+ Handle1 = Handle#handle{cur_fdc = NewCurFdC,
+ curB = CurB + NoBytes,
+ cur_cnt = CurCnt + N},
+ mf_ext_log(Handle1, LastBins, Head, No0 + N, Wraps);
+ {Error, NewCurFdC} ->
+ Handle1 = Handle#handle{cur_fdc = NewCurFdC},
+ {error, Error, Handle1, No0, sum(Wraps)}
+ end
+ end.
+
+wrap_ext_log(FName, CurF, MaxF, CurCnt, Head) ->
+ {NewF, NewMaxF} = inc_wrap(FName, CurF, MaxF),
+ {ok, NewFdC, NewFileName, Lost, {Nh, FirstPos}, _FileSize} =
+ ext_file_open(FName, NewF, CurF, CurCnt, Head),
+ {NewF, NewMaxF, NewFdC, NewFileName, Nh, FirstPos, Lost}.
+
+%% -> ok | throw(FileError)
+mf_ext_close(#handle{filename = FName, curF = CurF,
+ cur_fdc = CurFdC, cur_cnt = CurCnt}, Mode) ->
+ Res = (catch fclose(CurFdC, FName)),
+ write_index_file(Mode, FName, CurF, CurF, CurCnt),
+ Res.
+
+%% -> {ok, handle()} | throw(FileError)
+change_size_wrap(Handle, {NewMaxB, NewMaxF}, Version) ->
+ FName = Handle#handle.filename,
+ {_MaxB, MaxF} = get_wrap_size(Handle),
+ write_size_file(read_write, FName, NewMaxB, NewMaxF, Version),
+ if
+ NewMaxF > MaxF ->
+ remove_files(FName, MaxF + 1, NewMaxF),
+ {ok, Handle#handle{maxB = NewMaxB, maxF = NewMaxF}};
+ NewMaxF < MaxF ->
+ {ok, Handle#handle{maxB = NewMaxB, maxF = {NewMaxF, MaxF}}};
+ true ->
+ {ok, Handle#handle{maxB = NewMaxB, maxF = NewMaxF}}
+ end.
+
+%%-----------------------------------------------------------------
+%% Misc functions
+%%-----------------------------------------------------------------
+%% -> {ok, FdC, FileName, Lost, HeadSize, FileSize} | throw(Error)
+int_file_open(FName, NewFile, OldFile, OldCnt, Head) ->
+ Repair = truncate, Mode = read_write,
+ int_file_open(FName, NewFile, OldFile, OldCnt, Head, Repair, Mode).
+
+%% -> {ok, FdC, FileName, Lost, HeadSize, FileSize}
+%% | {repaired, FdC, FileName, Rec, Bad, FileSize}
+%% | throw(Error)
+int_file_open(FName, NewFile, OldFile, OldCnt, Head, Repair, Mode) ->
+ N = add_ext(FName, NewFile),
+ case int_open(N, Repair, Mode, Head) of
+ {ok, {_Alloc, FdC, HeadSize, FileSize}} ->
+ Lost = write_index_file(Mode, FName, NewFile, OldFile, OldCnt),
+ {ok, FdC, N, Lost, HeadSize, FileSize};
+ {repaired, FdC, Recovered, BadBytes, FileSize} ->
+ write_index_file(Mode, FName, NewFile, OldFile, OldCnt),
+ {repaired, FdC, N, Recovered, BadBytes, FileSize}
+ end.
+
+%% -> {ok, FdC, FileName, Lost, HeadSize, FileSize} | throw(Error)
+ext_file_open(FName, NewFile, OldFile, OldCnt, Head) ->
+ Repair = truncate, Mode = read_write,
+ ext_file_open(FName, NewFile, OldFile, OldCnt, Head, Repair, Mode).
+
+ext_file_open(FName, NewFile, OldFile, OldCnt, Head, Repair, Mode) ->
+ FileName = add_ext(FName, NewFile),
+ {ok, {_Alloc, FdC, HeadSize, FileSize}} =
+ ext_open(FileName, Repair, Mode, Head),
+ Lost = write_index_file(Mode, FName, NewFile, OldFile, OldCnt),
+ {ok, FdC, FileName, Lost, HeadSize, FileSize}.
+
+%%-----------------------------------------------------------------
+%% The old file format for index file (CurFileNo > 0), Version 0:
+%%
+%% CurFileNo SizeFile1 SizeFile2 ... SizeFileN
+%% 1 byte 4 bytes 4 bytes 4 bytes
+%%
+%% The new file format for index file (NewFormat = 0), version 1:
+%%
+%% NewFormat CurFileNo SizeFile1 SizeFile2 ... SizeFileN
+%% 1 byte 4 bytes 4 bytes 4 bytes
+%%
+%% The current file format for index file (sizes in bytes), version 2:
+%%
+%% 0 (1) 0 (4) FileFormatVersion (1) CurFileNo (4) SizeFile1 (8) ...
+%%
+%% (SizeFileI refers to number of items on the log file.)
+%%-----------------------------------------------------------------
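+
+%% Editor's illustration (a sketch, values chosen for the example): a
+%% version-2 index file for a wrap log whose current file is number 2,
+%% with three files holding 10, 7 and 0 items respectively, contains
+%%
+%%   <<0, 0:32, 2, 2:32, 10:64, 7:64, 0:64>>
+%%
+%% which is exactly the layout matched by read_index_file/1 and written
+%% by write_index_file/5 below.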
+
+-define(index_file_name(F), add_ext(F, "idx")).
+
+read_index_file(truncate, FName, MaxF) ->
+ remove_files(FName, 2, MaxF),
+ file:delete(?index_file_name(FName)),
+ {1, 0, 0, 0};
+read_index_file(_, FName, _MaxF) ->
+ read_index_file(FName).
+
+%% Used by wrap_log_reader.
+%% -> {CurFileNo, CurFileSz, TotSz, NoFiles} | throw(FileError)
+%% where TotSz does not include CurFileSz.
+
+read_index_file(FName) ->
+ FileName = ?index_file_name(FName),
+ case open_read(FileName) of
+ {ok, Fd} ->
+ R = case file:read(Fd, ?MAX_CHUNK_SIZE) of
+ {ok, <<0, 0:32, Version, CurF:32, Tail/binary>>}
+ when Version =:= ?VERSION,
+ 0 < CurF, CurF < ?MAX_FILES ->
+ parse_index(CurF, Version, 1, Tail, Fd, 0, 0, 0);
+ {ok, <<0, CurF:32, Tail/binary>>}
+ when 0 < CurF, CurF < ?MAX_FILES ->
+ parse_index(CurF, 1, 1, Tail, Fd, 0, 0, 0);
+ {ok, <<CurF, Tail/binary>>} when 0 < CurF ->
+ parse_index(CurF, 1, 1, Tail, Fd, 0, 0, 0);
+ _ErrorOrEof ->
+ {1, 0, 0, 0}
+ end,
+ file:close(Fd),
+ R;
+ _Error ->
+ {1, 0, 0, 0}
+ end.
+
+parse_index(CurF, V, CurF, <<CurSz:64, Tail/binary>>, Fd, _, TotSz, NFiles)
+ when V =:= ?VERSION ->
+ parse_index(CurF, V, CurF+1, Tail, Fd, CurSz, TotSz, NFiles+1);
+parse_index(CurF, V, N, <<Sz:64, Tail/binary>>, Fd, CurSz, TotSz, NFiles)
+ when V =:= ?VERSION ->
+ parse_index(CurF, V, N+1, Tail, Fd, CurSz, TotSz + Sz, NFiles+1);
+parse_index(CurF, V, CurF, <<CurSz:32, Tail/binary>>, Fd, _, TotSz, NFiles)
+ when V < ?VERSION ->
+ parse_index(CurF, V, CurF+1, Tail, Fd, CurSz, TotSz, NFiles+1);
+parse_index(CurF, V, N, <<Sz:32, Tail/binary>>, Fd, CurSz, TotSz, NFiles)
+ when V < ?VERSION ->
+ parse_index(CurF, V, N+1, Tail, Fd, CurSz, TotSz + Sz, NFiles+1);
+parse_index(CurF, V, N, B, Fd, CurSz, TotSz, NFiles) ->
+ case file:read(Fd, ?MAX_CHUNK_SIZE) of
+ eof when 0 =:= byte_size(B) ->
+ {CurF, CurSz, TotSz, NFiles};
+ {ok, Bin} ->
+ NewB = list_to_binary([B, Bin]),
+ parse_index(CurF, V, N, NewB, Fd, CurSz, TotSz, NFiles);
+ _ErrorOrEof ->
+ {1, 0, 0, 0}
+ end.
+
+%% Returns: Number of lost items (if an old file was truncated)
+%% -> integer() | throw(FileError)
+write_index_file(read_only, _FName, _NewFile, _OldFile, _OldCnt) ->
+ 0;
+write_index_file(read_write, FName, NewFile, OldFile, OldCnt) ->
+ FileName = ?index_file_name(FName),
+ case open_update(FileName) of
+ {ok, Fd} ->
+ {Offset, SzSz} =
+ case file:read(Fd, 6) of
+ eof ->
+ Bin = <<0, 0:32, ?VERSION, NewFile:32>>,
+ fwrite_close2(Fd, FileName, Bin),
+ {10, 8};
+ {ok, <<0, 0:32, _Version>>} ->
+ pwrite_close2(Fd, FileName, 6, <<NewFile:32>>),
+ {10, 8};
+ {ok, <<0, _/binary>>} ->
+ pwrite_close2(Fd, FileName, 1, <<NewFile:32>>),
+ {5, 4};
+ {ok, <<_,_/binary>>} ->
+ %% Very old format, convert to the latest format!
+ case file:read_file(FileName) of
+ {ok, <<_CurF, Tail/binary>>} ->
+ position_close2(Fd, FileName, bof),
+ Bin = <<0, 0:32, ?VERSION, NewFile:32>>,
+ NewTail = to_8_bytes(Tail, [], FileName, Fd),
+ fwrite_close2(Fd, FileName, [Bin | NewTail]),
+ {10, 8};
+ Error ->
+ file_error_close(Fd, FileName, Error)
+ end;
+ Error ->
+ file_error_close(Fd, FileName, Error)
+ end,
+
+ NewPos = Offset + (NewFile - 1)*SzSz,
+ OldCntBin = <<OldCnt:SzSz/unit:8>>,
+ if
+ OldFile > 0 ->
+ R = file:pread(Fd, NewPos, SzSz),
+ OldPos = Offset + (OldFile - 1)*SzSz,
+ pwrite_close2(Fd, FileName, OldPos, OldCntBin),
+ file:close(Fd),
+ case R of
+ {ok, <<Lost:SzSz/unit:8>>} -> Lost;
+ {ok, _} ->
+ throw({error, {invalid_index_file, FileName}});
+ eof -> 0;
+ Error2 -> file_error(FileName, Error2)
+ end;
+ true ->
+ pwrite_close2(Fd, FileName, NewPos, OldCntBin),
+ file:close(Fd),
+ 0
+ end;
+ E ->
+ file_error(FileName, E)
+ end.
+
+to_8_bytes(<<N:32,T/binary>>, NT, FileName, Fd) ->
+ to_8_bytes(T, [NT | <<N:64>>], FileName, Fd);
+to_8_bytes(B, NT, _FileName, _Fd) when byte_size(B) =:= 0 ->
+ NT;
+to_8_bytes(_B, _NT, FileName, Fd) ->
+ file:close(Fd),
+ throw({error, {invalid_index_file, FileName}}).
+
+%% -> ok | throw(FileError)
+index_file_trunc(FName, N) ->
+ FileName = ?index_file_name(FName),
+ case open_update(FileName) of
+ {ok, Fd} ->
+ case file:read(Fd, 6) of
+ eof ->
+ file:close(Fd),
+ ok;
+ {ok, <<0, 0:32, Version>>} when Version =:= ?VERSION ->
+ truncate_index_file(Fd, FileName, 10, 8, N);
+ {ok, <<0, _/binary>>} ->
+ truncate_index_file(Fd, FileName, 5, 4, N);
+ {ok, <<_, _/binary>>} -> % cannot happen
+ truncate_index_file(Fd, FileName, 1, 4, N);
+ Error ->
+ file_error_close(Fd, FileName, Error)
+ end;
+ Error ->
+ file_error(FileName, Error)
+ end.
+
+truncate_index_file(Fd, FileName, Offset, SzSz, N) ->
+ Pos = Offset + N*SzSz,
+ case Pos > file_size(FileName) of
+ true ->
+ file:close(Fd);
+ false ->
+ truncate_at_close2(Fd, FileName, {bof, Pos}),
+ file:close(Fd)
+ end,
+ ok.
+
+print_index_file(File) ->
+ io:format("-- Index begin --~n"),
+ case file:read_file(File) of
+ {ok, <<0, 0:32, Version, CurF:32, Tail/binary>>}
+ when Version =:= ?VERSION, 0 < CurF, CurF < ?MAX_FILES ->
+ io:format("cur file: ~w~n", [CurF]),
+ loop_index(1, Version, Tail);
+ {ok, <<0, CurF:32, Tail/binary>>} when 0 < CurF, CurF < ?MAX_FILES ->
+ io:format("cur file: ~w~n", [CurF]),
+ loop_index(1, 1, Tail);
+ {ok, <<CurF, Tail/binary>>} when 0 < CurF ->
+ io:format("cur file: ~w~n", [CurF]),
+ loop_index(1, 1, Tail);
+ _Else ->
+ ok
+ end,
+ io:format("-- end --~n").
+
+loop_index(N, V, <<Sz:64, Tail/binary>>) when V =:= ?VERSION ->
+ io:format(" ~p items: ~w~n", [N, Sz]),
+ loop_index(N+1, V, Tail);
+loop_index(N, V, <<Sz:32, Tail/binary>>) when V < ?VERSION ->
+ io:format(" ~p items: ~w~n", [N, Sz]),
+ loop_index(N+1, V, Tail);
+loop_index(_, _, _) ->
+ ok.
+
+-define(size_file_name(F), add_ext(F, "siz")).
+
+%% Version 0: no size file
+%% Version 1: <<MaxSize:32, MaxFiles:32>>
+%% Version 2: <<Version:8, MaxSize:64, MaxFiles:32>>
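+
+%% Editor's illustration (a sketch, values chosen for the example): with
+%% the current ?VERSION (2), a log limited to 1048576 bytes per file and
+%% 5 files gets a "siz" file containing <<2, 1048576:64, 5:32>>, as
+%% written by write_size_file/5 and matched by read_size_file_version/1.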
+
+%% -> ok | throw(FileError)
+write_size_file(read_only, _FName, _NewSize, _NewMaxFiles, _Version) ->
+ ok;
+write_size_file(read_write, FName, NewSize, NewMaxFiles, Version) ->
+ FileName = ?size_file_name(FName),
+ Bin = if
+ Version =:= ?VERSION ->
+ <<Version, NewSize:64, NewMaxFiles:32>>;
+ true ->
+ <<NewSize:32, NewMaxFiles:32>>
+ end,
+ case file:write_file(FileName, Bin) of
+ ok ->
+ ok;
+ E ->
+ file_error(FileName, E)
+ end.
+
+%% -> {NoBytes, NoFiles}.
+read_size_file(FName) ->
+ {Size,_Version} = read_size_file_version(FName),
+ Size.
+
+%% -> {{NoBytes, NoFiles}, Version}, Version = integer() | undefined
+read_size_file_version(FName) ->
+ case file:read_file(?size_file_name(FName)) of
+ {ok, <<Version, Size:64, MaxFiles:32>>} when Version =:= ?VERSION ->
+ {{Size, MaxFiles}, Version};
+ {ok, <<Size:32, MaxFiles:32>>} ->
+ {{Size, MaxFiles}, 1};
+ _ ->
+ %% Covers version 0 (no size file) as well as read errors.
+ {{0, 0}, ?VERSION}
+ end.
+
+conv({More, Terms}, FileNo) when is_record(More, continuation) ->
+ Cont = More#continuation{pos = {FileNo, More#continuation.pos}},
+ {Cont, Terms};
+conv({More, Terms, Bad}, FileNo) when is_record(More, continuation) ->
+ Cont = More#continuation{pos = {FileNo, More#continuation.pos}},
+ {Cont, Terms, Bad};
+conv(Other, _) ->
+ Other.
+
+find_first_file(#handle{filename = FName, curF = CurF, maxF = MaxF}) ->
+ fff(FName, inc(CurF, MaxF), CurF, MaxF).
+
+fff(_FName, CurF, CurF, _MaxF) -> CurF;
+fff(FName, MaybeFirstF, CurF, MaxF) ->
+ N = add_ext(FName, MaybeFirstF),
+ case file:read_file_info(N) of
+ {ok, _} -> MaybeFirstF;
+ _ -> fff(FName, inc(MaybeFirstF, MaxF), CurF, MaxF)
+ end.
+
+%% -> {iolist(), LastBins, NoBytes, NoTerms}
+ext_split_bins(CurB, MaxB, FirstPos, Bins) ->
+ MaxBs = MaxB - CurB, IsFirst = CurB =:= FirstPos,
+ ext_split_bins(MaxBs, IsFirst, [], Bins, 0, 0).
+
+ext_split_bins(MaxBs, IsFirst, First, [X | Last], Bs, N) ->
+ NBs = Bs + byte_size(X),
+ if
+ NBs =< MaxBs ->
+ ext_split_bins(MaxBs, IsFirst, [First | X], Last, NBs, N+1);
+ IsFirst, First =:= [] ->
+ % To avoid an infinite loop, we allow the file to be
+ % too big if it holds just a single item.
+ {[X], Last, NBs, N+1};
+ true ->
+ {First, [X | Last], Bs, N}
+ end;
+ext_split_bins(_, _, First, [], Bs, N) ->
+ {First, [], Bs, N}.
+
+%% -> {iolist(), LastBins, NoBytes, NoTerms}
+int_split_bins(CurB, MaxB, FirstPos, Bins) ->
+ MaxBs = MaxB - CurB, IsFirst = CurB =:= FirstPos,
+ int_split_bins(MaxBs, IsFirst, [], Bins, 0, 0).
+
+int_split_bins(MaxBs, IsFirst, First, [X | Last], Bs, N) ->
+ Sz = byte_size(X),
+ NBs = Bs + Sz + ?HEADERSZ,
+ BSz = <<Sz:?SIZESZ/unit:8>>,
+ XB = case Sz < ?MIN_MD5_TERM of
+ true ->
+ [BSz, ?BIGMAGICHEAD | X];
+ false ->
+ MD5 = erlang:md5(BSz),
+ [BSz, ?BIGMAGICHEAD, MD5 | X]
+ end,
+ if
+ NBs =< MaxBs ->
+ int_split_bins(MaxBs, IsFirst, [First | XB], Last, NBs, N+1);
+ IsFirst, First =:= [] ->
+ % To avoid an infinite loop, we allow the file to be
+ % too big if it holds just a single item.
+ {[XB], Last, NBs, N+1};
+ true ->
+ {First, [X | Last], Bs, N}
+ end;
+int_split_bins(_, _, First, [], Bs, N) ->
+ {First, [], Bs, N}.
+
+%% -> {NewCurrentFileNo, MaxFilesToBe} | throw(FileError)
+inc_wrap(FName, CurF, MaxF) ->
+ case MaxF of
+ %% Number of max files has changed
+ {NewMaxF, OldMaxF} ->
+ if
+ CurF >= NewMaxF ->
+ %% We are at or above the new number of files
+ remove_files(FName, CurF + 1, OldMaxF),
+ if
+ CurF > NewMaxF ->
+ %% The change was done while the current file was
+ %% greater than the new number of files.
+ %% The index file is not truncated here, since
+ %% writing the index file while opening the file
+ %% with index 1 will write the value for the file
+ %% with extension CurF as well. Next time the
+ %% limit is reached, the index file will be
+ %% truncated.
+ {1, {NewMaxF, CurF}};
+ true ->
+ %% The change was done while the current file was
+ %% less than the new number of files.
+ %% Remove the files from the index file too
+ index_file_trunc(FName, NewMaxF),
+ {1, NewMaxF}
+ end;
+ true ->
+ %% We haven't reached the new limit yet
+ NewFt = inc(CurF, NewMaxF),
+ {NewFt, MaxF}
+ end;
+ MaxF ->
+ %% Normal case.
+ NewFt = inc(CurF, MaxF),
+ {NewFt, MaxF}
+ end.
+
+inc(N, {_NewMax, OldMax}) -> inc(N, OldMax, 1);
+inc(N, Max) -> inc(N, Max, 1).
+
+inc(N, Max, Step) ->
+ Nx = (N + Step) rem Max,
+ if
+ Nx > 0 -> Nx;
+ true -> Nx + Max
+ end.
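+
+%% Editor's note: file numbers run from 1 to Max, so inc/2,3 wraps from
+%% Max back to 1 rather than to 0, e.g. inc(1, 5) =:= 2, inc(4, 5) =:= 5
+%% and inc(5, 5) =:= 1. With a {NewMaxF, OldMaxF} pair the still existing
+%% OldMaxF files are stepped through, e.g. inc(7, {5, 8}) =:= 8 and
+%% inc(8, {5, 8}) =:= 1.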
+
+
+file_size(Fname) ->
+ {ok, Fi} = file:read_file_info(Fname),
+ Fi#file_info.size.
+
+%% -> ok | throw(FileError)
+%% Tries to remove each file with name FName.I, N<=I<=Max.
+remove_files(FName, N, Max) ->
+ remove_files(FName, N, Max, ok).
+
+remove_files(_FName, N, Max, ok) when N > Max ->
+ ok;
+remove_files(_FName, N, Max, {FileName, Error}) when N > Max ->
+ file_error(FileName, Error);
+remove_files(FName, N, Max, Reply) ->
+ FileName = add_ext(FName, N),
+ NewReply = case file:delete(FileName) of
+ ok -> Reply;
+ {error, enoent} -> Reply;
+ Error -> {FileName, Error}
+ end,
+ remove_files(FName, N + 1, Max, NewReply).
+
+%% -> {MaxBytes, MaxFiles}
+get_wrap_size(#handle{maxB = MaxB, maxF = MaxF}) ->
+ case MaxF of
+ {NewMaxF,_} -> {MaxB, NewMaxF};
+ MaxF -> {MaxB, MaxF}
+ end.
+
+add_ext(Name, Ext) ->
+ concat([Name, ".", Ext]).
+
+open_read(FileName) ->
+ file:open(FileName, [raw, binary, read]).
+
+open_update(FileName) ->
+ file:open(FileName, [raw, binary, read, write]).
+
+open_truncate(FileName) ->
+ file:open(FileName, [raw, binary, write]).
+
+%%% Functions that access files, and throw on error.
+
+-define(MAX, 16384). % bytes
+-define(TIMEOUT, 2000). % ms
+
+%% -> {Reply, cache()}; Reply = ok | Error
+fwrite(#cache{c = []} = FdC, _FN, B, Size) ->
+ case get(write_cache_timer_is_running) of
+ true ->
+ ok;
+ _ ->
+ put(write_cache_timer_is_running, true),
+ erlang:send_after(?TIMEOUT, self(), {self(), write_cache})
+ end,
+ {ok, FdC#cache{sz = Size, c = B}};
+fwrite(#cache{sz = Sz, c = C} = FdC, _FN, B, Size) when Sz < ?MAX ->
+ {ok, FdC#cache{sz = Sz+Size, c = [C | B]}};
+fwrite(#cache{fd = Fd, c = C}, FileName, B, _Size) ->
+ write_cache(Fd, FileName, [C | B]).
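+
+%% Editor's note: fwrite/4 buffers data in the #cache{} record until
+%% either ?MAX bytes have accumulated or the {self(), write_cache}
+%% message scheduled above with erlang:send_after/3 arrives after
+%% ?TIMEOUT ms, at which point the cache is flushed via write_cache/2
+%% (see also mf_write_cache/1).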
+
+fwrite_header(Fd, B, Size) ->
+ {ok, #cache{fd = Fd, sz = Size, c = B}}.
+
+%% -> {NewFdC, Reply}; Reply = ok | Error
+pread(#cache{fd = Fd, c = C}, FileName, Position, MaxBytes) ->
+ Reply = write_cache(Fd, FileName, C),
+ case Reply of
+ {ok, NewFdC} ->
+ case file:pread(Fd, Position, MaxBytes) of
+ {error, Error} ->
+ {NewFdC, catch file_error(FileName, {error, Error})};
+ R ->
+ {NewFdC, R}
+ end;
+ {Error, NewFdC} ->
+ {NewFdC, Error}
+ end.
+
+%% -> {ok, cache(), Pos} | {Error, cache()}
+position(#cache{fd = Fd, c = C}, FileName, Pos) ->
+ Reply = write_cache(Fd, FileName, C),
+ case Reply of
+ {ok, NewFdC} ->
+ case position2(Fd, FileName, Pos) of
+ {ok, Loc} ->
+ {ok, NewFdC, Loc};
+ Error ->
+ {Error, NewFdC}
+ end;
+ _Error ->
+ Reply
+ end.
+
+position_close(#cache{fd = Fd, c = C}, FileName, Pos) ->
+ NewFdC = write_cache_close(Fd, FileName, C),
+ {ok, Loc} = position_close2(Fd, FileName, Pos),
+ {NewFdC, Loc}.
+
+fsync(#cache{fd = Fd, c = C}, FileName) ->
+ Reply = write_cache(Fd, FileName, C),
+ case Reply of
+ {ok, NewFdC} ->
+ case file:sync(Fd) of
+ ok ->
+ Reply;
+ Error ->
+ {catch file_error(FileName, Error), NewFdC}
+ end;
+ _Error ->
+ Reply
+ end.
+
+%% -> {Reply, NewFdC}; Reply = ok | Error
+truncate_at(FdC, FileName, Pos) ->
+ case position(FdC, FileName, Pos) of
+ {ok, NewFdC, _Pos} ->
+ case file:truncate(NewFdC#cache.fd) of
+ ok ->
+ {ok, NewFdC};
+ Error ->
+ {catch file_error(FileName, Error), NewFdC}
+ end;
+ Reply ->
+ Reply
+ end.
+
+fwrite_close2(Fd, FileName, B) ->
+ case file:write(Fd, B) of
+ ok -> ok;
+ Error -> file_error_close(Fd, FileName, Error)
+ end.
+
+pwrite_close2(Fd, FileName, Position, B) ->
+ case file:pwrite(Fd, Position, B) of
+ ok -> ok;
+ Error -> file_error(FileName, {error, Error})
+ end.
+
+position2(Fd, FileName, Pos) ->
+ case file:position(Fd, Pos) of
+ {error, Error} -> catch file_error(FileName, {error, Error});
+ OK -> OK
+ end.
+
+position_close2(Fd, FileName, Pos) ->
+ case file:position(Fd, Pos) of
+ {error, Error} -> file_error_close(Fd, FileName, {error, Error});
+ OK -> OK
+ end.
+
+truncate_at_close2(Fd, FileName, Pos) ->
+ position_close2(Fd, FileName, Pos),
+ case file:truncate(Fd) of
+ ok -> ok;
+ Error -> file_error_close(Fd, FileName, Error)
+ end.
+
+fclose(#cache{fd = Fd, c = C}, FileName) ->
+ %% The cache is empty if the file was opened in read_only mode.
+ write_cache_close(Fd, FileName, C),
+ file:close(Fd).
+
+%% -> {Reply, #cache{}}; Reply = ok | Error
+write_cache(Fd, _FileName, []) ->
+ {ok, #cache{fd = Fd}};
+write_cache(Fd, FileName, C) ->
+ case file:write(Fd, C) of
+ ok -> {ok, #cache{fd = Fd}};
+ Error -> {catch file_error(FileName, Error), #cache{fd = Fd}}
+ end.
+
+-spec write_cache_close(fd(), file:filename(), iodata()) -> #cache{}. % | throw(Error)
+
+write_cache_close(Fd, _FileName, []) ->
+ #cache{fd = Fd};
+write_cache_close(Fd, FileName, C) ->
+ case file:write(Fd, C) of
+ ok -> #cache{fd = Fd};
+ Error -> file_error_close(Fd, FileName, Error)
+ end.
+
+-spec file_error(file:filename(), {'error', atom()}) -> no_return().
+
+file_error(FileName, {error, Error}) ->
+ throw({error, {file_error, FileName, Error}}).
+
+-spec file_error_close(fd(), file:filename(), {'error', atom()}) -> no_return().
+
+file_error_close(Fd, FileName, {error, Error}) ->
+ file:close(Fd),
+ throw({error, {file_error, FileName, Error}}).
diff --git a/lib/kernel/src/disk_log_server.erl b/lib/kernel/src/disk_log_server.erl
new file mode 100644
index 0000000000..8894ed87e8
--- /dev/null
+++ b/lib/kernel/src/disk_log_server.erl
@@ -0,0 +1,368 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(disk_log_server).
+-behaviour(gen_server).
+
+-export([start_link/0, start/0, open/1, close/1,
+ get_log_pids/1, accessible_logs/0]).
+
+%% Local export.
+-export([dist_open/1, get_local_pid/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_info/2, terminate/2]).
+-export([handle_cast/2, code_change/3]). % just to avoid compiler warning
+
+-include("disk_log.hrl").
+
+-compile({inline,[{do_get_log_pids,1}]}).
+
+-record(pending, {log, pid, req, from, attach, clients}). % [{Request,From}]
+
+-record(state, {pending = [] :: [#pending{}]}).
+
+%%%-----------------------------------------------------------------
+%%% This module implements the disk_log server. Its primary purpose
+%%% is to keep the ets table 'disk_log_names' updated and to handle
+%%% distribution data (pids) using the module pg2.
+%%%-----------------------------------------------------------------
+%%%----------------------------------------------------------------------
+%%% API
+%%%----------------------------------------------------------------------
+start_link() ->
+ gen_server:start_link({local, disk_log_server}, disk_log_server, [], []).
+
+start() ->
+ ensure_started().
+
+open({ok, A}) ->
+ ensure_started(),
+ gen_server:call(disk_log_server, {open, local, A}, infinity);
+open(Other) ->
+ Other.
+
+%% To be used from this module only.
+dist_open(A) ->
+ ensure_started(),
+ gen_server:call(disk_log_server, {open, distr, A}, infinity).
+
+close(Pid) ->
+ gen_server:call(disk_log_server, {close, Pid}, infinity).
+
+get_log_pids(LogName) ->
+ do_get_log_pids(LogName).
+
+accessible_logs() ->
+ ensure_started(),
+ do_accessible_logs().
+
+%%%----------------------------------------------------------------------
+%%% Callback functions from gen_server
+%%%----------------------------------------------------------------------
+
+%% It would have been really nice to have a tag for disk log groups,
+%% like {distributed_disk_log, Log}, but backward compatibility makes
+%% it hard to introduce.
+-define(group(Log), Log).
+
+init([]) ->
+ process_flag(trap_exit, true),
+ ets:new(?DISK_LOG_NAME_TABLE, [named_table, set]),
+ ets:new(?DISK_LOG_PID_TABLE, [named_table, set]),
+ {ok, #state{}}.
+
+handle_call({open, W, A}, From, State) ->
+ open([{{open, W, A}, From}], State);
+handle_call({close, Pid}, _From, State) ->
+ Reply = do_close(Pid),
+ {reply, Reply, State}.
+
+handle_info({pending_reply, Pid, Result0}, State) ->
+ {value, #pending{log = Name, pid = Pid, from = From,
+ req = Request, attach = Attach,
+ clients = Clients}} =
+ lists:keysearch(Pid, #pending.pid, State#state.pending),
+ NP = lists:keydelete(Pid, #pending.pid, State#state.pending),
+ State1 = State#state{pending = NP},
+ if
+ Attach and (Result0 =:= {error, no_such_log}) ->
+ %% The disk_log process has terminated. Try again.
+ open([{Request,From} | Clients], State1);
+ true ->
+ case Result0 of
+ _ when Attach ->
+ ok;
+ {error, _} ->
+ ok;
+ _ ->
+ put(Pid, Name),
+ link(Pid),
+ {_, Locality, _} = Request,
+ ets:insert(?DISK_LOG_PID_TABLE, {Pid, Name}),
+ ets:insert(?DISK_LOG_NAME_TABLE, {Name, Pid, Locality}),
+ if
+ Locality =:= distr ->
+ ok = pg2:join(?group(Name), Pid);
+ true ->
+ ok
+ end
+ end,
+ gen_server:reply(From, result(Request, Result0)),
+ open(Clients, State1)
+ end;
+handle_info({'EXIT', Pid, _Reason}, State) ->
+ %% If there are clients waiting to be attached to this log, info
+ %% {pending_reply,Pid,{error,no_such_log}} will soon arrive.
+ case get(Pid) of
+ undefined ->
+ ok;
+ Name ->
+ erase_log(Name, Pid)
+ end,
+ {noreply, State};
+handle_info(_, State) ->
+ {noreply, State}.
+
+%% Just to avoid compiler warning.
+handle_cast(_, State) ->
+ {noreply, State}.
+
+%% Just to avoid compiler warning.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+terminate(_Reason, _) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+%%%-----------------------------------------------------------------
+
+ensure_started() ->
+ case whereis(disk_log_server) of
+ undefined ->
+ LogSup = {disk_log_sup, {disk_log_sup, start_link, []}, permanent,
+ 1000, supervisor, [disk_log_sup]},
+ supervisor:start_child(kernel_safe_sup, LogSup),
+ LogServer = {disk_log_server,
+ {disk_log_server, start_link, []},
+ permanent, 2000, worker, [disk_log_server]},
+ supervisor:start_child(kernel_safe_sup, LogServer),
+ ok;
+ _ -> ok
+ end.
+
+open([{Req, From} | L], State) ->
+ State2 = case do_open(Req, From, State) of
+ {pending, State1} ->
+ State1;
+ {Reply, State1} ->
+ gen_server:reply(From, Reply),
+ State1
+ end,
+ open(L, State2);
+open([], State) ->
+ {noreply, State}.
+
+%% -> {OpenRet, NewState} | {{node(),OpenRet}, NewState} |
+%% {pending, NewState}
+do_open({open, W, #arg{name = Name}=A}=Req, From, State) ->
+ case check_pending(Name, From, State, Req) of
+ {pending, NewState} ->
+ {pending, NewState};
+ false when W =:= local ->
+ case A#arg.distributed of
+ {true, Nodes} ->
+ Fun = fun() -> open_distr_rpc(Nodes, A, From) end,
+ _Pid = spawn(Fun),
+ %% No pending reply is expected, but don't reply yet.
+ {pending, State};
+ false ->
+ case get_local_pid(Name) of
+ {local, Pid} ->
+ do_internal_open(Name, Pid, From, Req, true,State);
+ {distributed, _Pid} ->
+ {{error, {node_already_open, Name}}, State};
+ undefined ->
+ start_log(Name, Req, From, State)
+ end
+ end;
+ false when W =:= distr ->
+ ok = pg2:create(?group(Name)),
+ case get_local_pid(Name) of
+ undefined ->
+ start_log(Name, Req, From, State);
+ {local, _Pid} ->
+ {{node(),{error, {node_already_open, Name}}}, State};
+ {distributed, Pid} ->
+ do_internal_open(Name, Pid, From, Req, true, State)
+ end
+ end.
+
+%% Spawning a process is a means to avoid deadlock when
+%% disk_log_servers mutually open disk_logs.
+
+-spec open_distr_rpc([node()], _, _) -> no_return(). % XXX: underspecified
+
+open_distr_rpc(Nodes, A, From) ->
+ {AllReplies, BadNodes} = rpc:multicall(Nodes, ?MODULE, dist_open, [A]),
+ {Ok, Bad} = cr(AllReplies, [], []),
+ Old = find_old_nodes(Nodes, AllReplies, BadNodes),
+ NotOk = [{BadNode, {error, nodedown}} || BadNode <- BadNodes ++ Old],
+ Reply = {Ok, Bad ++ NotOk},
+ %% Send the reply to the waiting client:
+ gen_server:reply(From, Reply),
+ exit(normal).
+
+cr([{badrpc, {'EXIT', _}} | T], Nodes, Bad) ->
+ %% This clause can be removed in the next release.
+ cr(T, Nodes, Bad);
+cr([R={_Node, {error, _}} | T], Nodes, Bad) ->
+ cr(T, Nodes, [R | Bad]);
+cr([Reply | T], Nodes, Bad) ->
+ cr(T, [Reply | Nodes], Bad);
+cr([], Nodes, Bad) ->
+ {Nodes, Bad}.
+
+%% If a "new" node (one that calls dist_open/1) tries to open a log
+%% on an old node (one that does not have dist_open/1), then the old
+%% node is considered 'down'. In the next release, this test will not be
+%% needed since all nodes can be assumed to be "new" by then.
+%% One more thing: if an old node tries to open a log on a new node,
+%% the new node is also considered 'down'.
+find_old_nodes(Nodes, Replies, BadNodes) ->
+ R = [X || {X, _} <- Replies],
+ ordsets:to_list(ordsets:subtract(ordsets:from_list(Nodes),
+ ordsets:from_list(R ++ BadNodes))).
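+
+%% A small worked example of the subtraction above (hypothetical node
+%% names and replies): with Nodes = [a@h, b@h, c@h],
+%% Replies = [{a@h, Ret}] and BadNodes = [c@h], only a@h actually
+%% answered, so
+%%
+%%   find_old_nodes([a@h, b@h, c@h], [{a@h, Ret}], [c@h])
+%%
+%% returns [b@h], and open_distr_rpc/3 then reports b@h as
+%% {error, nodedown} together with the nodes in BadNodes.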
+
+start_log(Name, Req, From, State) ->
+ Server = self(),
+ case supervisor:start_child(disk_log_sup, [Server]) of
+ {ok, Pid} ->
+ do_internal_open(Name, Pid, From, Req, false, State);
+ Error ->
+ {result(Req, Error), State}
+ end.
+
+do_internal_open(Name, Pid, From, {open, _W, A}=Req, Attach, State) ->
+ Server = self(),
+ F = fun() ->
+ Res = disk_log:internal_open(Pid, A),
+ Server ! {pending_reply, Pid, Res}
+ end,
+ _ = spawn(F),
+ PD = #pending{log = Name, pid = Pid, req = Req,
+ from = From, attach = Attach, clients = []},
+ P = [PD | State#state.pending],
+ {pending, State#state{pending = P}}.
+
+check_pending(Name, From, State, Req) ->
+ case lists:keysearch(Name, #pending.log, State#state.pending) of
+ {value, #pending{log = Name, clients = Clients}=P} ->
+ NP = lists:keyreplace(Name, #pending.log, State#state.pending,
+ P#pending{clients = Clients++[{Req,From}]}),
+ {pending, State#state{pending = NP}};
+ false ->
+ false
+ end.
+
+result({_, distr, _}, R) ->
+ {node(), R};
+result({_, local, _}, R) ->
+ R.
+
+do_close(Pid) ->
+ case get(Pid) of
+ undefined ->
+ ok;
+ Name ->
+ erase_log(Name, Pid),
+ unlink(Pid),
+ ok
+ end.
+
+erase_log(Name, Pid) ->
+ case get_local_pid(Name) of
+ undefined ->
+ ok;
+ {local, Pid} ->
+ true = ets:delete(?DISK_LOG_NAME_TABLE, Name),
+ true = ets:delete(?DISK_LOG_PID_TABLE, Pid);
+ {distributed, Pid} ->
+ true = ets:delete(?DISK_LOG_NAME_TABLE, Name),
+ true = ets:delete(?DISK_LOG_PID_TABLE, Pid),
+ ok = pg2:leave(?group(Name), Pid)
+ end,
+ erase(Pid).
+
+do_accessible_logs() ->
+ LocalSpec = {'$1','_',local},
+ Local0 = [hd(L) || L <- ets:match(?DISK_LOG_NAME_TABLE, LocalSpec)],
+ Local = lists:sort(Local0),
+ Groups0 = ordsets:from_list(pg2:which_groups()),
+ Groups = ordsets:to_list(ordsets:subtract(Groups0, Local)),
+ Dist = [L || L <- Groups, dist_pids(L) =/= []],
+ {Local, Dist}.
+
+get_local_pid(LogName) ->
+ case ets:lookup(?DISK_LOG_NAME_TABLE, LogName) of
+ [{LogName, Pid, local}] ->
+ {local, Pid};
+ [{LogName, Pid, distr}] ->
+ {distributed, Pid};
+ [] ->
+ undefined
+ end.
+
+%% Inlined.
+do_get_log_pids(LogName) ->
+ case catch ets:lookup(?DISK_LOG_NAME_TABLE, LogName) of
+ [{LogName, Pid, local}] ->
+ {local, Pid};
+ [{LogName, _Pid, distr}] ->
+ case pg2:get_members(?group(LogName)) of
+ [] -> % The disk_log process has died recently
+ undefined;
+ Members ->
+ {distributed, Members}
+ end;
+ _EmptyOrError ->
+ case dist_pids(LogName) of
+ [] -> undefined;
+ Pids -> {distributed, Pids}
+ end
+ end.
+
+dist_pids(LogName) ->
+ %% Would be much simpler if disk log group names were tagged.
+ GroupName = ?group(LogName),
+ case catch pg2:get_members(GroupName) of
+ [Pid | _] = Pids ->
+ case rpc:call(node(Pid), ?MODULE, get_local_pid, [LogName]) of
+ undefined -> % does not seem to be a disk_log group
+ case catch lists:member(Pid,pg2:get_members(GroupName)) of
+ true -> [];
+ _ -> dist_pids(LogName)
+ end;
+ _ -> % badrpc if get_local_pid is not exported
+ Pids
+ end;
+ _ ->
+ []
+ end.
diff --git a/lib/kernel/src/disk_log_sup.erl b/lib/kernel/src/disk_log_sup.erl
new file mode 100644
index 0000000000..96e37b678c
--- /dev/null
+++ b/lib/kernel/src/disk_log_sup.erl
@@ -0,0 +1,32 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(disk_log_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, init/1]).
+
+start_link()->
+ supervisor:start_link({local, disk_log_sup}, disk_log_sup, []).
+
+init([]) ->
+ SupFlags = {simple_one_for_one, 4, 3600},
+ Child = {disk_log, {disk_log, istart_link, []}, temporary,
+ 1000, worker, [disk_log]},
+ {ok, {SupFlags, [Child]}}.
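+
+%% A minimal usage sketch (assuming disk_log_sup is already running
+%% under kernel_safe_sup, as arranged by ensure_started/0 in
+%% disk_log_server): with the simple_one_for_one strategy above,
+%% children are started on demand, one disk_log process per open log:
+%%
+%%   {ok, Pid} = supervisor:start_child(disk_log_sup, [ServerPid])
+%%
+%% which results in a call to disk_log:istart_link(ServerPid); this is
+%% what start_log/4 in disk_log_server does.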
diff --git a/lib/kernel/src/dist.hrl b/lib/kernel/src/dist.hrl
new file mode 100644
index 0000000000..aea1ab81ba
--- /dev/null
+++ b/lib/kernel/src/dist.hrl
@@ -0,0 +1,38 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Distribution capabilities flags (corresponds with dist.h).
+%%
+
+-define(DFLAG_PUBLISHED,1).
+-define(DFLAG_ATOM_CACHE,2).
+-define(DFLAG_EXTENDED_REFERENCES,4).
+-define(DFLAG_DIST_MONITOR,8).
+-define(DFLAG_FUN_TAGS,16#10).
+-define(DFLAG_DIST_MONITOR_NAME,16#20).
+-define(DFLAG_HIDDEN_ATOM_CACHE,16#40).
+-define(DFLAG_NEW_FUN_TAGS,16#80).
+-define(DFLAG_EXTENDED_PIDS_PORTS,16#100).
+-define(DFLAG_EXPORT_PTR_TAG,16#200).
+-define(DFLAG_BIT_BINARIES,16#400).
+-define(DFLAG_NEW_FLOATS,16#800).
+-define(DFLAG_UNICODE_IO,16#1000).
+-define(DFLAG_DIST_HDR_ATOM_CACHE,16#2000).
+-define(DFLAG_SMALL_ATOM_TAGS, 16#4000).
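+
+%% These capability flags are combined into a single integer with 'bor'
+%% during the distribution handshake. A small illustration (not part of
+%% this header) of building and testing such a flag word; the actual set
+%% of flags advertised for a connection is decided by the handshake code:
+%%
+%%   Flags = ?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_EXTENDED_PIDS_PORTS
+%%           bor ?DFLAG_NEW_FLOATS,
+%%   HasNewFloats = (Flags band ?DFLAG_NEW_FLOATS) =/= 0.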
diff --git a/lib/kernel/src/dist_ac.erl b/lib/kernel/src/dist_ac.erl
new file mode 100644
index 0000000000..5c62aa31e9
--- /dev/null
+++ b/lib/kernel/src/dist_ac.erl
@@ -0,0 +1,1534 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(dist_ac).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/0,
+ load_application/2,
+ takeover_application/2,
+ permit_application/2,
+ permit_only_loaded_application/2]).
+
+-export([get_known_nodes/0]).
+
+%% Internal exports
+-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2,
+ code_change/3, send_timeout/3]).
+-export([info/0]).
+
+-import(lists, [zf/2, filter/2, map/2, foreach/2, foldl/3, mapfoldl/3,
+ keysearch/3, keydelete/3, keyreplace/4, member/2]).
+
+-define(AC, application_controller).
+-define(DIST_AC, ?MODULE).
+-define(LOCK_ID, ?MODULE).
+
+%% This is the protocol version for the dist_ac protocol (between nodes)
+-define(vsn, 1).
+
+%%%-----------------------------------------------------------------
+%%% This module implements the default Distributed Applications
+%%% Controller. It is possible to write other controllers if
+%%% the functionality in this module is not sufficient.
+%%% The process cooperates with the application_controller.
+%%%-----------------------------------------------------------------
+
+%%-----------------------------------------------------------------
+%% Naming conventions:
+%% Appl = #appl
+%% AppName = atom()
+%%-----------------------------------------------------------------
+-record(state, {appls = [], tmp_locals = [], remote_started = [],
+ known = [], started = [], tmp_weights = [],
+ dist_loaded = [], t_reqs = [], s_reqs = [], p_reqs = []}).
+%%-----------------------------------------------------------------
+%% appls = [#appl()] - these are the applications we control
+%% tmp_locals = [{AppName, Weight, node()}] - tmp, info part of
+%% application startup for some distrib appls,
+%% not yet handled.
+%% remote_started = [{Node, AppName}] - info on apps started before
+%% we were started
+%% known = [Node] - These are the nodes known to us
+%% started = [AppName] - An ordered list of started applications
+%% (reversed start order)
+%% tmp_weights = [{AppName, MyWeight}] - tmp, if we're forced to
+%% send a dist_ac_weight message before we're prepared to,
+%% we remember the weight we sent here, so we can use
+%% it in the dist_ac_weight msgs later.
+%% dist_loaded = [{{Name, Node}, HisNodes, Permission}] - info on
+%% applications loaded on other nodes (and on our own node)
+%% t_reqs = [{AppName, From}] - processes waiting for takeover
+%% to complete.
+%% s_reqs = [{AppName, From}] - processes waiting for stop
+%% to complete.
+%% p_reqs = [{From, AppName, Bool, [Node]}] - outstanding permit
+%% requests. Nodes is a list of nodes we're still waiting for.
+%%-----------------------------------------------------------------
+
+-record(appl, {name, id, restart_time = 0, nodes = [], run = []}).
+
+%%-----------------------------------------------------------------
+%% id = local | undefined | {distributed, node()} | waiting | run_waiting |
+%% {failover, Node} | {takeover, Node}
+%% local : local application
+%% undefined : not yet started
+%% {distributed, Node} : running on another node, we're standby
+%% {failover, Node} : failover from Node
+%% {takeover, Node} : takeover from Node
+%% waiting : other node went down, we're waiting for a timeout
+%% to take it over. From = pid() | undefined
+%% run_waiting : we have decided to start the app; wait for the
+%% AC result
+%%-----------------------------------------------------------------
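+%%
+%% For illustration only (hypothetical application and node names), an
+%% #appl{} record for a distributed application currently running on
+%% another node could look like:
+%%
+%%   #appl{name = myapp,
+%%         id = {distributed, cp2@host},
+%%         restart_time = 5000,
+%%         nodes = [cp1@host, {cp2@host, cp3@host}],
+%%         run = [{cp1@host, true}, {cp2@host, true}, {cp3@host, undefined}]}
+%%
+%% where 'run' holds {Node, Permission} entries (Permission is
+%% true | false | undefined; undefined means the application is not
+%% loaded on that node yet).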
+
+start_link() ->
+ case gen_server:start_link({local, ?DIST_AC}, ?MODULE, [], []) of
+ {ok, Pid} ->
+ gen_server:cast(?DIST_AC, init_sync),
+ {ok, Pid};
+ Else ->
+ Else
+ end.
+
+
+%%-----------------------------------------------------------------
+%% Func: load_application(AppName, DistNodes)
+%% Args: AppName = atom()
+%% DistNodes = default | {AppName, Time, [node() | {node()...}]}
+%% Purpose: Notifies the dist_ac about distributed nodes for an
+%% application. DistNodes overrides the kernel 'distributed'
+%% parameter.
+%% Returns: ok | {error, Reason}
+%%-----------------------------------------------------------------
+load_application(AppName, DistNodes) ->
+ gen_server:call(?DIST_AC, {load_application, AppName, DistNodes}, infinity).
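+
+%% For illustration (hypothetical application and node names): overriding
+%% the kernel 'distributed' parameter for one application, with
+%% Time = 5000 and cp2/cp3 as an "any of these" alternative:
+%%
+%%   dist_ac:load_application(myapp,
+%%                            {myapp, 5000, [cp1@host, {cp2@host, cp3@host}]})
+%%
+%% Passing 'default' as DistNodes leaves the configuration untouched
+%% (see dist_replace/3).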
+
+takeover_application(AppName, RestartType) ->
+ case validRestartType(RestartType) of
+ true ->
+ wait_for_sync_dacs(),
+ Nodes = get_nodes(AppName),
+ global:trans(
+ {?LOCK_ID, self()},
+ fun() ->
+ gen_server:call(
+ ?DIST_AC,
+ {takeover_application, AppName, RestartType},
+ infinity)
+ end,
+ Nodes);
+ false ->
+ {error, {invalid_restart_type, RestartType}}
+ end.
+
+%%-----------------------------------------------------------------
+%% This function controls which applications are permitted to run. If
+%% an application X runs when this function is called as
+%% permit_application(X, false), it is moved to another node where it
+%% is permitted to run (distributed applications only). If there is
+%% no such node, the application is stopped. (I.e. local applications
+%% are always stopped, and distributed applications with no other node
+%% alive are stopped as well.) If later a call to
+%% permit_application(X, true) is made, X is restarted.
+%% For example, suppose applications app1 and app2 are started and
+%% running.
+%% If we evaluate
+%% permit_application(app2, false)
+%% app2 is stopped and app1 only is running.
+%% If we now evaluate
+%% permit_application(app2, true),
+%% permit_application(app3, true)
+%% app2 is restarted, but not app3, since it hasn't been started by a
+%% call to start_application.
+%%-----------------------------------------------------------------
+permit_application(AppName, Bool) ->
+ wait_for_sync_dacs(),
+ LockId = {?LOCK_ID, self()},
+ global:trans(
+ LockId,
+ fun() ->
+ gen_server:call(?DIST_AC,
+ {permit_application, AppName, Bool, LockId, started},
+ infinity)
+ end).
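+
+%% The scenario from the comment above, written out as calls (app1, app2
+%% and app3 are hypothetical; app1 and app2 are assumed started, app3 is
+%% not):
+%%
+%%   dist_ac:permit_application(app2, false), % app2 is stopped or moved
+%%   dist_ac:permit_application(app2, true),  % app2 is restarted
+%%   dist_ac:permit_application(app3, true)   % no effect; app3 never started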
+
+permit_only_loaded_application(AppName, Bool) ->
+ wait_for_sync_dacs(),
+ LockId = {?LOCK_ID, self()},
+ global:trans(
+ LockId,
+ fun() ->
+ gen_server:call(?DIST_AC,
+ {permit_application, AppName, Bool, LockId, only_loaded},
+ infinity)
+ end).
+
+get_nodes(AppName) ->
+ gen_server:call(?DIST_AC, {get_nodes, AppName}, infinity).
+
+get_known_nodes() ->
+ gen_server:call(?DIST_AC, get_known_nodes).
+
+%%%-----------------------------------------------------------------
+%%% call-back functions from gen_server
+%%%-----------------------------------------------------------------
+init([]) ->
+ process_flag(trap_exit, true),
+ {ok, #state{}}.
+
+sync_dacs(Appls) ->
+ Res = global:trans({?LOCK_ID, sync_dacs},
+ fun() ->
+ Nodes = introduce_me(nodes(), Appls),
+ wait_dacs(Nodes, [node()], Appls, [])
+ end),
+ ets:insert(ac_tab, {sync_dacs, ok}),
+ Res.
+
+introduce_me(Nodes, Appls) ->
+ Msg = {dist_ac_new_node, ?vsn, node(), Appls, []},
+ filter(fun(Node) ->
+ %% This handles nodes without DACs
+ case rpc:call(Node, erlang, whereis, [?DIST_AC]) of
+ Pid when is_pid(Pid) ->
+ Pid ! Msg,
+ true;
+ _ ->
+ false
+ end
+ end, Nodes).
+
+wait_dacs([Node | Nodes], KnownNodes, Appls, RStarted) ->
+ monitor_node(Node, true),
+ receive
+ %% HisAppls =/= [] is the case when our node connects to a running system
+ %%
+ %% It is always the responsibility of newer versions to understand
+ %% older versions of the protocol. As we don't have any older
+ %% versions (that are supposed to work with this version), we
+ %% don't handle version mismatch here.
+ {dist_ac_new_node, _Vsn, Node, HisAppls, HisStarted} ->
+ monitor_node(Node, false),
+ NRStarted = RStarted ++ HisStarted,
+ NAppls = dist_merge(Appls, HisAppls, Node),
+ wait_dacs(Nodes, [Node | KnownNodes], NAppls, NRStarted);
+ {nodedown, Node} ->
+ monitor_node(Node, false),
+ wait_dacs(Nodes, KnownNodes, Appls, RStarted)
+ end;
+wait_dacs([], KnownNodes, Appls, RStarted) ->
+ {KnownNodes, Appls, RStarted}.
+
+
+info() ->
+ gen_server:call(?DIST_AC, info).
+
+
+%%-----------------------------------------------------------------
+%% All functions that can affect which applications are running
+%% execute within a global lock, to ensure that they are not
+%% executing at the same time as sync_dacs. However, to avoid a
+%% deadlock situation where e.g. permit_application gets the lock
+%% before sync_dacs, this function is used to ensure that the local
+%% sync_dacs always gets the lock first of all. The lock is still
+%% used to not interfere with sync_dacs on other nodes.
+%%-----------------------------------------------------------------
+wait_for_sync_dacs() ->
+ case catch ets:lookup(ac_tab, sync_dacs) of
+ [{sync_dacs, ok}] -> ok;
+ _ ->
+ receive after 100 -> ok end,
+ wait_for_sync_dacs()
+ end.
+
+handle_cast(init_sync, _S) ->
+ %% When the dist_ac is started, it receives this msg, and gets into
+ %% the receive loop. 'go' is sent from the kernel_config proc when
+ %% all nodes that should be pinged have been pinged. The reason for this
+ %% is that dist_ac syncs with the other nodes at start-up. That is,
+ %% it does _not_ handle partitioned nets! The other nodes try to call
+ %% the local name dist_ac, which means that this name must be registered
+ %% before the distribution is started. But it can't sync until after the
+ %% distribution is started. Hence this 'go' handshake.
+ receive
+ {go, KernelConfig} ->
+ Appls = case application:get_env(kernel, distributed) of
+ {ok, D} -> dist_check(D);
+ undefined -> []
+ end,
+
+ dist_take_control(Appls),
+ %% kernel_config waits for dist_ac to take control over its
+ %% applications. By this we can be sure that the kernel
+ %% application hasn't completed its start before dist_ac has
+ %% taken control over its applications. (OTP-3509)
+ KernelConfig ! dist_ac_took_control,
+
+ %% we're really just interested in nodedowns.
+ net_kernel:monitor_nodes(true),
+
+ {Known, NAppls, RStarted} = sync_dacs(Appls),
+
+ {noreply,
+ #state{appls = NAppls, known = Known, remote_started = RStarted}}
+ end.
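+
+%% A sketch of the other side of this handshake, as implied by the
+%% receive above (the real sender is the kernel_config process; the
+%% function below is only an illustration of the message exchange):
+%%
+%%   sync_with_dist_ac(DistAc) ->
+%%       DistAc ! {go, self()},
+%%       receive
+%%           dist_ac_took_control -> ok
+%%       end.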
+
+
+handle_call(info, _From, S) ->
+ {reply, S, S};
+
+
+
+handle_call({load_application, AppName, DistNodes}, _From, S) ->
+ Appls = S#state.appls,
+ case catch dist_replace(DistNodes, AppName, Appls) of
+ {error, Error} ->
+ {reply, {error, Error}, S};
+ {'EXIT', R} ->
+ {stop, R, {error, R}, S};
+ NAppls ->
+ NewS = case dist_find_nodes(NAppls, AppName) of
+ [] -> % No distrib nodes; we ignore it
+ S;
+ _Nodes ->
+ ensure_take_control(AppName, Appls),
+ {ok, S2} = load(AppName, S#state{appls = NAppls}),
+ S2
+ end,
+ {reply, ok, NewS}
+ end;
+
+handle_call({takeover_application, AppName, RestartType}, From, S) ->
+ Appls = S#state.appls,
+ case keysearch(AppName, #appl.name, Appls) of
+ {value, Appl} when element(1, Appl#appl.id) =:= distributed ->
+ {distributed, Node} = Appl#appl.id,
+ ac_takeover(req, AppName, Node, RestartType),
+ NAppl = Appl#appl{id = takeover},
+ NAppls = keyreplace(AppName, #appl.name, Appls, NAppl),
+ TR = S#state.t_reqs,
+ {noreply, S#state{appls = NAppls,
+ t_reqs = [{AppName, From} | TR]}};
+ {value, #appl{id = local}} ->
+ {reply, {error, {already_running_locally, AppName}}, S};
+ _ ->
+ {reply, {error, {not_running_distributed, AppName}}, S}
+ end;
+
+handle_call({permit_application, AppName, Bool, LockId, StartInfo}, From, S) ->
+ case lists:keymember(AppName, #appl.name, S#state.appls) of
+ false ->
+ %% This clause covers permit requests for non-distributed
+ %% applications. Ideally this wouldn't be handled like this, or
+ %% here at all, but we have to stay backwards compatible.
+ case application_controller:get_loaded(AppName) of
+ {true, _} when not Bool ->
+ ac_stop_it(AppName),
+ {reply, ok, S};
+ {true, _} when Bool ->
+ ac_start_it(req, AppName),
+ {reply, ok, S};
+ false ->
+ {reply, {error, {not_loaded, AppName}}, S}
+ end;
+ true ->
+ NAppls = dist_update_run(S#state.appls, AppName, node(), Bool),
+ NewS = S#state{appls = NAppls},
+ %% Check if the application is running
+ IsRunning = keysearch(AppName, #appl.name, NAppls),
+ IsMyApp = case IsRunning of
+ {value, #appl{id = local}} -> true;
+ _ -> false
+ end,
+ %% Tell everyone about the new permission
+ Nodes = dist_flat_nodes(NAppls, AppName),
+ Msg = {dist_ac_new_permission, node(), AppName, Bool, IsMyApp},
+ send_msg(Msg, Nodes),
+ case StartInfo of
+ only_loaded ->
+ {reply, ok, NewS};
+ started ->
+ permit(Bool, IsRunning, AppName, From, NewS, LockId)
+ end
+ end;
+
+%%-----------------------------------------------------------------
+%% The 'distributed' parameter has changed. The parameters are updated,
+%% but the applications are not actually moved to other nodes,
+%% even if they should be.
+%%-----------------------------------------------------------------
+handle_call({distribution_changed, NewDistribution}, _From, S) ->
+ Appls = S#state.appls,
+ NewAppls = dist_change_update(Appls, NewDistribution),
+ NewS = S#state{appls = NewAppls},
+ {reply, ok, NewS};
+
+
+handle_call({get_nodes, AppName}, _From, S) ->
+ Alive = intersection(dist_flat_nodes(S#state.appls, AppName),
+ S#state.known),
+ {reply, Alive, S};
+
+handle_call(get_known_nodes, _From, S) ->
+ {reply, S#state.known, S}.
+
+
+handle_info({ac_load_application_req, AppName}, S) ->
+ {ok, NewS} = load(AppName, S),
+ ?AC ! {ac_load_application_reply, AppName, ok},
+ {noreply, NewS};
+
+handle_info({ac_application_unloaded, AppName}, S) ->
+ {ok, NewS} = unload(AppName, S),
+ {noreply, NewS};
+
+handle_info({ac_start_application_req, AppName}, S) ->
+ %% We must decide if we or another node should start the application
+ Lock = {?LOCK_ID, self()},
+ case global:set_lock(Lock, [node()], 0) of
+ true ->
+ S2 = case catch start_appl(AppName, S, reply) of
+ {ok, NewS, _} ->
+ NewS;
+ {error, R} ->
+ ?AC ! {ac_start_application_reply, AppName, {error,R}},
+ S
+ end,
+ global:del_lock(Lock),
+ {noreply, S2};
+ false ->
+ send_after(100, {ac_start_application_req, AppName}),
+ {noreply, S}
+ end;
+
+handle_info({ac_application_run, AppName, Res}, S) ->
+ %% We ordered a start, and here's the result. Tell all other nodes.
+ Appls = S#state.appls,
+ Nodes = S#state.known,
+ %% Send this to _all_ known nodes, as any node could sync
+ %% on this app (not only nodes that can run it).
+ send_msg({dist_ac_app_started, node(), AppName, Res}, Nodes),
+ NId = case Res of
+ ok -> local;
+ {error, _R} -> undefined
+ end,
+ {value, Appl} = keysearch(AppName, #appl.name, Appls),
+ %% Check if we have somebody waiting for the takeover result
+ NTReqs = del_t_reqs(AppName, S#state.t_reqs, Res),
+ NAppl = Appl#appl{id = NId},
+ NAppls = keyreplace(AppName, #appl.name, Appls, NAppl),
+ {noreply, S#state{appls = NAppls, t_reqs = NTReqs}};
+
+
+handle_info({ac_application_not_run, AppName}, S) ->
+ %% We ordered a stop, and now it has stopped
+ {value, Appl} = keysearch(AppName, #appl.name, Appls = S#state.appls),
+ %% Check if we have somebody waiting for the takeover result; this
+ %% can happen if somebody called stop just before the takeover was handled.
+ NTReqs = del_t_reqs(AppName, S#state.t_reqs, {error, stopped}),
+ %% Check if we have somebody waiting for stop to return
+ SReqs = filter(fun({Name, From2}) when Name =:= AppName ->
+ gen_server:reply(From2, ok),
+ false;
+ (_) ->
+ true
+ end, S#state.s_reqs),
+ RS = case Appl#appl.id of
+ local ->
+ send_msg({dist_ac_app_stopped, AppName}, S#state.known),
+ S#state.remote_started;
+ {distributed, Node} ->
+ [{Node, AppName} | S#state.remote_started];
+ _ ->
+ S#state.remote_started
+ end,
+ NAppl = Appl#appl{id = undefined},
+ NAppls = keyreplace(AppName, #appl.name, Appls, NAppl),
+ {noreply, S#state{appls = NAppls, t_reqs = NTReqs, s_reqs = SReqs,
+ remote_started = RS}};
+
+handle_info({ac_application_stopped, AppName}, S) ->
+ %% Somebody called application:stop - reset state as it was before
+ %% the application was started.
+ {value, Appl} = keysearch(AppName, #appl.name, Appls = S#state.appls),
+ %% Check if we have somebody waiting for the takeover result; this
+ %% can happen if somebody called stop just before the takeover was handled.
+ NTReqs = del_t_reqs(AppName, S#state.t_reqs, {error, stopped}),
+ %% Check if we have somebody waiting for stop to return
+ SReqs = filter(fun({Name, From2}) when Name =:= AppName ->
+ gen_server:reply(From2, ok),
+ false;
+ (_) ->
+ true
+ end, S#state.s_reqs),
+ RS = case Appl#appl.id of
+ local ->
+ send_msg({dist_ac_app_stopped, AppName}, S#state.known),
+ S#state.remote_started;
+ {distributed, Node} ->
+ [{Node, AppName} | S#state.remote_started];
+ _ ->
+ S#state.remote_started
+ end,
+ NAppl = Appl#appl{id = undefined},
+ NAppls = keyreplace(AppName, #appl.name, Appls, NAppl),
+ Started = lists:delete(AppName, S#state.started),
+ {noreply, S#state{appls = NAppls, started = Started,
+ t_reqs = NTReqs, s_reqs = SReqs,
+ remote_started = RS}};
+
+
+%%-----------------------------------------------------------------
+%% A new node has come up.
+%% Send it info about our started distributed applications.
+%%-----------------------------------------------------------------
+handle_info({dist_ac_new_node, _Vsn, Node, HisAppls, []}, S) ->
+ Appls = S#state.appls,
+ MyStarted = zf(fun(Appl) when Appl#appl.id =:= local ->
+ {true, {node(), Appl#appl.name}};
+ (_) ->
+ false
+ end, Appls),
+ {?DIST_AC, Node} ! {dist_ac_new_node, ?vsn, node(), Appls, MyStarted},
+ NAppls = dist_merge(Appls, HisAppls, Node),
+ {noreply, S#state{appls = NAppls, known = [Node | S#state.known]}};
+
+handle_info({dist_ac_app_started, Node, Name, Res}, S) ->
+ case {keysearch(Name, #appl.name, S#state.appls), lists:member(Name, S#state.started)} of
+ {{value, Appl}, true} ->
+ Appls = S#state.appls,
+ NId = case Appl#appl.id of
+ _ when element(1, Res) =:= error ->
+ %% Start of appl on some node failed.
+ %% Set Id to undefined. That node will have
+ %% to take some actions, e.g. reboot
+ undefined;
+ {distributed, _} ->
+ %% Another node took over from some node. Update
+ %% appl list.
+ {distributed, Node};
+ local ->
+ %% Another node took over from me; stop my application
+ %% and update the running list.
+ {distributed, Node};
+ _ ->
+ %% Another node started appl. Update appl list.
+ {distributed, Node}
+ end,
+ ac_started(req, Name, Node),
+ NAppl = Appl#appl{id = NId},
+ NAppls = keyreplace(Name, #appl.name, Appls, NAppl),
+ TmpWeights = keydelete_all(Name, 1, S#state.tmp_weights),
+ NewS = S#state{appls = NAppls, tmp_weights = TmpWeights},
+ NPermitReq = req_del_permit_false(NewS#state.p_reqs, Name),
+ case catch req_start_app(NewS#state{p_reqs = NPermitReq}, Name) of
+ {error, R} ->
+ {stop, R};
+ {ok, NewS2} ->
+ {noreply, NewS2}
+ end;
+ {_, _} ->
+ %% The app has not been started at this node yet; remember this in
+ %% remote_started.
+ NRStarted = [{Node, Name} | S#state.remote_started],
+ {noreply, S#state{remote_started = NRStarted}}
+ end;
+
+handle_info({dist_ac_app_stopped, AppName}, S) ->
+ Appls = S#state.appls,
+ case keysearch(AppName, #appl.name, Appls) of
+ false ->
+ RStarted = keydelete(AppName, 2, S#state.remote_started),
+ {noreply, S#state{remote_started = RStarted}};
+ {value, Appl} ->
+ NAppl = Appl#appl{id = undefined},
+ NAppls = keyreplace(AppName, #appl.name, Appls, NAppl),
+ RStarted = keydelete(AppName, 2, S#state.remote_started),
+ {noreply, S#state{appls = NAppls, remote_started = RStarted}}
+ end;
+
+handle_info({dist_ac_weight, Name, Weight, Node}, S) ->
+ %% This means another node is starting up and will eventually take over
+ %% this appl. We have a situation like: {Name, [{Node}, node()]}.
+ %% Node sends us this msg, and we must respond. It doesn't really
+ %% matter what we send, but it must be a dist_ac_weight msg.
+ %% Another situation is {Name, [RNode, {node()}, Node]}.
+ %%
+ %% Yet another situation is that the node where Name was running crashed,
+ %% and Node has got the nodedown message, but we haven't. In this case,
+ %% we must send a correct weight to Node. i.e. the same weight that
+ %% we'll send to him later, when we get the nodedown message.
+ case keysearch(Name, #appl.name, S#state.appls) of
+ {value, Appl} ->
+ Id = Appl#appl.id,
+ case Id of
+ run_waiting ->
+ {?DIST_AC, Node} ! {dist_ac_weight, Name, 0, node()},
+ {noreply, S};
+ undefined ->
+ {noreply,
+ S#state{tmp_locals = [{Name, Weight, Node} |
+ S#state.tmp_locals]}};
+ {takeover, _} ->
+ {noreply,
+ S#state{tmp_locals = [{Name, Weight, Node} |
+ S#state.tmp_locals]}};
+ {failover, _} ->
+ {noreply,
+ S#state{tmp_locals = [{Name, Weight, Node} |
+ S#state.tmp_locals]}};
+ _ ->
+ MyWeight = get_cached_weight(Name, S),
+ {?DIST_AC, Node} ! {dist_ac_weight, Name, MyWeight, node()},
+ NTWs = keyreplaceadd(Name, 1, S#state.tmp_weights,
+ {Name, MyWeight}),
+ {noreply, S#state{tmp_weights = NTWs}}
+ end;
+ _ ->
+ {noreply,
+ S#state{tmp_locals = [{Name, Weight, Node} | S#state.tmp_locals]}}
+ end;
+
+%%-----------------------------------------------------------------
+%% A node died. Check if we should take over some applications.
+%%-----------------------------------------------------------------
+handle_info({nodedown, Node}, S) ->
+ AppNames = dist_get_runnable(S#state.appls),
+ HisAppls = filter(fun(#appl{name = Name, id = {distributed, N}})
+ when Node =:= N -> lists:member(Name, AppNames);
+ (_) -> false
+ end,
+ S#state.appls),
+ Appls2 = zf(fun(Appl) when Appl#appl.id =:= {distributed, Node} ->
+ case lists:member(Appl#appl.name, AppNames) of
+ true ->
+ {true, Appl#appl{id = {failover, Node}}};
+ false ->
+ ac_not_running(Appl#appl.name),
+ {true, Appl#appl{id = undefined}}
+ end;
+ (_) ->
+ true
+ end,
+ S#state.appls),
+ RStarted = filter(fun({Node2, _Name}) when Node2 =:= Node -> false;
+ (_) -> true
+ end,
+ S#state.remote_started),
+ Appls3 = dist_del_node(Appls2, Node),
+ {NPermitReq, Appls4, SReqs} = req_del_node(S, Node, Appls3),
+ NKnown = lists:delete(Node, S#state.known),
+ NewS = S#state{appls = Appls4, p_reqs = NPermitReq, known = NKnown,
+ s_reqs = SReqs,
+ remote_started = RStarted},
+ restart_appls(HisAppls),
+ {noreply, NewS};
+
+handle_info({dist_ac_app_loaded, Node, Name, HisNodes, Permission, HeKnowsMe},
+ S) ->
+ Nodes = dist_find_nodes(Appls = S#state.appls, Name),
+ case is_loaded(Name, S) of
+ true ->
+ case equal_nodes(Nodes, HisNodes) of
+ true ->
+ NAppls = dist_update_run(Appls, Name, Node, Permission),
+ if
+ not HeKnowsMe ->
+ %% We've got it loaded, but he doesn't know -
+ %% he's a new node connecting to us.
+ Msg = {dist_ac_app_loaded, node(), Name,
+ Nodes, dist_is_runnable(Appls, Name), true},
+ {?DIST_AC, Node} ! Msg;
+ true ->
+ ok
+ end,
+ {noreply, S#state{appls = NAppls}};
+ false ->
+ dist_mismatch(Name, Node)
+ end;
+ false ->
+ Load =[{{Name, Node}, HisNodes, Permission} | S#state.dist_loaded],
+ {noreply, S#state{dist_loaded = Load}}
+ end;
+
+handle_info({dist_ac_app_unloaded, Node, Name}, S) ->
+ Appls = dist_update_run(S#state.appls, Name, Node, undefined),
+ Load = keydelete({Name, Node}, 1, S#state.dist_loaded),
+ {noreply, S#state{appls = Appls, dist_loaded = Load}};
+
+
+handle_info({dist_ac_new_permission, Node, AppName, false, IsHisApp}, S) ->
+ Appls = dist_update_run(S#state.appls, AppName, Node, false),
+ NewS = S#state{appls =Appls},
+ case dist_is_runnable(Appls, AppName) of
+ true when IsHisApp ->
+ case catch start_appl(AppName, NewS, req) of
+ {ok, NewS2, _} ->
+ {noreply, NewS2};
+ {error, _R} -> % if app was permanent, AC will shutdown the node
+ {noreply, NewS}
+ end;
+ _ ->
+ {noreply, NewS}
+ end;
+handle_info({dist_ac_new_permission, Node, AppName, true, _IsHisApp}, S) ->
+ Appls = dist_update_run(S#state.appls, AppName, Node, true),
+ {noreply, S#state{appls = Appls}};
+
+handle_info({internal_restart_appl, Name}, S) ->
+ case restart_appl(Name, S) of
+ {error, R} ->
+ {stop, {error, R}, S};
+ NewS ->
+ {noreply, NewS}
+ end;
+
+handle_info(_, S) ->
+ {noreply, S}.
+
+terminate(_Reason, _S) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+%%%-----------------------------------------------------------------
+load(AppName, S) ->
+ Appls0 = S#state.appls,
+ %% Get the dist specification for the app on other nodes
+ DistLoaded = get_dist_loaded(AppName, Load1 = S#state.dist_loaded),
+ %% Get the local dist specification
+ Nodes = dist_find_nodes(Appls0, AppName),
+ FNodes = flat_nodes(Nodes),
+ %% Update dists spec with our local permission
+ Permission = get_default_permission(AppName),
+ Appls1 = dist_update_run(Appls0, AppName, node(), Permission),
+ %% Compare the local spec with other nodes's specs
+ %% If equal, update our spec with his current permission
+ {LoadedNodes, Appls2} =
+ mapfoldl(
+ fun({Node, HisNodes, HisPermission}, Appls) ->
+ case equal_nodes(Nodes, HisNodes) of
+ true ->
+ {Node, dist_update_run(Appls, AppName,
+ Node, HisPermission)};
+ _ ->
+ dist_mismatch(AppName, Node)
+ end
+ end, Appls1, DistLoaded),
+ Load2 = del_dist_loaded(AppName, Load1),
+ %% Tell all Nodes about the new appl loaded, and its permission.
+ foreach(fun(Node) when Node =/= node() ->
+ Msg = {dist_ac_app_loaded, node(), AppName,
+ Nodes, Permission, member(Node, LoadedNodes)},
+ {?DIST_AC, Node} ! Msg;
+ (_) -> ok
+ end, FNodes),
+ {ok, S#state{appls = Appls2, dist_loaded = Load2}}.
+
+ensure_take_control(AppName, Appls) ->
+ %% Check if this is a new application that we don't control yet
+ case lists:keymember(AppName, #appl.name, Appls) of
+ true -> % we have control
+ ok;
+ false -> % take control!
+ %% Note: this works because this is executed within a
+ %% synchronous call. I.e. we get the control *before*
+ %% application:load returns. (otherwise application:start
+ %% could be called before we got the chance to take control)
+ %% The only reason we have to bother about this is because
+ %% we have to be backwards compatible in the sense that all
+ %% apps don't have to be specified in the 'distributed' parameter,
+ %% but may be implicitly 'distributed' by a call to
+ %% application:load.
+ application_controller:control_application(AppName)
+ end.
+
+unload(AppName, S) ->
+ Appls = S#state.appls,
+ Nodes = dist_flat_nodes(Appls, AppName),
+ %% Tell all ACs in DistNodes about the unloaded appl
+ Msg = {dist_ac_app_unloaded, node(), AppName},
+ send_msg(Msg, Nodes),
+ {value, Appl} = keysearch(AppName, #appl.name, Appls),
+ NAppl = Appl#appl{id = undefined, run = []},
+ {ok, S#state{appls = keyreplace(AppName, #appl.name, Appls, NAppl)}}.
+
+start_appl(AppName, S, Type) ->
+ %% Get nodes, and check if App is loaded on all involved nodes.
+ %% If it is loaded everywhere, we know that we have the same picture
+ %% of the nodes; otherwise the load wouldn't have succeeded.
+ Appl = case keysearch(AppName, #appl.name, Appls = S#state.appls) of
+ {value, A} -> A;
+ _ -> throw({error, {unknown_application, AppName}})
+ end,
+ case Appl#appl.id of
+ local ->
+ %% UW 990913: we've already started the app
+ %% this could happen if ac_start_application_req was resent.
+ {ok,S,false};
+ _ ->
+ {Id, IsWaiting} = case dist_get_all_nodes(Appl) of
+ {ok, DistNodes, PermittedNodes} ->
+ start_distributed(Appl, AppName, DistNodes,
+ PermittedNodes, S, Type);
+ Error -> throw(Error)
+ end,
+ NAppl = Appl#appl{id = Id},
+ NAppls = keyreplaceadd(AppName, #appl.name, Appls, NAppl),
+ {ok, NewS} = req_start_app(S#state{appls = NAppls}, AppName),
+ TmpLocals = keydelete_all(AppName, 1, NewS#state.tmp_locals),
+ TmpWeights = keydelete_all(AppName, 1, NewS#state.tmp_weights),
+ RStarted = keydelete(AppName, 2, S#state.remote_started),
+ Started = replaceadd(AppName, NewS#state.started),
+ {ok,
+ NewS#state{started = Started, tmp_locals = TmpLocals,
+ tmp_weights = TmpWeights, remote_started = RStarted},
+ IsWaiting}
+ end.
+
+
+start_distributed(Appl, Name, Nodes, PermittedNodes, S, Type) ->
+ case find_start_node(Nodes, PermittedNodes, Name, S) of
+ {ok, Node} when Node =:= node() ->
+ case Appl#appl.id of
+ {failover, FoNode} when Type =:= req ->
+ ac_failover(Name, FoNode, undefined);
+ {distributed, Node2} when Type =:= req ->
+ ac_takeover(req, Name, Node2, undefined);
+ _ when Type =:= reply ->
+ case lists:keysearch(Name, 2, S#state.remote_started) of
+ {value, {Node3, _}} ->
+ ac_takeover(reply, Name, Node3, undefined);
+ _ ->
+ ac_start_it(Type, Name)
+ end;
+ _ ->
+ ac_start_it(Type, Name)
+ end,
+ {run_waiting, true};
+ {already_started, Node} ->
+ ac_started(Type, Name, Node),
+ {{distributed, Node}, false};
+ {ok, Node} ->
+ case keysearch(Name, #appl.name, S#state.appls) of
+ {value, #appl{id = {distributed, Node}}} ->
+ ac_started(Type, Name, Node),
+ {{distributed, Node}, false};
+ _ ->
+ wait_dist_start(Node, Appl, Name, Nodes,
+ PermittedNodes, S, Type)
+ end;
+ not_started ->
+ wait_dist_start2(Appl, Name, Nodes, PermittedNodes, S, Type);
+ no_permission ->
+ ac_not_started(Type, Name),
+ {undefined, false}
+ end.
+
+wait_dist_start(Node, Appl, Name, Nodes, PermittedNodes, S, Type) ->
+ monitor_node(Node, true),
+ receive
+ {dist_ac_app_started, Node, Name, ok} ->
+ ac_started(Type, Name, Node),
+ monitor_node(Node, false),
+ {{distributed, Node}, false};
+ {dist_ac_app_started, Node, Name, {error, R}} ->
+ ac_error(Type, Name, {Node, R}),
+ monitor_node(Node, false),
+ {Appl#appl.id, false};
+ {dist_ac_weight, Name, _Weight, Node} ->
+ %% This is the situation: {Name, [RNode, {Node}, node()]}
+ %% and permit(false) is called on RNode, and we sent the
+ %% weight first. Node handled it in handle_info, and
+ %% now we must send him a weight msg. We can use any weight;
+ %% he wins anyway.
+ monitor_node(Node, false),
+ {?DIST_AC, Node} !
+ {dist_ac_weight, Name, get_cached_weight(Name, S), node()},
+ wait_dist_start(Node, Appl, Name, Nodes, PermittedNodes, S, Type);
+ {nodedown, Node} ->
+ monitor_node(Node, false),
+ TmpLocals =
+ filter(fun({Name2, _Weight, Node2}) when Node2 =:= Node,
+ Name2 =:= Name -> false;
+ (_) -> true
+ end,
+ S#state.tmp_locals),
+ NewS = S#state{tmp_locals = TmpLocals},
+ start_distributed(Appl, Name, Nodes,
+ lists:delete(Node, PermittedNodes), NewS, Type)
+ end.
+
+wait_dist_start2(Appl, Name, Nodes, PermittedNodes, S, Type) ->
+ receive
+ {dist_ac_app_started, Node, Name, ok} ->
+ ac_started(Type, Name, Node),
+ {{distributed, Node}, false};
+ {dist_ac_app_started, Node, Name, {error, R}} ->
+ ac_error(Type, Name, {Node, R}),
+ {Appl#appl.id, false};
+ {nodedown, Node} ->
+ %% A node went down, try to start the app again - there may not
+ %% be any more nodes to wait for.
+ TmpLocals =
+ filter(fun({Name2, _Weight, Node2}) when Node2 =:= Node,
+ Name2 =:= Name -> false;
+ (_) -> true
+ end,
+ S#state.tmp_locals),
+ NewS = S#state{tmp_locals = TmpLocals},
+ start_distributed(Appl, Name, Nodes,
+ lists:delete(Node, PermittedNodes), NewS, Type)
+ end.
+
+
+ac_start_it(reply, Name) ->
+ ?AC ! {ac_start_application_reply, Name, start_it};
+ac_start_it(req, Name) ->
+ ?AC ! {ac_change_application_req, Name, start_it}.
+
+ac_started(reply, Name, Node) ->
+ ?AC ! {ac_start_application_reply, Name, {started, Node}};
+ac_started(req, Name, Node) ->
+ ?AC ! {ac_change_application_req, Name, {started, Node}}.
+
+ac_error(reply, Name, Error) ->
+ ?AC ! {ac_start_application_reply, Name, {error, Error}};
+ac_error(req, _Name, _Error) ->
+ ok.
+
+ac_not_started(reply, Name) ->
+ ?AC ! {ac_start_application_reply, Name, not_started};
+ac_not_started(req, Name) ->
+ ?AC ! {ac_change_application_req, Name, stop_it}.
+
+ac_stop_it(Name) ->
+ ?AC ! {ac_change_application_req, Name, stop_it}.
+
+ac_takeover(reply, Name, Node, _RestartType) ->
+ ?AC ! {ac_start_application_reply, Name, {takeover, Node}};
+ac_takeover(req, Name, Node, RestartType) ->
+ ?AC ! {ac_change_application_req, Name,
+ {takeover, Node, RestartType}}.
+
+ac_failover(Name, Node, RestartType) ->
+ ?AC ! {ac_change_application_req, Name,
+ {failover, Node, RestartType}}.
+
+ac_not_running(Name) ->
+ ?AC ! {ac_change_application_req, Name, not_running}.
+
+restart_appls(Appls) ->
+ foreach(fun(Appl) ->
+ AppName = Appl#appl.name,
+ send_after(Appl#appl.restart_time,
+ {internal_restart_appl, AppName})
+ end, lists:reverse(Appls)).
+
+restart_appl(AppName, S) ->
+ case keysearch(AppName, #appl.name, S#state.appls) of
+ {value, Appl} when element(1, Appl#appl.id) =:= failover ->
+ case catch start_appl(AppName, S, req) of
+ {ok, NewS, _} ->
+ NewS;
+ {error, R} ->
+ error_msg("Error when restarting application ~p: ~p~n",
+ [AppName, R]),
+ S
+ end;
+ _ ->
+ S
+ end.
+
+%% permit(ShouldBeRunning, IsRunning, ...)
+permit(false, {value, #appl{id = undefined}}, _AppName, _From, S, _LockId) ->
+ {reply, ok, S}; % It's not running
+permit(false, {value, #appl{id = Id}}, _AppName, _From, S, _LockId)
+ when element(1, Id) =:= distributed ->
+ %% It is running at another node already
+ {reply, ok, S};
+permit(false, {value, _}, AppName, From, S, _LockId) ->
+ %% It is a distributed application
+ %% Check if there is any runnable node
+ case dist_get_runnable_nodes(S#state.appls, AppName) of
+ [] ->
+ %% There is no runnable node; stop application
+ ac_stop_it(AppName),
+ SReqs = [{AppName, From} | S#state.s_reqs],
+ {noreply, S#state{s_reqs = SReqs}};
+ Nodes ->
+ %% Delete all outstanding 'permit true' requests.
+ PR = req_del_permit_true(S#state.p_reqs, AppName),
+ NPReqs = [{From, AppName, false, Nodes} | PR],
+ {noreply, S#state{p_reqs = NPReqs}}
+ end;
+permit(true, {value, #appl{id = local}}, _AppName, _From, S, _LockId) ->
+ {reply, ok, S};
+permit(true, _, AppName, From, S, LockId) ->
+ case catch start_appl(AppName, S, req) of
+ {_ErrorTag, {not_running, App}} ->
+ %% Delete all outstanding 'permit false' requests
+ PR = req_del_permit_false(S#state.p_reqs, AppName),
+ NPReqs = [{false, AppName, true, App} | PR],
+ {reply, ok, S#state{p_reqs = NPReqs}};
+ {ok, NewS, true} ->
+ %% We have ordered a start or a takeover; we must not return
+ %% until the app is running.
+ TR = NewS#state.t_reqs,
+ %% Delete the lock, so others may start the app
+ global:del_lock(LockId),
+ {noreply, NewS#state{t_reqs = [{AppName, From} | TR]}};
+ {ok, _S, false} ->
+ %% Application should be started, but at another node
+ %% State remains the same
+ {reply, ok, S};
+ {_ErrorTag, R} ->
+ {stop, R, {error, R}, S}
+ end.
+
+do_start_appls(StartApps, S) ->
+ SortedStartApps = StartApps,
+ Appls = S#state.appls,
+ {ok, foldl(
+ fun(AppName, NewS) ->
+ case catch start_appl(AppName, NewS, req) of
+ {error, R} ->
+ throw({{error, NewS}, R});
+ {ok, NewS2, _} ->
+ NewS2
+ end
+ end, S#state{appls = Appls}, lists:reverse(SortedStartApps))}.
+
+%%-----------------------------------------------------------------
+%% Nodes = [node() | {node(), ..., node()}]
+%% A list in priority order. If it is a tuple, we may pick any of
+%% them. This decision is made by all nodes in the list, and all
+%% nodes choose the same. This is accomplished in the following
+%% way: all Nodes send to all others a msg which tells how many
+%% applications each node has started. The one with the fewest
+%% applications starts this one.
+%%-----------------------------------------------------------------
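+%%
+%% A worked example (hypothetical node names): with
+%% Nodes = [cp1, {cp2, cp3}] and cp1 not permitted, the remaining nodes
+%% cp2 and cp3 exchange {dist_ac_weight, Name, Weight, node()} messages,
+%% where Weight is simply length(application:which_applications()) on
+%% each node (see get_weight/0). If cp2 has 3 applications running and
+%% cp3 has 5, both nodes compute the sorted list [{3,cp2},{5,cp3}] and,
+%% assuming cp2 is alive, both pick cp2 without further negotiation.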
+find_start_node(Nodes, PermittedNodes, Name, S) ->
+ AllNodes = intersection(flat_nodes(Nodes), PermittedNodes),
+ case lists:member(node(), AllNodes) of
+ true ->
+ Weight = get_cached_weight(Name, S),
+ find_start_node(Nodes, Name, S, Weight, AllNodes);
+ false ->
+ case keysearch(Name, 2, S#state.remote_started) of
+ {value, {Node, _Name}} ->
+ {already_started, Node};
+ _ when AllNodes =/= [] ->
+ not_started;
+ _ ->
+ no_permission
+ end
+ end.
+
+find_start_node([AnyNodes | Nodes], Name, S, Weight, AllNodes)
+ when is_tuple(AnyNodes) ->
+ case find_any_node(tuple_to_list(AnyNodes), Name, S, Weight, AllNodes) of
+ false -> find_start_node(Nodes, Name, S, Weight, AllNodes);
+ Res -> Res
+ end;
+find_start_node([Node | Nodes], Name, S, Weight, AllNodes) ->
+ case lists:member(Node, AllNodes) of
+ true ->
+ case keysearch(Name, #appl.name, S#state.appls) of
+ {value, #appl{id = {distributed, Node}}} ->
+ {already_started, Node};
+ _ ->
+ case keysearch(Name, 2, S#state.remote_started) of
+ {value, {Node, _Name}} ->
+ {already_started, Node};
+ _ ->
+ {ok, Node}
+ end
+ end;
+ false -> find_start_node(Nodes, Name, S, Weight, AllNodes)
+ end;
+find_start_node([], _Name, _S, _Weight, _AllNodes) ->
+ not_started.
+
+%%-----------------------------------------------------------------
+%% First of all, check if the application is already running
+%% somewhere in AnyNodes; in that case we shall not move it!
+%%-----------------------------------------------------------------
+find_any_node(AnyNodes, Name, S, Weight, AllNodes) ->
+ case check_running(Name, S, intersection(AnyNodes, AllNodes)) of
+ {already_started, Node} -> {already_started, Node};
+ false ->
+ %% Synchronize with all other nodes.
+ send_nodes(AllNodes, {dist_ac_weight, Name, Weight, node()}),
+ Answers = [{Weight, node()} |
+ collect_answers(AllNodes, Name, S, [])],
+ %% Make a decision (the same at every node) (smallest weight wins)
+ find_alive_node(lists:sort(Answers),
+ intersection(AnyNodes, S#state.known))
+ end.
+
+%%-----------------------------------------------------------------
+%% Check if another node started the appl before this node came up.
+%% If so, check if that node is one of AnyNodes.
+%%-----------------------------------------------------------------
+check_running(Name, #state{remote_started = RStarted,
+ appls = Appls}, AnyNodes) ->
+ case keysearch(Name, 2, RStarted) of
+ {value, {Node, _Name}} ->
+ case lists:member(Node, AnyNodes) of
+ true -> {already_started, Node};
+ false -> false
+ end;
+ false ->
+ case keysearch(Name, #appl.name, Appls) of
+ {value, #appl{id = {distributed, Node}}} ->
+ case lists:member(Node, AnyNodes) of
+ true -> {already_started, Node};
+ false -> false
+ end;
+ _ ->
+ false
+ end
+ end.
+
+find_alive_node([{_, Node} | Nodes], AliveNodes) ->
+ case lists:member(Node, AliveNodes) of
+ true -> {ok, Node};
+ false -> find_alive_node(Nodes, AliveNodes)
+ end;
+find_alive_node([], _AliveNodes) ->
+ false.
+
+%%-----------------------------------------------------------------
+%% First, check if the node's msg is buffered (received in our
+%% main loop). Otherwise, wait for msg or nodedown.
+%% We have sent the dist_ac_weight message, and will wait for it
+%% to be received here (or a nodedown). This implies that a
+%% dist_ac must *always* be prepared to receive this message, and to
+%% send it to us.
+%%-----------------------------------------------------------------
+collect_answers([Node | Nodes], Name, S, Res) when Node =/= node() ->
+ case keysearch(Node, 3, S#state.tmp_locals) of
+ {value, {Name, Weight, Node}} ->
+ collect_answers(Nodes, Name, S, [{Weight, Node} | Res]);
+ _ ->
+ monitor_node(Node, true),
+ receive
+ {dist_ac_weight, Name, Weight, Node} ->
+ monitor_node(Node, false),
+ collect_answers(Nodes, Name, S, [{Weight, Node} | Res]);
+ {nodedown, Node} ->
+ monitor_node(Node, false),
+ collect_answers(Nodes, Name, S, Res)
+ end
+ end;
+collect_answers([_ThisNode | Nodes], Name, S, Res) ->
+ collect_answers(Nodes, Name, S, Res);
+collect_answers([], _Name, _S, Res) ->
+ Res.
+
+send_nodes(Nodes, Msg) ->
+ FlatNodes = flat_nodes(Nodes),
+ foreach(fun(Node) when Node =/= node() -> {?DIST_AC, Node} ! Msg;
+ (_ThisNode) -> ok
+ end, FlatNodes).
+
+send_after(Time, Msg) when is_integer(Time), Time >= 0 ->
+ spawn_link(?MODULE, send_timeout, [self(), Time, Msg]);
+send_after(_,_) -> % infinity
+ ok.
+
+send_timeout(To, Time, Msg) ->
+ receive
+ after Time -> To ! Msg
+ end.
+
+send_msg(Msg, Nodes) ->
+ foreach(fun(Node) when Node =/= node() -> {?DIST_AC, Node} ! Msg;
+ (_) -> ok
+ end, Nodes).
+
+replaceadd(Item, List) ->
+ case member(Item, List) of
+ true -> List;
+ false -> [Item | List]
+ end.
+
+keyreplaceadd(Key, Pos, List, New) ->
+ case lists:keymember(Key, Pos, List) of
+ true -> lists:keyreplace(Key, Pos, List, New);
+ false -> [New | List]
+ end.
+
+keydelete_all(Key, N, [H|T]) when element(N, H) =:= Key ->
+ keydelete_all(Key, N, T);
+keydelete_all(Key, N, [H|T]) ->
+ [H|keydelete_all(Key, N, T)];
+keydelete_all(_Key, _N, []) -> [].
+
+-ifdef(NOTUSED).
+keysearchdelete(Key, Pos, List) ->
+ ksd(Key, Pos, List, []).
+
+ksd(Key, Pos, [H | T], Rest) when element(Pos, H) =:= Key ->
+ {value, H, Rest ++ T};
+ksd(Key, Pos, [H | T], Rest) ->
+ ksd(Key, Pos, T, [H | Rest]);
+ksd(_Key, _Pos, [], _Rest) ->
+ false.
+
+get_new_appl(Name, [{application, Name, App} | _]) ->
+ {ok, {application, Name, App}};
+get_new_appl(Name, [_ | T]) -> get_new_appl(Name, T);
+get_new_appl(Name, []) -> false.
+-endif.
+
+equal_nodes([H | T1], [H | T2]) when is_atom(H) ->
+ equal_nodes(T1, T2);
+equal_nodes([H1 | T1], [H2 | T2]) when is_tuple(H1), is_tuple(H2) ->
+ case equal(tuple_to_list(H1), tuple_to_list(H2)) of
+ true -> equal_nodes(T1, T2);
+ false -> false
+ end;
+equal_nodes([], []) -> true;
+equal_nodes(_, _) -> false.
+
+equal([H | T] , S) ->
+ case lists:member(H, S) of
+ true -> equal(T, lists:delete(H, S));
+ false -> false
+ end;
+equal([], []) -> true;
+equal(_, _) -> false.
+
+flat_nodes(Nodes) when is_list(Nodes) ->
+ foldl(fun(Node, Res) when is_atom(Node) -> [Node | Res];
+ (Tuple, Res) when is_tuple(Tuple) -> tuple_to_list(Tuple) ++ Res
+ end, [], Nodes);
+flat_nodes(Nodes) ->
+ throw({error, {badarg, Nodes}}).
+
+get_cached_weight(Name, S) ->
+ case lists:keysearch(Name, 1, S#state.tmp_weights) of
+ {value, {_, W}} -> W;
+ _ -> get_weight()
+ end.
+
+%% Simple weight; just count the number of applications running.
+get_weight() ->
+ length(application:which_applications()).
+
+get_dist_loaded(Name, [{{Name, Node}, HisNodes, Permission} | T]) ->
+ [{Node, HisNodes, Permission} | get_dist_loaded(Name, T)];
+get_dist_loaded(Name, [_H | T]) ->
+ get_dist_loaded(Name, T);
+get_dist_loaded(_Name, []) ->
+ [].
+
+del_dist_loaded(Name, [{{Name, _Node}, _HisNodes, _Permission} | T]) ->
+ del_dist_loaded(Name, T);
+del_dist_loaded(Name, [H | T]) ->
+ [H | del_dist_loaded(Name, T)];
+del_dist_loaded(_Name, []) ->
+ [].
+
+req_start_app(State, Name) ->
+ {ok, foldl(
+ fun({false, AppName, true, Name2}, S) when Name =:= Name2 ->
+ PR = keydelete(AppName, 2, S#state.p_reqs),
+ NS = S#state{p_reqs = PR},
+ case catch do_start_appls([AppName], NS) of
+ {_ErrorTag, {not_running, App}} ->
+ NRequests = [{false, AppName, true, App} | PR],
+ S#state{p_reqs = NRequests};
+ {ok, NewS} ->
+ NewS;
+ {_ErrorTag, R} ->
+ throw({error, R})
+ end;
+ (_, S) ->
+ S
+ end, State, State#state.p_reqs)}.
+
+
+req_del_permit_true(Reqs, Name) ->
+ filter(fun({From, Name2, true, _}) when Name2 =:= Name ->
+ gen_server:reply(From, ok),
+ false;
+ (_) ->
+ true
+ end, Reqs).
+
+req_del_permit_false(Reqs, Name) ->
+ filter(fun({From, Name2, false, _Nodes}) when Name2 =:= Name ->
+ gen_server:reply(From, ok),
+ false;
+ (_) ->
+ true
+ end, Reqs).
+
+req_del_node(S, Node, Appls) ->
+ check_waiting(S#state.p_reqs, S, Node, Appls, [], S#state.s_reqs).
+
+del_t_reqs(AppName, TReqs, Res) ->
+ lists:filter(fun({AN, From}) when AppName =:= AN ->
+ gen_server:reply(From, Res),
+ false;
+ (_) ->
+ true
+ end,
+ TReqs).
+
+
+check_waiting([{From, AppName, false, Nodes} | Reqs],
+ S, Node, Appls, Res, SReqs) ->
+ case lists:delete(Node, Nodes) of
+ [] ->
+ ac_stop_it(AppName),
+ NSReqs = [{AppName, From} | SReqs],
+ check_waiting(Reqs, S, Node, Appls, Res, NSReqs);
+ NNodes ->
+ check_waiting(Reqs, S, Node, Appls,
+ [{From, AppName, false, NNodes} | Res], SReqs)
+ end;
+check_waiting([H | Reqs], S, Node, Appls, Res, SReqs) ->
+ check_waiting(Reqs, S, Node, Appls, [H | Res], SReqs);
+check_waiting([], _S, _Node, Appls, Res, SReqs) ->
+ {Res, Appls, SReqs}.
+
+intersection([], _) ->
+ [];
+intersection(_, []) ->
+ [];
+intersection(L1, L2) ->
+ L1 -- (L1 -- L2).
+
+get_default_permission(AppName) ->
+ case application:get_env(kernel, permissions) of
+ {ok, Permissions} ->
+ case keysearch(AppName, 1, Permissions) of
+ {value, {_, true}} -> true;
+ {value, {_, false}} -> false;
+ {value, {_, X}} -> exit({bad_permission, {AppName, X}});
+ false -> true
+ end;
+ undefined -> true
+ end.
+
+%%-----------------------------------------------------------------
+%% ADT dist() - info on how an application is distributed
+%% dist() = [{AppName, Time, DistNodes, [{Node, Runnable}]}]
+%% Time = int() >= 0 | infinity
+%% Nodes = [node() | {node()...}]
+%% Runnable = true | false | undefined
+%% An appl may not be started if any Runnable is undefined;
+%% i.e. the appl must be loaded on all Nodes.
+%%-----------------------------------------------------------------
+dist_check([{AppName, Nodes} | T]) ->
+ P = get_default_permission(AppName),
+ [#appl{name = AppName, nodes = Nodes, run = [{node(), P}]} | dist_check(T)];
+dist_check([{AppName, Time, Nodes} | T]) when is_integer(Time), Time >= 0 ->
+ P = get_default_permission(AppName),
+ [#appl{name = AppName, restart_time = Time, nodes = Nodes,
+ run = [{node(), P}]} | dist_check(T)];
+dist_check([{AppName, infinity, Nodes} | T]) ->
+ P = get_default_permission(AppName),
+ [#appl{name = AppName, restart_time = infinity,
+ nodes = Nodes, run = [{node(), P}]} |
+ dist_check(T)];
+dist_check([_ | T]) ->
+ dist_check(T);
+dist_check([]) ->
+ [].
+
+dist_take_control(Appls) ->
+ foreach(fun(#appl{name = AppName}) ->
+ application_controller:control_application(AppName)
+ end, Appls).
+
+dist_replace(default, _Name, Appls) -> Appls;
+dist_replace({AppName, Nodes}, AppName, Appls) ->
+ Run = [{Node, undefined} || Node <- flat_nodes(Nodes)],
+ keyreplaceadd(AppName, #appl.name, Appls,
+ #appl{name = AppName, restart_time = 0,
+ nodes = Nodes, run = Run});
+dist_replace({AppName, Time, Nodes}, AppName, Appls)
+ when is_integer(Time), Time >= 0 ->
+ Run = [{Node, undefined} || Node <- flat_nodes(Nodes)],
+ keyreplaceadd(AppName, #appl.name, Appls,
+ #appl{name = AppName, restart_time = Time,
+ nodes = Nodes, run = Run});
+dist_replace(Bad, _Name, _Appls) ->
+ throw({error, {bad_distribution_spec, Bad}}).
+
+dist_update_run(Appls, AppName, Node, Permission) ->
+ map(fun(Appl) when Appl#appl.name =:= AppName ->
+ Run = Appl#appl.run,
+ NRun = keyreplaceadd(Node, 1, Run, {Node, Permission}),
+ Appl#appl{run = NRun};
+ (Appl) ->
+ Appl
+ end, Appls).
+
+
+
+dist_change_update(Appls, []) ->
+ Appls;
+dist_change_update(Appls, [{AppName, NewNodes} | NewDist]) ->
+ NewAppls = do_dist_change_update(Appls, AppName, 0, NewNodes),
+ dist_change_update(NewAppls, NewDist);
+dist_change_update(Appls, [{AppName, NewTime, NewNodes} | NewDist]) ->
+ NewAppls = do_dist_change_update(Appls, AppName, NewTime, NewNodes),
+ dist_change_update(NewAppls, NewDist).
+
+do_dist_change_update(Appls, AppName, NewTime, NewNodes) ->
+ map(fun(Appl) when Appl#appl.name =:= AppName ->
+ Appl#appl{restart_time = NewTime, nodes = NewNodes};
+ (Appl) ->
+ Appl
+ end, Appls).
+
+%% Merge his Permissions with mine.
+dist_merge(MyAppls, HisAppls, HisNode) ->
+ zf(fun(Appl) ->
+ #appl{name = AppName, run = Run} = Appl,
+% #appl{name = AppName, nodes = Nodes, run = Run} = Appl,
+% HeIsMember = lists:member(HisNode, flat_nodes(Nodes)),
+ HeIsMember = true,
+ case keysearch(AppName, #appl.name, HisAppls) of
+ {value, #appl{run = HisRun}} when HeIsMember ->
+ case keysearch(HisNode, 1, HisRun) of
+ {value, Val} -> % He has it loaded
+ NRun = keyreplaceadd(HisNode, 1, Run, Val),
+ {true, Appl#appl{run = NRun}};
+ false -> % He hasn't loaded it yet
+ Val = {HisNode, undefined},
+ {true, Appl#appl{run = [Val | Run]}}
+ end;
+ _ ->
+ true
+ end
+ end, MyAppls).
+
+dist_get_runnable_nodes(Appls, AppName) ->
+ case keysearch(AppName, #appl.name, Appls) of
+ {value, #appl{run = Run}} ->
+ zf(fun({Node, true}) -> {true, Node};
+ (_) -> false
+ end, Run);
+ false ->
+ []
+ end.
+
+dist_is_runnable(Appls, AppName) ->
+ case keysearch(AppName, #appl.name, Appls) of
+ {value, #appl{run = Run}} ->
+ case keysearch(node(), 1, Run) of
+ {value, {_, true}} -> true;
+ _ -> false
+ end;
+ false ->
+ false
+ end.
+
+is_loaded(AppName, #state{appls = Appls}) ->
+ case keysearch(AppName, #appl.name, Appls) of
+ {value, #appl{run = Run}} ->
+ case keysearch(node(), 1, Run) of
+ {value, {_Node, undefined}} -> false;
+ {value, _} -> true;
+ false -> false
+ end;
+ false ->
+ false
+ end.
+
+dist_get_runnable(Appls) ->
+ zf(fun(#appl{name = AppName, run = Run}) ->
+ case keysearch(node(), 1, Run) of
+ {value, {_, true}} -> {true, AppName};
+ _ -> false
+ end
+ end, Appls).
+
+dist_get_all_nodes(#appl{name = AppName, nodes = Nodes, run = Run}) ->
+ {Res, BadNodes} = check_nodes(Run, [], []),
+ case intersection(BadNodes, erlang:nodes(connected)) of
+ [] -> {ok, Nodes, Res};
+ _ -> {error, {app_not_loaded, AppName, BadNodes}}
+ end.
+
+check_nodes([{Node, undefined} | T], Res, BadNodes) ->
+ check_nodes(T, Res, [Node | BadNodes]);
+check_nodes([{Node, true} | T], Res, BadNodes) ->
+ check_nodes(T, [Node | Res], BadNodes);
+check_nodes([{_Node, false} | T], Res, BadNodes) ->
+ check_nodes(T, Res, BadNodes);
+check_nodes([], Res, BadNodes) ->
+ {Res, BadNodes}.
+
+-ifdef(NOTUSED).
+dist_find_time([#appl{name = Name, restart_time = Time} |_], Name) -> Time;
+dist_find_time([_ | T], Name) -> dist_find_time(T, Name);
+dist_find_time([], Name) -> 0.
+-endif.
+
+%% Find all nodes that can run the app (even if they're not permitted
+%% to right now).
+dist_find_nodes([#appl{name = Name, nodes = Nodes} |_], Name) -> Nodes;
+dist_find_nodes([_ | T], Name) -> dist_find_nodes(T, Name);
+dist_find_nodes([], _Name) -> [].
+
+dist_flat_nodes(Appls, Name) ->
+ flat_nodes(dist_find_nodes(Appls, Name)).
+
+dist_del_node(Appls, Node) ->
+ map(fun(Appl) ->
+ NRun = filter(fun({N, _Runnable}) when N =:= Node -> false;
+ (_) -> true
+ end, Appl#appl.run),
+ Appl#appl{run = NRun}
+ end, Appls).
+
+validRestartType(permanent) -> true;
+validRestartType(temporary) -> true;
+validRestartType(transient) -> true;
+validRestartType(_RestartType) -> false.
+
+dist_mismatch(AppName, Node) ->
+ error_msg("Distribution mismatch for application \"~p\" on nodes ~p and ~p~n",
+ [AppName, node(), Node]),
+ exit({distribution_mismatch, AppName, Node}).
+
+%error_msg(Format) when is_list(Format) ->
+% error_msg(Format, []).
+
+error_msg(Format, ArgList) when is_list(Format), is_list(ArgList) ->
+ error_logger:error_msg("dist_ac on node ~p:~n" ++ Format, [node()|ArgList]).
+
+%info_msg(Format) when is_list(Format) ->
+% info_msg(Format, []).
+
+%info_msg(Format, ArgList) when is_list(Format), is_list(ArgList) ->
+% error_logger:info_msg("dist_ac on node ~p:~n" ++ Format, [node()|ArgList]).
diff --git a/lib/kernel/src/dist_util.erl b/lib/kernel/src/dist_util.erl
new file mode 100644
index 0000000000..a2937d60b8
--- /dev/null
+++ b/lib/kernel/src/dist_util.erl
@@ -0,0 +1,762 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%%----------------------------------------------------------------------
+%%% Purpose : The handshake of a streamed distribution connection
+%%% in a separate file to make it usable for other
+%%% distribution protocols.
+%%%----------------------------------------------------------------------
+
+-module(dist_util).
+
+%%-compile(export_all).
+-export([handshake_we_started/1, handshake_other_started/1,
+ start_timer/1, setup_timer/2,
+ reset_timer/1, cancel_timer/1,
+ shutdown/3, shutdown/4]).
+
+-import(error_logger,[error_msg/2]).
+
+-include("dist_util.hrl").
+-include("dist.hrl").
+
+-ifdef(DEBUG).
+-define(shutdown_trace(A,B), io:format(A,B)).
+-else.
+-define(shutdown_trace(A,B), noop).
+-endif.
+
+-define(to_port(FSend, Socket, Data),
+ case FSend(Socket, Data) of
+ {error, closed} ->
+ self() ! {tcp_closed, Socket},
+ {error, closed};
+ R ->
+ R
+ end).
+
+
+-define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
+-define(int32(X),
+ [((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff,
+ ((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
+-define(i16(X1,X0),
+ (?u16(X1,X0) -
+ (if (X1) > 127 -> 16#10000; true -> 0 end))).
+
+-define(u16(X1,X0),
+ (((X1) bsl 8) bor (X0))).
+
+-define(u32(X3,X2,X1,X0),
+ (((X3) bsl 24) bor ((X2) bsl 16) bor ((X1) bsl 8) bor (X0))).
+
+-record(tick, {read = 0,
+ write = 0,
+ tick = 0,
+ ticked = 0
+ }).
+
+remove_flag(Flag, Flags) ->
+ case Flags band Flag of
+ 0 ->
+ Flags;
+ _ ->
+ Flags - Flag
+ end.
+
+adjust_flags(ThisFlags, OtherFlags) ->
+ case (?DFLAG_PUBLISHED band ThisFlags) band OtherFlags of
+ 0 ->
+ {remove_flag(?DFLAG_PUBLISHED, ThisFlags),
+ remove_flag(?DFLAG_PUBLISHED, OtherFlags)};
+ _ ->
+ {ThisFlags, OtherFlags}
+ end.
+
+publish_flag(hidden, _) ->
+ 0;
+publish_flag(_, OtherNode) ->
+ case net_kernel:publish_on_node(OtherNode) of
+ true ->
+ ?DFLAG_PUBLISHED;
+ _ ->
+ 0
+ end.
+
+make_this_flags(RequestType, OtherNode) ->
+ publish_flag(RequestType, OtherNode) bor
+ %% The parenthesis below makes the compiler generate better code.
+ (?DFLAG_EXPORT_PTR_TAG bor
+ ?DFLAG_EXTENDED_PIDS_PORTS bor
+ ?DFLAG_EXTENDED_REFERENCES bor
+ ?DFLAG_DIST_MONITOR bor
+ ?DFLAG_FUN_TAGS bor
+ ?DFLAG_DIST_MONITOR_NAME bor
+ ?DFLAG_HIDDEN_ATOM_CACHE bor
+ ?DFLAG_NEW_FUN_TAGS bor
+ ?DFLAG_BIT_BINARIES bor
+ ?DFLAG_NEW_FLOATS bor
+ ?DFLAG_UNICODE_IO bor
+ ?DFLAG_DIST_HDR_ATOM_CACHE bor
+ ?DFLAG_SMALL_ATOM_TAGS).
+
+handshake_other_started(#hs_data{request_type=ReqType}=HSData0) ->
+ {PreOtherFlags,Node,Version} = recv_name(HSData0),
+ PreThisFlags = make_this_flags(ReqType, Node),
+ {ThisFlags, OtherFlags} = adjust_flags(PreThisFlags,
+ PreOtherFlags),
+ HSData = HSData0#hs_data{this_flags=ThisFlags,
+ other_flags=OtherFlags,
+ other_version=Version,
+ other_node=Node,
+ other_started=true},
+ check_dflag_xnc(HSData),
+ is_allowed(HSData),
+ ?debug({"MD5 connection from ~p (V~p)~n",
+ [Node, HSData#hs_data.other_version]}),
+ mark_pending(HSData),
+ {MyCookie,HisCookie} = get_cookies(Node),
+ ChallengeA = gen_challenge(),
+ send_challenge(HSData, ChallengeA),
+ reset_timer(HSData#hs_data.timer),
+ ChallengeB = recv_challenge_reply(HSData, ChallengeA, MyCookie),
+ send_challenge_ack(HSData, gen_digest(ChallengeB, HisCookie)),
+ ?debug({dist_util, self(), accept_connection, Node}),
+ connection(HSData).
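+
+%% The message sequence above, as seen from the accepting side
+%% (all digests are computed by gen_digest/2 with the shared cookie):
+%%   peer -> us : name            [$n, Version, Flags, PeerNode]
+%%   us -> peer : status          [$s, "ok" | "ok_simultaneous" | "nok" | ...]
+%%   us -> peer : challenge       [$n, Version, Flags, ChallengeA, MyNode]
+%%   peer -> us : challenge reply [$r, ChallengeB, digest(ChallengeA)]
+%%   us -> peer : challenge ack   [$a, digest(ChallengeB)]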
+
+%%
+%% check if connecting node is allowed to connect
+%% with allow-node-scheme
+%%
+is_allowed(#hs_data{other_node = Node,
+ allowed = Allowed} = HSData) ->
+ case lists:member(Node, Allowed) of
+ false when Allowed =/= [] ->
+ send_status(HSData, not_allowed),
+ error_msg("** Connection attempt from "
+ "disallowed node ~w ** ~n", [Node]),
+ ?shutdown(Node);
+ _ -> true
+ end.
+
+%%
+%% Check that both nodes can handle the same types of extended
+%% node containers. If they can not, abort the connection.
+%%
+check_dflag_xnc(#hs_data{other_node = Node,
+ other_flags = OtherFlags,
+ other_started = OtherStarted} = HSData) ->
+ XRFlg = ?DFLAG_EXTENDED_REFERENCES,
+ XPPFlg = case erlang:system_info(compat_rel) of
+ R when R >= 10 ->
+ ?DFLAG_EXTENDED_PIDS_PORTS;
+ _ ->
+ 0
+ end,
+ ReqXncFlags = XRFlg bor XPPFlg,
+ case OtherFlags band ReqXncFlags =:= ReqXncFlags of
+ true ->
+ ok;
+ false ->
+ What = case {OtherFlags band XRFlg =:= XRFlg,
+ OtherFlags band XPPFlg =:= XPPFlg} of
+ {false, false} -> "references, pids and ports";
+ {true, false} -> "pids and ports";
+ {false, true} -> "references"
+ end,
+ case OtherStarted of
+ true ->
+ send_status(HSData, not_allowed),
+ Dir = "from",
+ How = "rejected";
+ _ ->
+ Dir = "to",
+ How = "aborted"
+ end,
+ error_msg("** ~w: Connection attempt ~s node ~w ~s "
+ "since it cannot handle extended ~s. "
+ "**~n", [node(), Dir, Node, How, What]),
+ ?shutdown(Node)
+ end.
+
+
+%% No nodedown will be sent if we fail before this process has
+%% succeeded in marking the node as pending.
+
+mark_pending(#hs_data{kernel_pid=Kernel,
+ other_node=Node,
+ this_node=MyNode}=HSData) ->
+ case do_mark_pending(Kernel, MyNode, Node,
+ (HSData#hs_data.f_address)(HSData#hs_data.socket,
+ Node),
+ HSData#hs_data.other_flags) of
+ ok ->
+ send_status(HSData, ok),
+ reset_timer(HSData#hs_data.timer);
+
+ ok_pending ->
+ send_status(HSData, ok_simultaneous),
+ reset_timer(HSData#hs_data.timer);
+
+ nok_pending ->
+ send_status(HSData, nok),
+ ?shutdown(Node);
+
+ up_pending ->
+	    %% Check if the connection is still alive; a negative
+	    %% answer implies that the connection is no longer
+	    %% pending due to a simultaneous connect
+ do_alive(HSData),
+
+ %% This can happen if the other node goes down,
+ %% and goes up again and contact us before we have
+ %% detected that the socket was closed.
+ wait_pending(Kernel),
+ reset_timer(HSData#hs_data.timer);
+
+ already_pending ->
+ %% FIXME: is this a case ?
+ ?debug({dist_util,self(),mark_pending,already_pending,Node}),
+ ?shutdown(Node)
+ end.
+
+
+%%
+%% Marking pending and negotiating away
+%% simultaneous connection problems
+%%
+
+wait_pending(Kernel) ->
+ receive
+ {Kernel, pending} ->
+ ?trace("wait_pending returned for pid ~p.~n",
+ [self()]),
+ ok
+ end.
+
+do_alive(#hs_data{other_node = Node} = HSData) ->
+ send_status(HSData, alive),
+ case recv_status(HSData) of
+ true -> true;
+ false -> ?shutdown(Node)
+ end.
+
+do_mark_pending(Kernel, MyNode, Node, Address, Flags) ->
+ Kernel ! {self(), {accept_pending,MyNode,Node,Address,
+ publish_type(Flags)}},
+ receive
+ {Kernel,{accept_pending,Ret}} ->
+ ?trace("do_mark_pending(~p,~p,~p,~p) -> ~p~n",
+ [Kernel,Node,Address,Flags,Ret]),
+ Ret
+ end.
+
+is_pending(Kernel, Node) ->
+ Kernel ! {self(), {is_pending, Node}},
+ receive
+ {Kernel, {is_pending, Reply}} -> Reply
+ end.
+
+%%
+%% This will tell the net_kernel about the nodedown as it
+%% recognizes the exit signal.
+%% The termination of this process does also imply that the Socket
+%% is closed in a controlled way by inet_drv.
+%%
+
+-spec shutdown(atom(), non_neg_integer(), term()) -> no_return().
+
+shutdown(Module, Line, Data) ->
+ shutdown(Module, Line, Data, shutdown).
+
+-spec shutdown(atom(), non_neg_integer(), term(), term()) -> no_return().
+
+shutdown(_Module, _Line, _Data, Reason) ->
+ ?shutdown_trace("Net Kernel 2: shutting down connection "
+ "~p:~p, data ~p,reason ~p~n",
+ [_Module,_Line, _Data, Reason]),
+ flush_down(),
+ exit(Reason).
+%% Use this line to debug connection.
+%% Set net_kernel verbose = 1 as well.
+%% exit({Reason, ?MODULE, _Line, _Data, erlang:now()}).
+
+
+flush_down() ->
+ receive
+ {From, get_status} ->
+ From ! {self(), get_status, error},
+ flush_down()
+ after 0 ->
+ ok
+ end.
+
+handshake_we_started(#hs_data{request_type=ReqType,
+ other_node=Node}=PreHSData) ->
+ PreThisFlags = make_this_flags(ReqType, Node),
+ HSData = PreHSData#hs_data{this_flags=PreThisFlags},
+ send_name(HSData),
+ recv_status(HSData),
+ {PreOtherFlags,ChallengeA} = recv_challenge(HSData),
+ {ThisFlags,OtherFlags} = adjust_flags(PreThisFlags, PreOtherFlags),
+ NewHSData = HSData#hs_data{this_flags = ThisFlags,
+ other_flags = OtherFlags,
+ other_started = false},
+ check_dflag_xnc(NewHSData),
+ MyChallenge = gen_challenge(),
+ {MyCookie,HisCookie} = get_cookies(Node),
+ send_challenge_reply(NewHSData,MyChallenge,
+ gen_digest(ChallengeA,HisCookie)),
+ reset_timer(NewHSData#hs_data.timer),
+ recv_challenge_ack(NewHSData, MyChallenge, MyCookie),
+ connection(NewHSData).
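+
+%% Seen from the initiating side the sequence is the mirror image: we
+%% send our name, read the peer's status and challenge (ChallengeA),
+%% reply with our own challenge plus gen_digest(ChallengeA, HisCookie),
+%% and finally check the peer's ack against gen_digest(MyChallenge, MyCookie).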
+
+%% --------------------------------------------------------------
+%% The connection has been established.
+%% --------------------------------------------------------------
+
+connection(#hs_data{other_node = Node,
+ socket = Socket,
+ f_address = FAddress,
+ f_setopts_pre_nodeup = FPreNodeup,
+ f_setopts_post_nodeup = FPostNodeup}= HSData) ->
+ cancel_timer(HSData#hs_data.timer),
+ PType = publish_type(HSData#hs_data.other_flags),
+ case FPreNodeup(Socket) of
+ ok ->
+ do_setnode(HSData), % Succeeds or exits the process.
+ Address = FAddress(Socket,Node),
+ mark_nodeup(HSData,Address),
+ case FPostNodeup(Socket) of
+ ok ->
+ con_loop(HSData#hs_data.kernel_pid,
+ Node,
+ Socket,
+ Address,
+ HSData#hs_data.this_node,
+ PType,
+ #tick{},
+ HSData#hs_data.mf_tick,
+ HSData#hs_data.mf_getstat);
+ _ ->
+ ?shutdown2(Node, connection_setup_failed)
+ end;
+ _ ->
+ ?shutdown(Node)
+ end.
+
+%% Generate a message digest from Challenge number and Cookie
+gen_digest(Challenge, Cookie) when is_integer(Challenge), is_atom(Cookie) ->
+ erlang:md5([atom_to_list(Cookie)|integer_to_list(Challenge)]).
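+
+%% Example: with Cookie = 'secret' and Challenge = 12345 the digest is
+%% erlang:md5("secret12345"), i.e. the cookie text immediately followed
+%% by the decimal representation of the challenge.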
+
+%% ---------------------------------------------------------------
+%% Challenge code
+%% gen_challenge() returns a "random" number
+%% ---------------------------------------------------------------
+gen_challenge() ->
+ {A,B,C} = erlang:now(),
+ {D,_} = erlang:statistics(reductions),
+ {E,_} = erlang:statistics(runtime),
+ {F,_} = erlang:statistics(wall_clock),
+ {G,H,_} = erlang:statistics(garbage_collection),
+ %% A(8) B(16) C(16)
+ %% D(16),E(8), F(16) G(8) H(16)
+ ( ((A bsl 24) + (E bsl 16) + (G bsl 8) + F) bxor
+ (B + (C bsl 16)) bxor
+ (D + (H bsl 16)) ) band 16#ffffffff.
+
+%%
+%% Get the cookies for a node from auth
+%%
+get_cookies(Node) ->
+ case auth:get_cookie(Node) of
+ X when is_atom(X) ->
+ {X,X}
+% {Y,Z} when is_atom(Y), is_atom(Z) ->
+% {Y,Z};
+% _ ->
+% erlang:error("Corrupt cookie database")
+ end.
+
+%% No error return; either succeeds or terminates the process.
+do_setnode(#hs_data{other_node = Node, socket = Socket,
+ other_flags = Flags, other_version = Version,
+ f_getll = GetLL}) ->
+ case GetLL(Socket) of
+ {ok,Port} ->
+ ?trace("setnode(md5,~p ~p ~p)~n",
+ [Node, Port, {publish_type(Flags),
+ '(', Flags, ')',
+ Version}]),
+ case (catch
+ erlang:setnode(Node, Port,
+ {Flags, Version, '', ''})) of
+ {'EXIT', {system_limit, _}} ->
+ error_msg("** Distribution system limit reached, "
+ "no table space left for node ~w ** ~n",
+ [Node]),
+ ?shutdown(Node);
+ {'EXIT', Other} ->
+ exit(Other);
+ _Else ->
+ ok
+ end;
+ _ ->
+ error_msg("** Distribution connection error, "
+ "could not get low level port for node ~w ** ~n",
+ [Node]),
+ ?shutdown(Node)
+ end.
+
+mark_nodeup(#hs_data{kernel_pid = Kernel,
+ other_node = Node,
+ other_flags = Flags,
+ other_started = OtherStarted},
+ Address) ->
+ Kernel ! {self(), {nodeup,Node,Address,publish_type(Flags),
+ true}},
+ receive
+ {Kernel, inserted} ->
+ ok;
+ {Kernel, bad_request} ->
+ TypeT = case OtherStarted of
+ true ->
+ "accepting connection";
+ _ ->
+ "initiating connection"
+ end,
+ error_msg("Fatal: ~p was not allowed to "
+ "send {nodeup, ~p} to kernel when ~s~n",
+ [self(), Node, TypeT]),
+ ?shutdown(Node)
+ end.
+
+con_loop(Kernel, Node, Socket, TcpAddress,
+ MyNode, Type, Tick, MFTick, MFGetstat) ->
+ receive
+ {tcp_closed, Socket} ->
+ ?shutdown2(Node, connection_closed);
+ {Kernel, disconnect} ->
+ ?shutdown2(Node, disconnected);
+ {Kernel, aux_tick} ->
+ case MFGetstat(Socket) of
+ {ok, _, _, PendWrite} ->
+ send_tick(Socket, PendWrite, MFTick);
+ _ ->
+ ignore_it
+ end,
+ con_loop(Kernel, Node, Socket, TcpAddress, MyNode, Type,
+ Tick, MFTick, MFGetstat);
+ {Kernel, tick} ->
+ case send_tick(Socket, Tick, Type,
+ MFTick, MFGetstat) of
+ {ok, NewTick} ->
+ con_loop(Kernel, Node, Socket, TcpAddress,
+ MyNode, Type, NewTick, MFTick,
+ MFGetstat);
+ {error, not_responding} ->
+ error_msg("** Node ~p not responding **~n"
+ "** Removing (timedout) connection **~n",
+ [Node]),
+ ?shutdown2(Node, net_tick_timeout);
+ _Other ->
+ ?shutdown2(Node, send_net_tick_failed)
+ end;
+ {From, get_status} ->
+ case MFGetstat(Socket) of
+ {ok, Read, Write, _} ->
+ From ! {self(), get_status, {ok, Read, Write}},
+ con_loop(Kernel, Node, Socket, TcpAddress,
+ MyNode,
+ Type, Tick,
+ MFTick, MFGetstat);
+ _ ->
+ ?shutdown2(Node, get_status_failed)
+ end
+ end.
+
+
+%% ------------------------------------------------------------
+%% Misc. functions.
+%% ------------------------------------------------------------
+
+send_name(#hs_data{socket = Socket, this_node = Node,
+ f_send = FSend,
+ this_flags = Flags,
+ other_version = Version}) ->
+ ?trace("send_name: node=~w, version=~w\n",
+ [Node,Version]),
+ ?to_port(FSend, Socket,
+ [$n, ?int16(Version), ?int32(Flags), atom_to_list(Node)]).
+
+send_challenge(#hs_data{socket = Socket, this_node = Node,
+ other_version = Version,
+ this_flags = Flags,
+ f_send = FSend},
+ Challenge ) ->
+ ?trace("send: challenge=~w version=~w\n",
+ [Challenge,Version]),
+ ?to_port(FSend, Socket, [$n,?int16(Version), ?int32(Flags),
+ ?int32(Challenge),
+ atom_to_list(Node)]).
+
+send_challenge_reply(#hs_data{socket = Socket, f_send = FSend},
+ Challenge, Digest) ->
+ ?trace("send_reply: challenge=~w digest=~p\n",
+ [Challenge,Digest]),
+ ?to_port(FSend, Socket, [$r,?int32(Challenge),Digest]).
+
+send_challenge_ack(#hs_data{socket = Socket, f_send = FSend},
+ Digest) ->
+ ?trace("send_ack: digest=~p\n", [Digest]),
+ ?to_port(FSend, Socket, [$a,Digest]).
+
+
+%%
+%% Get the name of the other side.
+%% Close the connection if invalid data.
+%% The IP address sent is not interesting (as in the old
+%% tcp_drv.c which used it to detect simultaneous connection
+%% attempts).
+%%
+recv_name(#hs_data{socket = Socket, f_recv = Recv}) ->
+ case Recv(Socket, 0, infinity) of
+ {ok,Data} ->
+ get_name(Data);
+ _ ->
+ ?shutdown(no_node)
+ end.
+
+get_name([$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4 | OtherNode]) ->
+ {?u32(Flag1, Flag2, Flag3, Flag4), list_to_atom(OtherNode),
+ ?u16(VersionA,VersionB)};
+get_name(Data) ->
+ ?shutdown(Data).
+
+publish_type(Flags) ->
+ case Flags band ?DFLAG_PUBLISHED of
+ 0 ->
+ hidden;
+ _ ->
+ normal
+ end.
+
+%% wait for challenge after connect
+recv_challenge(#hs_data{socket=Socket,other_node=Node,
+ other_version=Version,f_recv=Recv}) ->
+ case Recv(Socket, 0, infinity) of
+ {ok,[$n,V1,V0,Fl1,Fl2,Fl3,Fl4,CA3,CA2,CA1,CA0 | Ns]} ->
+ Flags = ?u32(Fl1,Fl2,Fl3,Fl4),
+ case {list_to_existing_atom(Ns),?u16(V1,V0)} of
+ {Node,Version} ->
+ Challenge = ?u32(CA3,CA2,CA1,CA0),
+ ?trace("recv: node=~w, challenge=~w version=~w\n",
+ [Node, Challenge,Version]),
+ {Flags,Challenge};
+ _ ->
+ ?shutdown(no_node)
+ end;
+ _ ->
+ ?shutdown(no_node)
+ end.
+
+
+%%
+%% wait for challenge response after send_challenge
+%%
+recv_challenge_reply(#hs_data{socket = Socket,
+ other_node = NodeB,
+ f_recv = FRecv},
+ ChallengeA, Cookie) ->
+ case FRecv(Socket, 0, infinity) of
+ {ok,[$r,CB3,CB2,CB1,CB0 | SumB]} when length(SumB) =:= 16 ->
+ SumA = gen_digest(ChallengeA, Cookie),
+ ChallengeB = ?u32(CB3,CB2,CB1,CB0),
+ ?trace("recv_reply: challenge=~w digest=~p\n",
+ [ChallengeB,SumB]),
+ ?trace("sum = ~p\n", [SumA]),
+ case list_to_binary(SumB) of
+ SumA ->
+ ChallengeB;
+ _ ->
+ error_msg("** Connection attempt from "
+ "disallowed node ~w ** ~n", [NodeB]),
+ ?shutdown(NodeB)
+ end;
+ _ ->
+ ?shutdown(no_node)
+ end.
+
+recv_challenge_ack(#hs_data{socket = Socket, f_recv = FRecv,
+ other_node = NodeB},
+ ChallengeB, CookieA) ->
+ case FRecv(Socket, 0, infinity) of
+ {ok,[$a|SumB]} when length(SumB) =:= 16 ->
+ SumA = gen_digest(ChallengeB, CookieA),
+ ?trace("recv_ack: digest=~p\n", [SumB]),
+ ?trace("sum = ~p\n", [SumA]),
+ case list_to_binary(SumB) of
+ SumA ->
+ ok;
+ _ ->
+ error_msg("** Connection attempt to "
+ "disallowed node ~w ** ~n", [NodeB]),
+ ?shutdown(NodeB)
+ end;
+ _ ->
+ ?shutdown(NodeB)
+ end.
+
+recv_status(#hs_data{kernel_pid = Kernel, socket = Socket,
+ other_node = Node, f_recv = Recv} = HSData) ->
+ case Recv(Socket, 0, infinity) of
+ {ok, [$s|StrStat]} ->
+ Stat = list_to_atom(StrStat),
+ ?debug({dist_util,self(),recv_status, Node, Stat}),
+ case Stat of
+ not_allowed -> ?shutdown(Node);
+ nok ->
+ %% wait to be killed by net_kernel
+ receive
+ after infinity -> ok
+ end;
+ alive ->
+ Reply = is_pending(Kernel, Node),
+ ?debug({is_pending,self(),Reply}),
+ send_status(HSData, Reply),
+ if not Reply ->
+ ?shutdown(Node);
+ Reply ->
+ Stat
+ end;
+ _ -> Stat
+ end;
+ _Error ->
+ ?debug({dist_util,self(),recv_status_error,
+ Node, _Error}),
+ ?shutdown(Node)
+ end.
+
+
+send_status(#hs_data{socket = Socket, other_node = Node,
+ f_send = FSend}, Stat) ->
+ ?debug({dist_util,self(),send_status, Node, Stat}),
+ case FSend(Socket, [$s | atom_to_list(Stat)]) of
+ {error, _} ->
+ ?shutdown(Node);
+ _ ->
+ true
+ end.
+
+
+
+%%
+%% Send a TICK to the other side.
+%%
+%% This will happen every 15 seconds (by default)
+%% The idea here is that every 15 secs, we write a little
+%% something on the connection if we haven't written anything for
+%% the last 15 secs.
+%% This will ensure that nodes that are not responding due to
+%% hardware errors (or being suspended by means of ^Z) will
+%% be considered to be down. If we do not want this behaviour,
+%% we must start the net_kernel (in Erlang) without its
+%% ticker process; in that case this code will never run.
+
+%% Every 60 seconds we also check the connection and close it if we
+%% haven't received anything on it for the last 60 secs. If
+%% ticked == tick, we haven't received anything on the connection
+%% during the last 60 secs.
+
+%% The detection time interval is thus, by default, 45s < DT < 75s
+
+%% A HIDDEN node is always ticked (unless there is a pending write) if
+%% we haven't read anything, since a hidden node only ticks back when
+%% it receives a TICK itself.
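+%%
+%% Worked example with the defaults: net_ticktime = 60 s gives a tick
+%% interval of 60/4 = 15 s, and the connection is dropped when nothing
+%% has been received for the last 60 s (four tick intervals). Depending
+%% on where in a 15 s interval the peer goes silent, detection takes
+%% 60 +/- 15 s, which is the 45s < DT < 75s interval stated above.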
+
+send_tick(Socket, Tick, Type, MFTick, MFGetstat) ->
+ #tick{tick = T0,
+ read = Read,
+ write = Write,
+ ticked = Ticked} = Tick,
+ T = T0 + 1,
+ T1 = T rem 4,
+ case MFGetstat(Socket) of
+ {ok, Read, _, _} when Ticked =:= T ->
+ {error, not_responding};
+ {ok, Read, W, Pend} when Type =:= hidden ->
+ send_tick(Socket, Pend, MFTick),
+ {ok, Tick#tick{write = W + 1,
+ tick = T1}};
+ {ok, Read, Write, Pend} ->
+ send_tick(Socket, Pend, MFTick),
+ {ok, Tick#tick{write = Write + 1,
+ tick = T1}};
+ {ok, R, Write, Pend} ->
+ send_tick(Socket, Pend, MFTick),
+ {ok, Tick#tick{write = Write + 1,
+ read = R,
+ tick = T1,
+ ticked = T}};
+ {ok, Read, W, _} ->
+ {ok, Tick#tick{write = W,
+ tick = T1}};
+ {ok, R, W, _} ->
+ {ok, Tick#tick{write = W,
+ read = R,
+ tick = T1,
+ ticked = T}};
+ Error ->
+ Error
+ end.
+
+send_tick(Socket, 0, MFTick) ->
+ MFTick(Socket);
+send_tick(_, _Pend, _) ->
+    %% Don't send a tick if there is a pending write.
+ ok.
+
+%% ------------------------------------------------------------
+%% Connection setup timeout timer.
+%% After Timeout milliseconds this process terminates
+%% which implies that the owning setup/accept process terminates.
+%% The timer is reset before every network operation during the
+%% connection setup !
+%% ------------------------------------------------------------
+
+start_timer(Timeout) ->
+ spawn_link(?MODULE, setup_timer, [self(), Timeout*?trace_factor]).
+
+setup_timer(Pid, Timeout) ->
+ receive
+ {Pid, reset} ->
+ setup_timer(Pid, Timeout)
+ after Timeout ->
+ ?trace("Timer expires ~p, ~p~n",[Pid, Timeout]),
+ ?shutdown(timer)
+ end.
+
+reset_timer(Timer) ->
+ Timer ! {self(), reset}.
+
+cancel_timer(Timer) ->
+ unlink(Timer),
+ exit(Timer, shutdown).
+
diff --git a/lib/kernel/src/dist_util.hrl b/lib/kernel/src/dist_util.hrl
new file mode 100644
index 0000000000..f2b0598532
--- /dev/null
+++ b/lib/kernel/src/dist_util.hrl
@@ -0,0 +1,87 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% uncomment this if tracing of handshake etc is wanted
+%%-define(dist_trace, true).
+%%-define(dist_debug, true).
+
+
+-ifdef(dist_debug).
+-define(debug(Term), erlang:display(Term)).
+-else.
+-define(debug(Term), ok).
+-endif.
+
+-ifdef(dist_trace).
+-define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])).
+% Use the one below for config-file (early boot) connection tracing
+%-define(trace(Fmt,Args), erlang:display([erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])).
+-define(trace_factor,8).
+-else.
+-define(trace(Fmt,Args), ok).
+-define(trace_factor,1).
+-endif.
+
+-define(shutdown(Data), dist_util:shutdown(?MODULE, ?LINE, Data)).
+-define(shutdown2(Data, Reason), dist_util:shutdown(?MODULE, ?LINE, Data, Reason)).
+
+%% Handshake state structure
+-record(hs_data, {
+ kernel_pid, %% Pid of net_kernel
+ other_node, %% Name of peer
+ this_node, %% my nodename
+ socket, %% The connection "socket"
+ timer, %% The setup timer
+ %% (stream_dist_handshake:start_timer)
+ this_flags, %% Flags my node should use
+ allowed, %% Allowed nodes list
+ other_version, %% The other nodes distribution version
+ other_flags, %% The other nodes flags.
+ other_started, %% True if the other node initiated.
+ f_send, %% Fun that behaves like gen_tcp:send
+ f_recv, %% Fun that behaves like gen_tcp:recv
+ f_setopts_pre_nodeup, %% Sets "socket" options before
+ %% nodeup is delivered to net_kernel
+ f_setopts_post_nodeup, %% Sets "socket" options after
+ %% nodeup is delivered
+ f_getll, %% Get low level port or pid.
+ f_address, %% The address of the "socket",
+ %% generated from Socket,Node
+ %% These two are used in the tick loop,
+	 %% so they are not funs, to avoid holding old code.
+	 mf_tick,       %% Takes the socket as parameter and
+			%% sends a tick. This is not a fun but
+			%% a tuple {M,F}.
+			%% It should place {tcp_closed, Socket}
+			%% in the message queue on failure.
+	 mf_getstat,    %% Returns
+			%% {ok, RecvCnt, SendCnt, SendPend} for
+			%% a given socket. This is an {M,F},
+			%% returning {error, Reason} on failure.
+ request_type = normal
+}).
+
+
+%% The following should be filled in upon enter of...
+%% - handshake_we_started:
+%% kernel_pid, other_node, this_node, socket, timer,
+%% this_flags, other_version, All fun's/mf's.
+%% - handshake_other_started:
+%% kernel_pid, this_node, socket, timer,
+%% this_flags, allowed, All fun's/mf's.
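+%%
+%% A TCP-based carrier typically initializes the record roughly like
+%% this before calling dist_util:handshake_we_started/1 (sketch only;
+%% the module name my_dist_mod and the chosen funs are illustrative):
+%%
+%%   #hs_data{kernel_pid = Kernel, this_node = MyNode, other_node = Node,
+%%            socket = Socket, timer = dist_util:start_timer(SetupTime),
+%%            f_send = fun inet_tcp:send/2,
+%%            f_recv = fun inet_tcp:recv/3,
+%%            mf_tick = {my_dist_mod, tick},
+%%            mf_getstat = {my_dist_mod, getstat},
+%%            request_type = normal}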
+
diff --git a/lib/kernel/src/erl_boot_server.erl b/lib/kernel/src/erl_boot_server.erl
new file mode 100644
index 0000000000..702b2feac9
--- /dev/null
+++ b/lib/kernel/src/erl_boot_server.erl
@@ -0,0 +1,325 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% A simple boot_server at a CP.
+%%
+%% This server should know which slaves (DPs or whatever) to boot.
+%% Files (with absolute path names) will be fetched.
+%%
+
+-module(erl_boot_server).
+
+-include("inet_boot.hrl").
+
+-behaviour(gen_server).
+
+%% API functions.
+-export([start/1, start_link/1, add_slave/1, delete_slave/1,
+ add_subnet/2, delete_subnet/2,
+ which_slaves/0]).
+
+%% Exports for testing (don't remove; test suites depend on them).
+-export([would_be_booted/1]).
+
+%% Internal exports
+-export([init/1,handle_call/3,handle_cast/2,handle_info/2,terminate/2]).
+-export([code_change/3]).
+-export([boot_init/1, boot_accept/3]).
+
+-record(state,
+ {
+ priority = 0, %% priority of this server
+	  version = "" :: string(),	%% Version handled, e.g. "4.5.3"
+	  udp_sock,			%% UDP socket receiving broadcast requests
+ udp_port, %% port number must be ?EBOOT_PORT!
+ listen_sock, %% listen sock for incoming file requests
+ listen_port, %% listen port number
+ slaves, %% list of accepted ip addresses
+ bootp :: pid(), %% boot process
+ prim_state %% state for efile code loader
+ }).
+
+-define(single_addr_mask, {255, 255, 255, 255}).
+
+-type ip4_address() :: {0..255,0..255,0..255,0..255}.
+
+-spec start(Slaves :: [atom()]) -> {'ok', pid()} | {'error', any()}.
+
+start(Slaves) ->
+ case check_arg(Slaves) of
+ {ok, AL} ->
+ gen_server:start({local,boot_server}, erl_boot_server, AL, []);
+ _ ->
+ {error, {badarg, Slaves}}
+ end.
+
+-spec start_link(Slaves :: [atom()]) -> {'ok', pid()} | {'error', any()}.
+
+start_link(Slaves) ->
+ case check_arg(Slaves) of
+ {ok, AL} ->
+ gen_server:start_link({local,boot_server},
+ erl_boot_server, AL, []);
+ _ ->
+ {error, {badarg, Slaves}}
+ end.
+
+check_arg(Slaves) ->
+ check_arg(Slaves, []).
+
+check_arg([Slave|Rest], Result) ->
+ case inet:getaddr(Slave, inet) of
+ {ok, IP} ->
+ check_arg(Rest, [{?single_addr_mask, IP}|Result]);
+ _ ->
+ error
+ end;
+check_arg([], Result) ->
+ {ok, Result};
+check_arg(_, _Result) ->
+ error.
+
+-spec add_slave(Slave :: atom()) -> 'ok' | {'error', any()}.
+
+add_slave(Slave) ->
+ case inet:getaddr(Slave, inet) of
+ {ok,IP} ->
+ gen_server:call(boot_server, {add, {?single_addr_mask, IP}});
+ _ ->
+ {error, {badarg, Slave}}
+ end.
+
+-spec delete_slave(Slave :: atom()) -> 'ok' | {'error', any()}.
+
+delete_slave(Slave) ->
+ case inet:getaddr(Slave, inet) of
+ {ok,IP} ->
+ gen_server:call(boot_server, {delete, {?single_addr_mask, IP}});
+ _ ->
+ {error, {badarg, Slave}}
+ end.
+
+-spec add_subnet(Mask :: ip4_address(), Addr :: ip4_address()) ->
+ 'ok' | {'error', any()}.
+
+add_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) ->
+ case member_address(Addr, [{Mask, Addr}]) of
+ true ->
+ gen_server:call(boot_server, {add, {Mask, Addr}});
+ false ->
+ {error, empty_subnet}
+ end.
+
+-spec delete_subnet(Mask :: ip4_address(), Addr :: ip4_address()) -> 'ok'.
+
+delete_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) ->
+ gen_server:call(boot_server, {delete, {Mask, Addr}}).
+
+-spec which_slaves() -> [atom()].
+
+which_slaves() ->
+ gen_server:call(boot_server, which).
+
+%% Given a host name or IP address, returns true if a host
+%% having that IP address would be accepted for booting, and
+%% false otherwise. (Convenient for testing.)
+
+would_be_booted(Addr) ->
+ {ok, IP} = inet:getaddr(Addr, inet),
+ member_address(IP, which_slaves()).
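+
+%% Typical usage (host names and addresses illustrative):
+%%   {ok, _Pid} = erl_boot_server:start(["slave1.example.com"]),
+%%   ok = erl_boot_server:add_subnet({255,255,255,0}, {192,168,1,0}),
+%%   true = erl_boot_server:would_be_booted("192.168.1.17").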
+
+int16(X) when is_integer(X) ->
+ [(X bsr 8) band 16#ff, (X) band 16#ff].
+
+%% Check if an address is a member
+
+member_address(IP, [{{MA, MB, MC, MD}, {EA, EB, EC, ED}}|Rest]) ->
+ {A, B, C, D} = IP,
+ if A band MA =:= EA,
+ B band MB =:= EB,
+ C band MC =:= EC,
+ D band MD =:= ED ->
+ true;
+ true ->
+ member_address(IP, Rest)
+ end;
+member_address(_, []) ->
+ false.
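+
+%% Example: member_address({150,236,14,9}, [{{255,255,0,0}, {150,236,0,0}}])
+%% is true, since the address AND:ed with the mask equals the subnet
+%% address; with ?single_addr_mask the test is an exact address match.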
+
+%% ------------------------------------------------------------
+%% call-back functions.
+%% ------------------------------------------------------------
+
+init(Slaves) ->
+ {ok, U} = gen_udp:open(?EBOOT_PORT, []),
+ {ok, L} = gen_tcp:listen(0, [binary,{packet,4}]),
+ {ok, Port} = inet:port(L),
+ {ok, UPort} = inet:port(U),
+ Ref = make_ref(),
+ Pid = proc_lib:spawn_link(?MODULE, boot_init, [Ref]),
+ gen_tcp:controlling_process(L, Pid),
+ Pid ! {Ref, L},
+    %% We trap exits in order to restart boot_init and udp_port
+ process_flag(trap_exit, true),
+ {ok, #state {priority = 0,
+ version = erlang:system_info(version),
+ udp_sock = U,
+ udp_port = UPort,
+ listen_sock = L,
+ listen_port = Port,
+ slaves = ordsets:from_list(Slaves),
+ bootp = Pid
+ }}.
+
+handle_call({add,Address}, _, S0) ->
+ Slaves = ordsets:add_element(Address, S0#state.slaves),
+ S0#state.bootp ! {slaves, Slaves},
+ {reply, ok, S0#state{slaves = Slaves}};
+handle_call({delete,Address}, _, S0) ->
+ Slaves = ordsets:del_element(Address, S0#state.slaves),
+ S0#state.bootp ! {slaves, Slaves},
+ {reply, ok, S0#state{slaves = Slaves}};
+handle_call(which, _, S0) ->
+ {reply, ordsets:to_list(S0#state.slaves), S0}.
+
+handle_cast(_, Slaves) ->
+ {noreply, Slaves}.
+
+handle_info({udp, U, IP, Port, Data}, S0) ->
+ Token = ?EBOOT_REQUEST ++ S0#state.version,
+ Valid = member_address(IP, ordsets:to_list(S0#state.slaves)),
+ %% check that the connecting node is valid and has the same
+ %% erlang version as the boot server node
+ case {Valid,Data,Token} of
+ {true,Token,Token} ->
+ gen_udp:send(U,IP,Port,[?EBOOT_REPLY,S0#state.priority,
+ int16(S0#state.listen_port),
+ S0#state.version]),
+ {noreply,S0};
+ {false,_,_} ->
+ error_logger:error_msg("** Illegal boot server connection attempt: "
+ "~w is not a valid address ** ~n", [IP]),
+ {noreply,S0};
+ {true,_,_} ->
+ case catch string:substr(Data, 1, length(?EBOOT_REQUEST)) of
+ ?EBOOT_REQUEST ->
+ Vsn = string:substr(Data, length(?EBOOT_REQUEST)+1, length(Data)),
+ error_logger:error_msg("** Illegal boot server connection attempt: "
+ "client version is ~s ** ~n", [Vsn]);
+ _ ->
+ error_logger:error_msg("** Illegal boot server connection attempt: "
+ "unrecognizable request ** ~n", [])
+ end,
+ {noreply,S0}
+ end;
+handle_info(_Info, S0) ->
+ {noreply,S0}.
+
+terminate(_Reason, _S0) ->
+ ok.
+
+code_change(_Vsn, State, _Extra) ->
+ {ok, State}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% Boot server
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+boot_init(Tag) ->
+ receive
+ {Tag, Listen} ->
+ process_flag(trap_exit, true),
+ boot_main(Listen)
+ end.
+
+boot_main(Listen) ->
+ Tag = make_ref(),
+ Pid = proc_lib:spawn_link(?MODULE, boot_accept, [self(), Listen, Tag]),
+ boot_main(Listen, Tag, Pid).
+
+boot_main(Listen, Tag, Pid) ->
+ receive
+ {Tag, _} ->
+ boot_main(Listen);
+ {'EXIT', Pid, _} ->
+ boot_main(Listen);
+ {'EXIT', _, Reason} ->
+ exit(Pid, kill),
+ exit(Reason);
+ {tcp_closed, Listen} ->
+ exit(closed)
+ end.
+
+boot_accept(Server, Listen, Tag) ->
+ Reply = gen_tcp:accept(Listen),
+ unlink(Server),
+ Server ! {Tag, continue},
+ case Reply of
+ {ok, Socket} ->
+ {ok, {IP, _Port}} = inet:peername(Socket),
+ true = member_address(IP, which_slaves()),
+ PS = erl_prim_loader:prim_init(),
+ boot_loop(Socket, PS)
+ end.
+
+boot_loop(Socket, PS) ->
+ receive
+ {tcp, Socket, Data} ->
+ PS2 = handle_command(Socket, PS, Data),
+ boot_loop(Socket, PS2);
+ {tcp_closed, Socket} ->
+ true
+ end.
+
+handle_command(S, PS, Msg) ->
+ case catch binary_to_term(Msg) of
+ {get,File} ->
+ {Res, PS2} = erl_prim_loader:prim_get_file(PS, File),
+ send_file_result(S, get, Res),
+ PS2;
+ {list_dir,Dir} ->
+ {Res, PS2} = erl_prim_loader:prim_list_dir(PS, Dir),
+ send_file_result(S, list_dir, Res),
+ PS2;
+ {read_file_info,File} ->
+ {Res, PS2} = erl_prim_loader:prim_read_file_info(PS, File),
+ send_file_result(S, read_file_info, Res),
+ PS2;
+ get_cwd ->
+ {Res, PS2} = erl_prim_loader:prim_get_cwd(PS, []),
+ send_file_result(S, get_cwd, Res),
+ PS2;
+ {get_cwd,Drive} ->
+ {Res, PS2} = erl_prim_loader:prim_get_cwd(PS, [Drive]),
+ send_file_result(S, get_cwd, Res),
+ PS2;
+ {'EXIT',Reason} ->
+ send_result(S, {error,Reason}),
+ PS;
+ _Other ->
+ send_result(S, {error,unknown_command}),
+ PS
+ end.
+
+send_file_result(S, Cmd, Result) ->
+ gen_tcp:send(S, term_to_binary({Cmd,Result})).
+
+send_result(S, Result) ->
+ gen_tcp:send(S, term_to_binary(Result)).
diff --git a/lib/kernel/src/erl_ddll.erl b/lib/kernel/src/erl_ddll.erl
new file mode 100644
index 0000000000..88f91de24f
--- /dev/null
+++ b/lib/kernel/src/erl_ddll.erl
@@ -0,0 +1,150 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% Dynamic Driver Loader and Linker
+%%
+%% Interface for dynamic library/shared object driver loader/linker.
+%% Provides methods for loading, unloading and listing drivers.
+
+-module(erl_ddll).
+
+-export([load_driver/2, load/2,
+ unload_driver/1, unload/1, reload/2, reload_driver/2,
+ format_error/1,info/1,info/0, start/0, stop/0]).
+
+%%----------------------------------------------------------------------------
+
+-spec start() -> {'error', {'already_started', 'undefined'}}.
+
+start() ->
+ {error, {already_started,undefined}}.
+
+-spec stop() -> 'ok'.
+
+stop() ->
+ ok.
+
+-spec load_driver(Path :: string() | atom(), Driver :: string() | atom()) ->
+ 'ok' | {'error', any()}.
+
+load_driver(Path, Driver) ->
+ do_load_driver(Path, Driver, [{driver_options,[kill_ports]}]).
+
+-spec load(Path :: string() | atom(), Driver :: string() | atom()) ->
+ 'ok' | {'error', any()}.
+
+load(Path, Driver) ->
+ do_load_driver(Path, Driver, []).
+
+do_load_driver(Path, Driver, DriverFlags) ->
+ case erl_ddll:try_load(Path, Driver,[{monitor,pending_driver}]++DriverFlags) of
+ {error, inconsistent} ->
+ {error,bad_driver_name}; % BC
+ {error, What} ->
+ {error,What};
+ {ok, already_loaded} ->
+ ok;
+ {ok,loaded} ->
+ ok;
+ {ok, pending_driver, Ref} ->
+ receive
+ {'DOWN', Ref, driver, _, load_cancelled} ->
+ {error, load_cancelled};
+ {'UP', Ref, driver, _, permanent} ->
+ {error, permanent};
+ {'DOWN', Ref, driver, _, {load_failure, Failure}} ->
+ {error, Failure};
+ {'UP', Ref, driver, _, loaded} ->
+ ok
+ end
+ end.
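+
+%% Typical use from a driver wrapper (application and driver names are
+%% illustrative):
+%%   case erl_ddll:load_driver(code:priv_dir(my_app), my_drv) of
+%%       ok -> ok;
+%%       {error, Err} -> exit({load_failed, erl_ddll:format_error(Err)})
+%%   end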
+
+do_unload_driver(Driver,Flags) ->
+ case erl_ddll:try_unload(Driver,Flags) of
+ {error,What} ->
+ {error,What};
+ {ok, pending_process} ->
+ ok;
+ {ok, unloaded} ->
+ ok;
+ {ok, pending_driver} ->
+ ok;
+ {ok, pending_driver, Ref} ->
+ receive
+ {'UP', Ref, driver, _, permanent} ->
+ {error, permanent};
+ {'UP', Ref, driver, _, unload_cancelled} ->
+ ok;
+ {'DOWN', Ref, driver, _, unloaded} ->
+ ok
+ end
+ end.
+
+-spec unload_driver(Driver :: string() | atom()) -> 'ok' | {'error', any()}.
+
+unload_driver(Driver) ->
+ do_unload_driver(Driver,[{monitor,pending_driver},kill_ports]).
+
+-spec unload(Driver :: string() | atom()) -> 'ok' | {'error', any()}.
+
+unload(Driver) ->
+ do_unload_driver(Driver,[]).
+
+-spec reload(Path :: string() | atom(), Driver :: string() | atom()) ->
+ 'ok' | {'error', any()}.
+
+reload(Path,Driver) ->
+ do_load_driver(Path, Driver, [{reload,pending_driver}]).
+
+-spec reload_driver(Path :: string() | atom(), Driver :: string() | atom()) ->
+ 'ok' | {'error', any()}.
+
+reload_driver(Path,Driver) ->
+ do_load_driver(Path, Driver, [{reload,pending_driver},
+ {driver_options,[kill_ports]}]).
+
+-spec format_error(Code :: atom()) -> string().
+
+format_error(Code) ->
+ case Code of
+	% This is the only error code returned solely from Erlang code...
+	% ('permanent' has a translation in the emulator, even though the Erlang code uses it too.)
+ load_cancelled ->
+ "Loading was cancelled from other process";
+ _ ->
+ erl_ddll:format_error_int(Code)
+ end.
+
+-spec info(Driver :: string() | atom()) -> [{atom(), any()}].
+
+info(Driver) ->
+ [{processes, erl_ddll:info(Driver,processes)},
+ {driver_options, erl_ddll:info(Driver,driver_options)},
+ {port_count, erl_ddll:info(Driver,port_count)},
+ {linked_in_driver, erl_ddll:info(Driver,linked_in_driver)},
+ {permanent, erl_ddll:info(Driver,permanent)},
+ {awaiting_load, erl_ddll:info(Driver,awaiting_load)},
+ {awaiting_unload, erl_ddll:info(Driver,awaiting_unload)}].
+
+-spec info() -> [{string(), [{atom(), any()}]}].
+
+info() ->
+ {ok,DriverList} = erl_ddll:loaded_drivers(),
+ [{X,Y} || X <- DriverList,
+ Y <- [catch info(X)],
+ is_list(Y), not lists:member({linked_in_driver,true},Y)].
diff --git a/lib/kernel/src/erl_distribution.erl b/lib/kernel/src/erl_distribution.erl
new file mode 100644
index 0000000000..25ad34357a
--- /dev/null
+++ b/lib/kernel/src/erl_distribution.erl
@@ -0,0 +1,106 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(erl_distribution).
+
+-behaviour(supervisor).
+
+-export([start_link/0,start_link/1,init/1,start/1,stop/0]).
+
+%-define(DBG,io:format("~p:~p~n",[?MODULE,?LINE])).
+-define(DBG,erlang:display([?MODULE,?LINE])).
+
+start_link() ->
+ case catch start_p() of
+ {ok,Args} ->
+ start_link(Args);
+ _ ->
+ ignore
+ end.
+
+start_link(Args) ->
+ supervisor:start_link({local,net_sup},erl_distribution,Args).
+
+init(NetArgs) ->
+ Epmd =
+ case init:get_argument(no_epmd) of
+ {ok, [[]]} ->
+ [];
+ _ ->
+ EpmdMod = net_kernel:epmd_module(),
+ [{EpmdMod,{EpmdMod,start_link,[]},
+ permanent,2000,worker,[EpmdMod]}]
+ end,
+ Auth = {auth,{auth,start_link,[]},permanent,2000,worker,[auth]},
+ Kernel = {net_kernel,{net_kernel,start_link,[NetArgs]},
+ permanent,2000,worker,[net_kernel]},
+ EarlySpecs = net_kernel:protocol_childspecs(),
+ {ok,{{one_for_all,0,1}, EarlySpecs ++ Epmd ++ [Auth,Kernel]}}.
+
+start_p() ->
+ sname(),
+ lname(),
+ false.
+
+sname() ->
+ case init:get_argument(sname) of
+ {ok,[[Name]]} ->
+ throw({ok,[list_to_atom(Name),shortnames|ticktime()]});
+ _ ->
+ false
+ end.
+
+lname() ->
+ case init:get_argument(name) of
+ {ok,[[Name]]} ->
+ throw({ok,[list_to_atom(Name),longnames|ticktime()]});
+ _ ->
+ false
+ end.
+
+ticktime() ->
+ %% catch, in case the system was started with boot file start_old,
+ %% i.e. running without the application_controller.
+ %% Time is given in seconds. The net_kernel tick time is
+ %% Time/4 milliseconds.
+ case catch application:get_env(net_ticktime) of
+ {ok, Value} when is_integer(Value), Value > 0 ->
+ [Value * 250]; %% i.e. 1000 / 4 = 250 ms.
+ _ ->
+ []
+ end.
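+
+%% Example: net_ticktime = 60 (seconds) yields [15000], i.e. the
+%% net_kernel ticker fires every 15000 ms (60 * 250).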
+
+start(Args) ->
+ C = {net_sup_dynamic, {erl_distribution, start_link, [Args]}, permanent,
+ 1000, supervisor, [erl_distribution]},
+ supervisor:start_child(kernel_sup, C).
+
+stop() ->
+ case supervisor:terminate_child(kernel_sup, net_sup_dynamic) of
+ ok ->
+ supervisor:delete_child(kernel_sup, net_sup_dynamic);
+ Error ->
+ case whereis(net_sup) of
+ Pid when is_pid(Pid) ->
+ %% Dist. started through -sname | -name flags
+ {error, not_allowed};
+ _ ->
+ Error
+ end
+ end.
+
diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl
new file mode 100644
index 0000000000..e4b371836b
--- /dev/null
+++ b/lib/kernel/src/erl_epmd.erl
@@ -0,0 +1,553 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(erl_epmd).
+
+-behaviour(gen_server).
+
+-ifdef(DEBUG).
+-define(port_please_failure(), io:format("Net Kernel 2: EPMD port please failed at ~p:~p~n", [?MODULE,?LINE])).
+-define(port_please_failure2(Term), io:format("Net Kernel 2: EPMD port please failed at ~p:~p [~p]~n", [?MODULE,?LINE,Term])).
+-else.
+-define(port_please_failure(), noop).
+-define(port_please_failure2(Term), noop).
+-endif.
+
+%% External exports
+-export([start/0, start_link/0, stop/0, port_please/2,
+ port_please/3, names/0, names/1,
+ register_node/2, open/0, open/1, open/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-import(lists, [reverse/1]).
+
+-record(state, {socket, port_no = -1, name = ""}).
+
+-include("inet_int.hrl").
+-include("erl_epmd.hrl").
+
+
+%%%----------------------------------------------------------------------
+%%% API
+%%%----------------------------------------------------------------------
+start() ->
+ gen_server:start({local, erl_epmd}, ?MODULE, [], []).
+
+
+start_link() ->
+ gen_server:start_link({local, erl_epmd}, ?MODULE, [], []).
+
+
+stop() ->
+ gen_server:call(?MODULE, stop, infinity).
+
+
+%% Lookup a node "Name" at Host
+%% return {port, P, Version} | noport
+%%
+
+port_please(Node, Host) ->
+ port_please(Node, Host, infinity).
+
+port_please(Node,HostName, Timeout) when is_atom(HostName) ->
+ port_please1(Node,atom_to_list(HostName), Timeout);
+port_please(Node,HostName, Timeout) when is_list(HostName) ->
+ port_please1(Node,HostName, Timeout);
+port_please(Node, EpmdAddr, Timeout) ->
+ get_port(Node, EpmdAddr, Timeout).
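+
+%% Example (node name illustrative): erl_epmd:port_please("foo", "localhost")
+%% returns {port, Port, Version} if a node foo@localhost is registered
+%% with the local epmd, and noport otherwise.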
+
+
+
+port_please1(Node,HostName, Timeout) ->
+ case inet:gethostbyname(HostName, inet, Timeout) of
+ {ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
+ get_port(Node, EpmdAddr, Timeout);
+ Else ->
+ Else
+ end.
+
+names() ->
+ {ok, H} = inet:gethostname(),
+ names(H).
+
+names(HostName) when is_atom(HostName) ->
+ names1(atom_to_list(HostName));
+names(HostName) when is_list(HostName) ->
+ names1(HostName);
+names(EpmdAddr) ->
+ get_names(EpmdAddr).
+
+names1(HostName) ->
+ case inet:gethostbyname(HostName) of
+ {ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
+ get_names(EpmdAddr);
+ Else ->
+ Else
+ end.
+
+
+register_node(Name, PortNo) ->
+ gen_server:call(erl_epmd, {register, Name, PortNo}, infinity).
+
+%%%----------------------------------------------------------------------
+%%% Callback functions from gen_server
+%%%----------------------------------------------------------------------
+
+init(_) ->
+ {ok, #state{socket = -1}}.
+
+%%----------------------------------------------------------------------
+
+handle_call({register, Name, PortNo}, _From, State) ->
+ case State#state.socket of
+ P when P < 0 ->
+ case do_register_node(Name, PortNo) of
+ {alive, Socket, Creation} ->
+ S = State#state{socket = Socket,
+ port_no = PortNo,
+ name = Name},
+ {reply, {ok, Creation}, S};
+ Error ->
+ {reply, Error, State}
+ end;
+ _ ->
+ {reply, {error, already_registered}, State}
+ end;
+
+handle_call(client_info_req, _From, State) ->
+ Reply = {ok,{r4,State#state.name,State#state.port_no}},
+ {reply,Reply,State};
+
+handle_call(stop, _From, State) ->
+ {stop, shutdown, ok, State}.
+
+%%----------------------------------------------------------------------
+
+handle_cast(_, State) ->
+ {noreply, State}.
+
+%%----------------------------------------------------------------------
+
+handle_info({tcp_closed, Socket}, State) when State#state.socket =:= Socket ->
+ {noreply, State#state{socket = -1}};
+handle_info(_, State) ->
+ {noreply, State}.
+
+%%----------------------------------------------------------------------
+
+terminate(_, #state{socket = Socket}) when Socket > 0 ->
+ close(Socket),
+ ok;
+terminate(_, _) ->
+ ok.
+
+%%----------------------------------------------------------------------
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%----------------------------------------------------------------------
+%%% Internal functions
+%%%----------------------------------------------------------------------
+
+get_epmd_port() ->
+ case init:get_argument(epmd_port) of
+ {ok, [[PortStr|_]|_]} when is_list(PortStr) ->
+ list_to_integer(PortStr);
+ error ->
+ ?erlang_daemon_port
+ end.
+
+%%
+%% Epmd socket
+%%
+open() -> open({127,0,0,1}). % The localhost IP address.
+
+open({A,B,C,D}=EpmdAddr) when ?ip(A,B,C,D) ->
+ gen_tcp:connect(EpmdAddr, get_epmd_port(), [inet]);
+open({A,B,C,D,E,F,G,H}=EpmdAddr) when ?ip6(A,B,C,D,E,F,G,H) ->
+ gen_tcp:connect(EpmdAddr, get_epmd_port(), [inet6]).
+
+open({A,B,C,D}=EpmdAddr, Timeout) when ?ip(A,B,C,D) ->
+ gen_tcp:connect(EpmdAddr, get_epmd_port(), [inet], Timeout);
+open({A,B,C,D,E,F,G,H}=EpmdAddr, Timeout) when ?ip6(A,B,C,D,E,F,G,H) ->
+ gen_tcp:connect(EpmdAddr, get_epmd_port(), [inet6], Timeout).
+
+close(Socket) ->
+ gen_tcp:close(Socket).
+
+
+do_register_node_v0(NodeName, TcpPort) ->
+ case open() of
+ {ok, Socket} ->
+ Name = cstring(NodeName),
+ Len = 1+2+length(Name),
+ gen_tcp:send(Socket, [?int16(Len), ?EPMD_ALIVE,
+ ?int16(TcpPort), Name]),
+ wait_for_reg_reply_v0(Socket, []);
+ Error ->
+ Error
+ end.
+
+do_register_node(NodeName, TcpPort) ->
+ case open() of
+ {ok, Socket} ->
+ Name = to_string(NodeName),
+ Extra = "",
+ Elen = length(Extra),
+ Len = 1+2+1+1+2+2+2+length(Name)+2+Elen,
+ gen_tcp:send(Socket, [?int16(Len), ?EPMD_ALIVE2_REQ,
+ ?int16(TcpPort),
+ $M,
+ 0,
+ ?int16(epmd_dist_high()),
+ ?int16(epmd_dist_low()),
+ ?int16(length(Name)),
+ Name,
+ ?int16(Elen),
+ Extra]),
+ case wait_for_reg_reply(Socket, []) of
+ {error, epmd_close} ->
+ %% could be old epmd; try old protocol
+% erlang:display('trying old'),
+ do_register_node_v0(NodeName, TcpPort);
+ Other ->
+ Other
+ end;
+ Error ->
+ Error
+ end.
+
+epmd_dist_high() ->
+ case os:getenv("ERL_EPMD_DIST_HIGH") of
+ false ->
+ ?epmd_dist_high;
+ Version ->
+ case (catch list_to_integer(Version)) of
+ N when is_integer(N), N < ?epmd_dist_high ->
+ N;
+ _ ->
+ ?epmd_dist_high
+ end
+ end.
+
+epmd_dist_low() ->
+ case os:getenv("ERL_EPMD_DIST_LOW") of
+ false ->
+ ?epmd_dist_low;
+ Version ->
+ case (catch list_to_integer(Version)) of
+ N when is_integer(N), N > ?epmd_dist_low ->
+ N;
+ _ ->
+ ?epmd_dist_low
+ end
+ end.
+
+
+
+%%% (When we reply 'duplicate_name', it's because it's the most likely
+%%% reason; there is no interpretation of the error result code.)
+wait_for_reg_reply(Socket, SoFar) ->
+ receive
+ {tcp, Socket, Data0} ->
+ case SoFar ++ Data0 of
+ [$y, Result, A, B] ->
+ case Result of
+ 0 ->
+ {alive, Socket, ?u16(A, B)};
+ _ ->
+ {error, duplicate_name}
+ end;
+ Data when length(Data) < 4 ->
+ wait_for_reg_reply(Socket, Data);
+ Garbage ->
+ {error, {garbage_from_epmd, Garbage}}
+ end;
+ {tcp_closed, Socket} ->
+ {error, epmd_close}
+ after 10000 ->
+ gen_tcp:close(Socket),
+ {error, no_reg_reply_from_epmd}
+ end.
+
+wait_for_reg_reply_v0(Socket, SoFar) ->
+ receive
+ {tcp, Socket, Data0} ->
+ case SoFar ++ Data0 of
+ [$Y, A, B] ->
+ {alive, Socket, ?u16(A, B)};
+ Data when length(Data) < 3 ->
+ wait_for_reg_reply(Socket, Data);
+ Garbage ->
+ {error, {garbage_from_epmd, Garbage}}
+ end;
+ {tcp_closed, Socket} ->
+ {error, duplicate_name} % A guess -- the most likely reason.
+ after 10000 ->
+ gen_tcp:close(Socket),
+ {error, no_reg_reply_from_epmd}
+ end.
+%%
+%% Lookup a node "Name" at Host
+%%
+get_port_v0(Node, EpmdAddress) ->
+ case open(EpmdAddress) of
+ {ok, Socket} ->
+ Name = cstring(Node),
+ Len = 1+length(Name),
+ gen_tcp:send(Socket, [?int16(Len),?EPMD_PORT_PLEASE, Name]),
+ wait_for_port_reply_v0(Socket, []);
+ _Error ->
+ ?port_please_failure(),
+ noport
+ end.
+
+%%% Not used anymore
+%%% get_port(Node, EpmdAddress) ->
+%%% get_port(Node, EpmdAddress, infinity).
+
+get_port(Node, EpmdAddress, Timeout) ->
+ case open(EpmdAddress, Timeout) of
+ {ok, Socket} ->
+ Name = to_string(Node),
+ Len = 1+length(Name),
+ gen_tcp:send(Socket, [?int16(Len),?EPMD_PORT_PLEASE2_REQ, Name]),
+ Reply = wait_for_port_reply(Socket, []),
+ case Reply of
+ closed ->
+ get_port_v0(Node, EpmdAddress);
+ Other ->
+ Other
+ end;
+ _Error ->
+ ?port_please_failure2(_Error),
+ noport
+ end.
+
+wait_for_port_reply_v0(Socket, SoFar) ->
+ receive
+ {tcp, Socket, Data0} ->
+% io:format("got ~p~n", [Data0]),
+ case SoFar ++ Data0 of
+ [A, B] ->
+ wait_for_close(Socket, {port, ?u16(A, B), 0});
+% wait_for_close(Socket, {port, ?u16(A, B)});
+ Data when length(Data) < 2 ->
+ wait_for_port_reply_v0(Socket, Data);
+ Garbage ->
+ ?port_please_failure(),
+ {error, {garbage_from_epmd, Garbage}}
+ end;
+ {tcp_closed, Socket} ->
+ ?port_please_failure(),
+ noport
+ after 10000 ->
+ ?port_please_failure(),
+ gen_tcp:close(Socket),
+ noport
+ end.
+
+wait_for_port_reply(Socket, SoFar) ->
+ receive
+ {tcp, Socket, Data0} ->
+% io:format("got ~p~n", [Data0]),
+ case SoFar ++ Data0 of
+ [$w, Result | Rest] ->
+ case Result of
+ 0 ->
+ wait_for_port_reply_cont(Socket, Rest);
+ _ ->
+ ?port_please_failure(),
+ wait_for_close(Socket, noport)
+ end;
+ Data when length(Data) < 2 ->
+ wait_for_port_reply(Socket, Data);
+ Garbage ->
+ ?port_please_failure(),
+ {error, {garbage_from_epmd, Garbage}}
+ end;
+ {tcp_closed, Socket} ->
+ ?port_please_failure(),
+ closed
+ after 10000 ->
+ ?port_please_failure(),
+ gen_tcp:close(Socket),
+ noport
+ end.
+
+wait_for_port_reply_cont(Socket, SoFar) when length(SoFar) >= 10 ->
+ wait_for_port_reply_cont2(Socket, SoFar);
+wait_for_port_reply_cont(Socket, SoFar) ->
+ receive
+ {tcp, Socket, Data0} ->
+ case SoFar ++ Data0 of
+ Data when length(Data) >= 10 ->
+ wait_for_port_reply_cont2(Socket, Data);
+ Data when length(Data) < 10 ->
+ wait_for_port_reply_cont(Socket, Data);
+ Garbage ->
+ ?port_please_failure(),
+ {error, {garbage_from_epmd, Garbage}}
+ end;
+ {tcp_closed, Socket} ->
+ ?port_please_failure(),
+ noport
+ after 10000 ->
+ ?port_please_failure(),
+ gen_tcp:close(Socket),
+ noport
+ end.
+
+wait_for_port_reply_cont2(Socket, Data) ->
+ [A, B, _Type, _Proto, HighA, HighB,
+ LowA, LowB, NLenA, NLenB | Rest] = Data,
+ wait_for_port_reply_name(Socket,
+ ?u16(NLenA, NLenB),
+ Rest),
+ Low = ?u16(LowA, LowB),
+ High = ?u16(HighA, HighB),
+ Version = best_version(Low, High),
+% io:format("Returning ~p~n", [{port, ?u16(A, B), Version}]),
+ {port, ?u16(A, B), Version}.
+% {port, ?u16(A, B)}.
+
+%%% Throw away the rest of the message; we won't use any of it anyway,
+%%% currently.
+wait_for_port_reply_name(Socket, Len, Sofar) ->
+ receive
+ {tcp, Socket, _Data} ->
+% io:format("data = ~p~n", _Data),
+ wait_for_port_reply_name(Socket, Len, Sofar);
+ {tcp_closed, Socket} ->
+ "foobar"
+ end.
+
+
+best_version(Low, High) ->
+ OurLow = epmd_dist_low(),
+ OurHigh = epmd_dist_high(),
+ select_best_version(OurLow, OurHigh, Low, High).
+
+%%% We silently assume that the lows are not greater than the highs.
+%%% We should report if the intervals don't overlap.
+select_best_version(L1, _H1, _L2, H2) when L1 > H2 ->
+ 0;
+select_best_version(_L1, H1, L2, _H2) when L2 > H1 ->
+ 0;
+select_best_version(_L1, H1, _L2, H2) ->
+ erlang:min(H1, H2).
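+%% Illustrative examples (not part of the original source):
+%% select_best_version(5, 5, 1, 5) -> 5 (the smaller of the two highs),
+%% while disjoint ranges such as select_best_version(5, 5, 1, 4) -> 0.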
+
+wait_for_close(Socket, Reply) ->
+ receive
+ {tcp_closed, Socket} ->
+ Reply
+ after 10000 ->
+ gen_tcp:close(Socket),
+ Reply
+ end.
+
+
+%%
+%% Creates a (flat) null terminated string from atom or list.
+%%
+cstring(S) when is_atom(S) -> cstring(atom_to_list(S));
+cstring(S) when is_list(S) -> S ++ [0].
+
+to_string(S) when is_atom(S) -> atom_to_list(S);
+to_string(S) when is_list(S) -> S.
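+%% Illustrative examples (not part of the original source):
+%% cstring(foo) and cstring("foo") both yield [$f,$o,$o,0], i.e. the
+%% name followed by a NUL byte, while to_string('x@host') is just the
+%% plain string "x@host".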
+
+%%
+%% Find names on epmd
+%%
+%%
+get_names(EpmdAddress) ->
+ case open(EpmdAddress) of
+ {ok, Socket} ->
+ do_get_names(Socket);
+ _Error ->
+ {error, address}
+ end.
+
+do_get_names(Socket) ->
+ gen_tcp:send(Socket, [?int16(1),?EPMD_NAMES]),
+ receive
+ {tcp, Socket, [P0,P1,P2,P3|T]} ->
+ EpmdPort = ?u32(P0,P1,P2,P3),
+ case get_epmd_port() of
+ EpmdPort ->
+ names_loop(Socket, T, []);
+ _ ->
+ close(Socket),
+ {error, address}
+ end;
+ {tcp_closed, Socket} ->
+ {ok, []}
+ end.
+
+names_loop(Socket, Acc, Ps) ->
+ receive
+ {tcp, Socket, Bytes} ->
+ {NAcc, NPs} = scan_names(Acc ++ Bytes, Ps),
+ names_loop(Socket, NAcc, NPs);
+ {tcp_closed, Socket} ->
+ {_, NPs} = scan_names(Acc, Ps),
+ {ok, NPs}
+ end.
+
+scan_names(Buf, Ps) ->
+ case scan_line(Buf, []) of
+ {Line, NBuf} ->
+ case parse_line(Line) of
+ {ok, Entry} ->
+ scan_names(NBuf, [Entry | Ps]);
+ error ->
+ scan_names(NBuf, Ps)
+ end;
+ [] -> {Buf, Ps}
+ end.
+
+
+scan_line([$\n | Buf], Line) -> {reverse(Line), Buf};
+scan_line([C | Buf], Line) -> scan_line(Buf, [C|Line]);
+scan_line([], _) -> [].
+
+parse_line("name " ++ Buf0) ->
+ case parse_name(Buf0, []) of
+ {Name, Buf1} ->
+ case Buf1 of
+ "at port " ++ Buf2 ->
+ case catch list_to_integer(Buf2) of
+ {'EXIT', _} -> error;
+ Port -> {ok, {Name, Port}}
+ end;
+ _ -> error
+ end;
+ error -> error
+ end;
+parse_line(_) -> error.
+
+
+parse_name([$\s | Buf], Name) -> {reverse(Name), Buf};
+parse_name([C | Buf], Name) -> parse_name(Buf, [C|Name]);
+parse_name([], _Name) -> error.
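+%% Illustrative example (not part of the original source): a reply line
+%% such as "name foo at port 4369" is parsed by parse_line/1 into
+%% {ok, {"foo", 4369}}; any line not matching the "name ... at port N"
+%% shape yields 'error' and is skipped by scan_names/2.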
diff --git a/lib/kernel/src/erl_epmd.hrl b/lib/kernel/src/erl_epmd.hrl
new file mode 100644
index 0000000000..47ab6195d8
--- /dev/null
+++ b/lib/kernel/src/erl_epmd.hrl
@@ -0,0 +1,32 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-define(EPMD_ALIVE, $a).
+-define(EPMD_PORT_PLEASE, $p).
+-define(EPMD_NAMES, $n).
+-define(EPMD_DUMP, $d).
+-define(EPMD_KILL, $k).
+-define(EPMD_STOP, $s).
+
+-define(EPMD_ALIVE_OK, $Y).
+
+-define(EPMD_ALIVE2_REQ, $x).
+-define(EPMD_PORT_PLEASE2_REQ, $z).
+-define(EPMD_ALIVE2_RESP, $y).
+-define(EPMD_PORT2_RESP, $w).
diff --git a/lib/kernel/src/erl_reply.erl b/lib/kernel/src/erl_reply.erl
new file mode 100644
index 0000000000..1a61e630bc
--- /dev/null
+++ b/lib/kernel/src/erl_reply.erl
@@ -0,0 +1,49 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(erl_reply).
+
+%% Synchronisation with erl_start (erl_interface)
+
+-export([reply/1]).
+
+%% send Msg to Addr:Port
+%% all args are atoms since we call this from erl command line
+
+-spec reply([atom()]) -> 'ok' | 'reply_done'.
+
+reply([Addr,Port,Msg]) ->
+ Ip = ip_string_to_tuple(atom_to_list(Addr)),
+ P = list_to_integer(atom_to_list(Port)),
+ M = atom_to_list(Msg),
+ {ok, S} = gen_tcp:connect(Ip,P,[]),
+ gen_tcp:send(S,M),
+ gen_tcp:close(S),
+ reply_done;
+reply(_) ->
+ error_logger:error_msg("erl_reply: Can't find address and port "
+ "to reply to~n").
+
+%% Convert an IP address string to a tuple
+ip_string_to_tuple(Ip) ->
+ [Ip1,Ip2,Ip3,Ip4] = string:tokens(Ip,"."),
+ {list_to_integer(Ip1),
+ list_to_integer(Ip2),
+ list_to_integer(Ip3),
+ list_to_integer(Ip4)}.
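+%% Illustrative usage (not part of the original source): started from the
+%% command line with something like
+%%   erl -s erl_reply reply 127.0.0.1 50000 ready
+%% which amounts to calling reply(['127.0.0.1', '50000', ready]);
+%% ip_string_to_tuple("127.0.0.1") above then yields {127,0,0,1}.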
+
diff --git a/lib/kernel/src/error_handler.erl b/lib/kernel/src/error_handler.erl
new file mode 100644
index 0000000000..5f2507fc08
--- /dev/null
+++ b/lib/kernel/src/error_handler.erl
@@ -0,0 +1,141 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(error_handler).
+
+%% A simple error handler.
+
+-export([undefined_function/3, undefined_lambda/3, stub_function/3,
+ breakpoint/3]).
+
+-spec undefined_function(Module :: atom(), Function :: atom(), Args :: [_]) ->
+ any().
+
+undefined_function(Module, Func, Args) ->
+ case ensure_loaded(Module) of
+ {module, Module} ->
+ case erlang:function_exported(Module, Func, length(Args)) of
+ true ->
+ apply(Module, Func, Args);
+ false ->
+ case check_inheritance(Module, Args) of
+ {value, Base, Args1} ->
+ apply(Base, Func, Args1);
+ none ->
+ crash(Module, Func, Args)
+ end
+ end;
+ {module, _} ->
+ crash(Module, Func, Args);
+ _Other ->
+ crash(Module, Func, Args)
+ end.
+
+-spec undefined_lambda(Module :: atom(), Function :: fun(), Args :: [_]) ->
+ any().
+
+undefined_lambda(Module, Fun, Args) ->
+ case ensure_loaded(Module) of
+ {module, Module} ->
+ %% There is no need (and no way) to test if the fun is present.
+ %% apply/2 will not call us again if the fun is missing.
+ apply(Fun, Args);
+ {module, _} ->
+ crash(Fun, Args);
+ _Other ->
+ crash(Fun, Args)
+ end.
+
+-spec breakpoint(Module :: atom(), Function :: atom(), Args :: [_]) ->
+ any().
+
+breakpoint(Module, Func, Args) ->
+ (int()):eval(Module, Func, Args).
+
+%% Used to make the call to the 'int' module a "weak" one, to avoid
+%% building strong components in xref or dialyzer.
+
+int() -> int.
+
+%%
+%% Crash providing a beautiful stack backtrace.
+%%
+crash(Fun, Args) ->
+ crash({Fun,Args}).
+
+crash(M, F, A) ->
+ crash({M,F,A}).
+
+-spec crash(tuple()) -> no_return().
+
+crash(Tuple) ->
+ try erlang:error(undef)
+ catch
+ error:undef ->
+ erlang:raise(error, undef, [Tuple|tl(erlang:get_stacktrace())])
+ end.
+
+%% If the code_server has not been started yet dynamic code loading
+%% is handled by init.
+ensure_loaded(Module) ->
+ Self = self(),
+ case whereis(code_server) of
+ %% Perhaps double fault should be detected in code:ensure_loaded/1
+ %% instead, since this error handler cannot know whether the
+ %% code server can resolve the problem or not.
+ %% An {error, Reason} return from there would crash the code server and
+ %% bring down the node.
+ Self ->
+ Error = "The code server called the unloaded module `" ++
+ atom_to_list(Module) ++ "'",
+ halt(Error);
+ Pid when is_pid(Pid) ->
+ code:ensure_loaded(Module);
+ _ ->
+ init:ensure_loaded(Module)
+ end.
+
+-spec stub_function(atom(), atom(), [_]) -> no_return().
+
+stub_function(Mod, Func, Args) ->
+ exit({undef,[{Mod,Func,Args}]}).
+
+check_inheritance(Module, Args) ->
+ Attrs = erlang:get_module_info(Module, attributes),
+ case lists:keysearch(extends, 1, Attrs) of
+ {value,{extends,[Base]}} when is_atom(Base), Base =/= Module ->
+ %% This is just a heuristic for detecting abstract modules
+ %% with inheritance so they can be handled; it would be
+ %% much better to do it in the emulator runtime
+ case lists:keysearch(abstract, 1, Attrs) of
+ {value,{abstract,[true]}} ->
+ case lists:reverse(Args) of
+ [M|Rs] when tuple_size(M) > 1,
+ element(1,M) =:= Module,
+ tuple_size(element(2,M)) > 0,
+ is_atom(element(1,element(2,M))) ->
+ {value, Base, lists:reverse(Rs, [element(2,M)])};
+ _ ->
+ {value, Base, Args}
+ end;
+ _ ->
+ {value, Base, Args}
+ end;
+ _ ->
+ none
+ end.
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
new file mode 100644
index 0000000000..cafdc52e84
--- /dev/null
+++ b/lib/kernel/src/error_logger.erl
@@ -0,0 +1,387 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(error_logger).
+
+-export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1,
+ error_report/2,info_report/1,info_report/2,warning_report/1,
+ warning_report/2,error_info/1,
+ info_msg/1,info_msg/2,warning_msg/1,warning_msg/2,
+ logfile/1,tty/1,swap_handler/1,
+ add_report_handler/1,add_report_handler/2,
+ delete_report_handler/1]).
+
+-export([init/1,
+ handle_event/2, handle_call/2, handle_info/2,
+ terminate/2]).
+
+-define(buffer_size, 10).
+
+%%-----------------------------------------------------------------
+%% Types used in this file
+%%-----------------------------------------------------------------
+
+-type msg_tag() :: 'error' | 'error_report'
+ | 'info' | 'info_msg' | 'info_report'
+ | 'warning_msg' | 'warning_report'.
+
+-type state() :: {non_neg_integer(), non_neg_integer(), [term()]}.
+
+%%-----------------------------------------------------------------
+
+-spec start() -> {'ok', pid()} | {'error', any()}.
+
+start() ->
+ case gen_event:start({local, error_logger}) of
+ {ok, Pid} ->
+ simple_logger(?buffer_size),
+ {ok, Pid};
+ Error -> Error
+ end.
+
+-spec start_link() -> {'ok', pid()} | {'error', any()}.
+
+start_link() ->
+ case gen_event:start_link({local, error_logger}) of
+ {ok, Pid} ->
+ simple_logger(?buffer_size),
+ {ok, Pid};
+ Error -> Error
+ end.
+
+%%-----------------------------------------------------------------
+%% These two simple old functions generate events tagged 'error'.
+%% They are used for simple messages: errors or information.
+%%-----------------------------------------------------------------
+
+-spec error_msg(Format :: string()) -> 'ok'.
+
+error_msg(Format) ->
+ error_msg(Format,[]).
+
+-spec error_msg(Format :: string(), Args :: list()) -> 'ok'.
+
+error_msg(Format, Args) ->
+ notify({error, group_leader(), {self(), Format, Args}}).
+
+-spec format(Format :: string(), Args :: list()) -> 'ok'.
+
+format(Format, Args) ->
+ notify({error, group_leader(), {self(), Format, Args}}).
+
+%%-----------------------------------------------------------------
+%% These functions should be used for error reports. Events
+%% are tagged 'error_report'.
+%% The 'std_error' error_report type can always be used.
+%%-----------------------------------------------------------------
+
+-spec error_report(Report :: any()) -> 'ok'.
+
+error_report(Report) ->
+ error_report(std_error, Report).
+
+-spec error_report(Type :: any(), Report :: any()) -> 'ok'.
+
+error_report(Type, Report) ->
+ notify({error_report, group_leader(), {self(), Type, Report}}).
+
+%%-----------------------------------------------------------------
+%% These functions should be used for warning reports.
+%% These might be mapped to error reports or info reports,
+%% depending on emulator flags. Events that are not mapped
+%% are tagged 'warning_report'.
+%% The 'std_warning' info_report type can always be used and is
+%% mapped to std_info or std_error accordingly.
+%%-----------------------------------------------------------------
+
+-spec warning_report(Report :: any()) -> 'ok'.
+
+warning_report(Report) ->
+ warning_report(std_warning, Report).
+
+-spec warning_report(Type :: any(), Report :: any()) -> 'ok'.
+
+warning_report(Type, Report) ->
+ {Tag, NType} = case error_logger:warning_map() of
+ info ->
+ if
+ Type =:= std_warning ->
+ {info_report, std_info};
+ true ->
+ {info_report, Type}
+ end;
+ warning ->
+ {warning_report, Type};
+ error ->
+ if
+ Type =:= std_warning ->
+ {error_report, std_error};
+ true ->
+ {error_report, Type}
+ end
+ end,
+ notify({Tag, group_leader(), {self(), NType, Report}}).
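+%% Illustrative example (not part of the original source): when
+%% error_logger:warning_map() returns 'error' (controlled by the
+%% emulator's +W flag), warning_report([{exit, badarg}]) is delivered
+%% to the handlers as
+%%   {error_report, GroupLeader, {Pid, std_error, [{exit, badarg}]}}
+%% where GroupLeader and Pid are the caller's group leader and pid.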
+
+%%-----------------------------------------------------------------
+%% These functions provide the same interface as error_msg/1,2 for
+%% warning messages; like warning reports, they might get mapped to
+%% other types of reports.
+%%-----------------------------------------------------------------
+
+-spec warning_msg(Format :: string()) -> 'ok'.
+
+warning_msg(Format) ->
+ warning_msg(Format,[]).
+
+-spec warning_msg(Format :: string(), Args :: list()) -> 'ok'.
+
+warning_msg(Format, Args) ->
+ Tag = case error_logger:warning_map() of
+ warning ->
+ warning_msg;
+ info ->
+ info_msg;
+ error ->
+ error
+ end,
+ notify({Tag, group_leader(), {self(), Format, Args}}).
+
+%%-----------------------------------------------------------------
+%% This function should be used for information reports. Events
+%% are tagged 'info_report'.
+%% The 'std_info' info_report type can always be used.
+%%-----------------------------------------------------------------
+
+-spec info_report(Report :: any()) -> 'ok'.
+
+info_report(Report) ->
+ info_report(std_info, Report).
+
+-spec info_report(Type :: any(), Report :: any()) -> 'ok'.
+
+info_report(Type, Report) ->
+ notify({info_report, group_leader(), {self(), Type, Report}}).
+
+%%-----------------------------------------------------------------
+%% These functions provide the same interface as error_msg/1,2 for
+%% information messages.
+%%-----------------------------------------------------------------
+
+-spec info_msg(Format :: string()) -> 'ok'.
+
+info_msg(Format) ->
+ info_msg(Format,[]).
+
+-spec info_msg(Format :: string(), Args :: list()) -> 'ok'.
+
+info_msg(Format, Args) ->
+ notify({info_msg, group_leader(), {self(), Format, Args}}).
+
+%%-----------------------------------------------------------------
+%% Used by the init process. Events are tagged 'info'.
+%%-----------------------------------------------------------------
+
+-spec error_info(Error :: any()) -> 'ok'.
+
+error_info(Error) ->
+ notify({info, group_leader(), {self(), Error, []}}).
+
+-spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'.
+
+notify(Msg) ->
+ gen_event:notify(error_logger, Msg).
+
+-type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}.
+-spec swap_handler(Type :: swap_handler_type()) -> any().
+
+swap_handler(tty) ->
+ gen_event:swap_handler(error_logger, {error_logger, swap},
+ {error_logger_tty_h, []}),
+ simple_logger();
+swap_handler({logfile, File}) ->
+ gen_event:swap_handler(error_logger, {error_logger, swap},
+ {error_logger_file_h, File}),
+ simple_logger();
+swap_handler(silent) ->
+ gen_event:delete_handler(error_logger, error_logger, delete),
+ simple_logger();
+swap_handler(false) ->
+ ok. % keep primitive event handler as-is
+
+-spec add_report_handler(Module :: atom()) -> any().
+
+add_report_handler(Module) when is_atom(Module) ->
+ gen_event:add_handler(error_logger, Module, []).
+
+-spec add_report_handler(atom(), any()) -> any().
+
+add_report_handler(Module, Args) when is_atom(Module) ->
+ gen_event:add_handler(error_logger, Module, Args).
+
+-spec delete_report_handler(Module :: atom()) -> any().
+
+delete_report_handler(Module) when is_atom(Module) ->
+ gen_event:delete_handler(error_logger, Module, []).
+
+%% Start the lowest level error_logger handler with Buffer.
+
+simple_logger(Buffer_size) when is_integer(Buffer_size) ->
+ gen_event:add_handler(error_logger, error_logger, Buffer_size).
+
+%% Start the lowest level error_logger handler without Buffer.
+
+simple_logger() ->
+ gen_event:add_handler(error_logger, error_logger, []).
+
+%% Log all errors to File for all eternity
+
+-spec logfile(Request :: {'open', string()}) -> 'ok' | {'error',any()}
+ ; (Request :: 'close') -> 'ok' | {'error', any()}
+ ; (Request :: 'filename') -> atom() | string() | {'error', any()}.
+
+logfile({open, File}) ->
+ case lists:member(error_logger_file_h,
+ gen_event:which_handlers(error_logger)) of
+ true ->
+ {error, allready_have_logfile};
+ _ ->
+ gen_event:add_handler(error_logger, error_logger_file_h, File)
+ end;
+logfile(close) ->
+ case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of
+ {error,Reason} ->
+ {error,Reason};
+ _ ->
+ ok
+ end;
+logfile(filename) ->
+ case gen_event:call(error_logger, error_logger_file_h, filename) of
+ {error,_} ->
+ {error, no_log_file};
+ Val ->
+ Val
+ end.
+
+%% Possibly turn off all tty printouts, maybe we only want the errors
+%% to go to a file
+
+-spec tty(Flag :: boolean()) -> 'ok'.
+
+tty(true) ->
+ Hs = gen_event:which_handlers(error_logger),
+ case lists:member(error_logger_tty_h, Hs) of
+ false ->
+ gen_event:add_handler(error_logger, error_logger_tty_h, []);
+ true ->
+ ignore
+ end,
+ ok;
+tty(false) ->
+ gen_event:delete_handler(error_logger, error_logger_tty_h, []),
+ ok.
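+%% Illustrative usage (the file name is hypothetical): to keep reports in
+%% a file only, one might run
+%%   error_logger:logfile({open, "kernel.log"}),
+%%   error_logger:tty(false)
+%% after which error_logger:logfile(filename) should return "kernel.log"
+%% and error_logger:logfile(close) removes the file handler again.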
+
+
+%%% ---------------------------------------------------
+%%% This is the default error_logger handler.
+%%% ---------------------------------------------------
+
+-spec init(term()) -> {'ok', state() | []}.
+
+init(Max) when is_integer(Max) ->
+ {ok, {Max, 0, []}};
+%% This one is called if someone took over from us, and now wants to
+%% go back.
+init({go_back, _PostState}) ->
+ {ok, {?buffer_size, 0, []}};
+init(_) -> %% Start and just relay to other
+ {ok, []}. %% node if node(GLeader) =/= node().
+
+-spec handle_event(term(), state()) -> {'ok', state()}.
+
+handle_event({Type, GL, Msg}, State) when node(GL) =/= node() ->
+ gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}),
+ %% handle_event2({Type, GL, Msg}, State); %% Shall we do something
+ {ok, State}; %% at this node too ???
+handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info ->
+ {ok, State}; %% Ignore other info reports here
+handle_event(Event, State) ->
+ handle_event2(Event, State).
+
+-spec handle_info(term(), state()) -> {'ok', state()}.
+
+handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() ->
+ {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())},
+ {ok, State};
+handle_info({emulator, GL, Chars}, State) ->
+ handle_event2({emulator, GL, Chars}, State);
+handle_info(_, State) ->
+ {ok, State}.
+
+-spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}.
+
+handle_call(_Query, State) -> {ok, {error, bad_query}, State}.
+
+-spec terminate(term(), state()) -> {'error_logger', [term()]}.
+
+terminate(swap, {_, 0, Buff}) ->
+ {error_logger, Buff};
+terminate(swap, {_, Lost, Buff}) ->
+ Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}},
+ {error_logger, [tag_event(Myevent)|Buff]};
+terminate(_, _) ->
+ {error_logger, []}.
+
+handle_event2(Event, {1, Lost, Buff}) ->
+ display(tag_event(Event)),
+ {ok, {1, Lost+1, Buff}};
+handle_event2(Event, {N, Lost, Buff}) ->
+ Tagged = tag_event(Event),
+ display(Tagged),
+ {ok, {N-1, Lost, [Tagged|Buff]}};
+handle_event2(_, State) ->
+ {ok, State}.
+
+tag_event(Event) ->
+ {erlang:localtime(), Event}.
+
+display({Tag,{error,_,{_,Format,Args}}}) ->
+ display2(Tag,Format,Args);
+display({Tag,{error_report,_,{_,Type,Report}}}) ->
+ display2(Tag,Type,Report);
+display({Tag,{info_report,_,{_,Type,Report}}}) ->
+ display2(Tag,Type,Report);
+display({Tag,{info,_,{_,Error,_}}}) ->
+ display2(Tag,Error,[]);
+display({Tag,{info_msg,_,{_,Format,Args}}}) ->
+ display2(Tag,Format,Args);
+display({Tag,{warning_report,_,{_,Type,Report}}}) ->
+ display2(Tag,Type,Report);
+display({Tag,{warning_msg,_,{_,Format,Args}}}) ->
+ display2(Tag,Format,Args);
+display({Tag,{emulator,_,Chars}}) ->
+ display2(Tag,Chars,[]).
+
+add_node(X, Pid) when is_atom(X) ->
+ add_node(atom_to_list(X), Pid);
+add_node(X, Pid) ->
+ lists:concat([X,"** at node ",node(Pid)," **~n"]).
+
+%% Can't do io_lib:format
+
+display2(Tag,F,A) ->
+ erlang:display({error_logger,Tag,F,A}).
diff --git a/lib/kernel/src/erts_debug.erl b/lib/kernel/src/erts_debug.erl
new file mode 100644
index 0000000000..7d6a5ade94
--- /dev/null
+++ b/lib/kernel/src/erts_debug.erl
@@ -0,0 +1,155 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(erts_debug).
+
+%% Low-level debugging support. EXPERIMENTAL!
+
+-export([size/1,df/1,df/2,df/3]).
+
+%% This module contains the following *experimental* BIFs:
+%% disassemble/1
+%% breakpoint/2
+%% same/2
+%% flat_size/1
+
+%% size(Term)
+%% Returns the size of Term in actual heap words. Shared subterms are
+%%  counted once. Example: If A = [a,b], B = [A,A] then size(B) returns 8,
+%% while flat_size(B) returns 12.
+
+-spec size(term()) -> non_neg_integer().
+
+size(Term) ->
+ {Sum,_} = size(Term, gb_trees:empty(), 0),
+ Sum.
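+%% Shell sketch of the example above (values as given in the comment):
+%%   1> A = [a,b], B = [A,A].
+%%   2> erts_debug:size(B).       %% the shared sublist A is counted once
+%%   8
+%%   3> erts_debug:flat_size(B).  %% A is counted twice
+%%   12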
+
+size([H|T]=Term, Seen0, Sum0) ->
+ case remember_term(Term, Seen0) of
+ seen -> {Sum0,Seen0};
+ Seen1 ->
+ {Sum,Seen} = size(H, Seen1, Sum0+2),
+ size(T, Seen, Sum)
+ end;
+size(Tuple, Seen0, Sum0) when is_tuple(Tuple) ->
+ case remember_term(Tuple, Seen0) of
+ seen -> {Sum0,Seen0};
+ Seen ->
+ Sum = Sum0 + 1 + tuple_size(Tuple),
+ tuple_size(1, tuple_size(Tuple), Tuple, Seen, Sum)
+ end;
+size(Term, Seen0, Sum) ->
+ case erts_debug:flat_size(Term) of
+ 0 -> {Sum,Seen0};
+ Sz ->
+ case remember_term(Term, Seen0) of
+ seen -> {Sum,Seen0};
+ Seen -> {Sum+Sz,Seen}
+ end
+ end.
+
+tuple_size(I, Sz, _, Seen, Sum) when I > Sz ->
+ {Sum,Seen};
+tuple_size(I, Sz, Tuple, Seen0, Sum0) ->
+ {Sum,Seen} = size(element(I, Tuple), Seen0, Sum0),
+ tuple_size(I+1, Sz, Tuple, Seen, Sum).
+
+remember_term(Term, Seen) ->
+ case gb_trees:lookup(Term, Seen) of
+ none -> gb_trees:insert(Term, [Term], Seen);
+ {value,Terms} ->
+ case is_term_seen(Term, Terms) of
+ false -> gb_trees:update(Term, [Term|Terms], Seen);
+ true -> seen
+ end
+ end.
+
+-spec is_term_seen(term(), [term()]) -> boolean().
+
+is_term_seen(Term, [H|T]) ->
+ case erts_debug:same(Term, H) of
+ true -> true;
+ false -> is_term_seen(Term, T)
+ end;
+is_term_seen(_, []) -> false.
+
+%% df(Mod) -- Disassemble Mod to file Mod.dis.
+%% df(Mod, Func) -- Disassemble Mod:Func/Any to file Mod_Func.dis.
+%% df(Mod, Func, Arity) -- Disassemble Mod:Func/Arity to file Mod_Func_Arity.dis.
+
+-type df_ret() :: 'ok' | {'error', {'badopen', module()}} | {'undef', module()}.
+
+-spec df(module()) -> df_ret().
+
+df(Mod) when is_atom(Mod) ->
+ try Mod:module_info(functions) of
+ Fs0 when is_list(Fs0) ->
+ Name = lists:concat([Mod, ".dis"]),
+ Fs = [{Mod,Func,Arity} || {Func,Arity} <- Fs0],
+ dff(Name, Fs)
+ catch _:_ -> {undef,Mod}
+ end.
+
+-spec df(module(), atom()) -> df_ret().
+
+df(Mod, Func) when is_atom(Mod), is_atom(Func) ->
+ try Mod:module_info(functions) of
+ Fs0 when is_list(Fs0) ->
+ Name = lists:concat([Mod, "_", Func, ".dis"]),
+ Fs = [{Mod,Func1,Arity} || {Func1,Arity} <- Fs0, Func1 =:= Func],
+ dff(Name, Fs)
+ catch _:_ -> {undef,Mod}
+ end.
+
+-spec df(module(), atom(), arity()) -> df_ret().
+
+df(Mod, Func, Arity) when is_atom(Mod), is_atom(Func) ->
+ try Mod:module_info(functions) of
+ Fs0 when is_list(Fs0) ->
+ Name = lists:concat([Mod, "_", Func, "_", Arity, ".dis"]),
+ Fs = [{Mod,Func1,Arity1} || {Func1,Arity1} <- Fs0,
+ Func1 =:= Func, Arity1 =:= Arity],
+ dff(Name, Fs)
+ catch _:_ -> {undef,Mod}
+ end.
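+%% Illustrative usage: erts_debug:df(lists) disassembles the whole module
+%% into "lists.dis", erts_debug:df(lists, sort) into "lists_sort.dis" and
+%% erts_debug:df(lists, sort, 1) into "lists_sort_1.dis".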
+
+dff(File, Fs) when is_pid(File), is_list(Fs) ->
+ lists:foreach(fun(Mfa) ->
+ disassemble_function(File, Mfa),
+ io:nl(File)
+ end, Fs);
+dff(Name, Fs) when is_list(Name) ->
+ case file:open(Name, [write]) of
+ {ok,F} ->
+ try
+ dff(F, Fs)
+ after
+ file:close(F)
+ end;
+ {error,Reason} ->
+ {error,{badopen,Reason}}
+ end.
+
+disassemble_function(File, {_,_,_}=MFA) ->
+ cont_dis(File, erts_debug:disassemble(MFA), MFA).
+
+cont_dis(_, false, _) -> ok;
+cont_dis(File, {Addr,Str,MFA}, MFA) ->
+ io:put_chars(File, binary_to_list(Str)),
+ cont_dis(File, erts_debug:disassemble(Addr), MFA);
+cont_dis(_, {_,_,_}, _) -> ok.
diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl
new file mode 100644
index 0000000000..fa86d53dc9
--- /dev/null
+++ b/lib/kernel/src/file.erl
@@ -0,0 +1,1077 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(file).
+
+%% Interface module for the file server and the file io servers.
+
+
+
+%%% External exports
+
+-export([format_error/1]).
+%% File system and metadata.
+-export([get_cwd/0, get_cwd/1, set_cwd/1, delete/1, rename/2,
+ make_dir/1, del_dir/1, list_dir/1,
+ read_file_info/1, write_file_info/2,
+ altname/1,
+ read_link_info/1, read_link/1,
+ make_link/2, make_symlink/2,
+ read_file/1, write_file/2, write_file/3]).
+%% Specialized
+-export([ipread_s32bu_p32bu/3]).
+%% Generic file contents.
+-export([open/2, close/1,
+ read/2, write/2,
+ pread/2, pread/3, pwrite/2, pwrite/3,
+ read_line/1,
+ position/2, truncate/1, sync/1,
+ copy/2, copy/3]).
+%% High level operations
+-export([consult/1, path_consult/2]).
+-export([eval/1, eval/2, path_eval/2, path_eval/3, path_open/3]).
+-export([script/1, script/2, path_script/2, path_script/3]).
+-export([change_owner/2, change_owner/3, change_group/2,
+ change_mode/2, change_time/2, change_time/3]).
+
+-export([pid2name/1]).
+
+%%% Obsolete exported functions
+
+-export([raw_read_file_info/1, raw_write_file_info/2]).
+
+%% Internal export to prim_file and ram_file until they implement
+%% an efficient copy themselves.
+-export([copy_opened/3]).
+
+-export([ipread_s32bu_p32bu_int/3]).
+
+
+%%% Includes and defines
+-include("file.hrl").
+
+-define(FILE_IO_SERVER_TABLE, file_io_servers).
+
+-define(FILE_SERVER, file_server_2). % Registered name
+-define(PRIM_FILE, prim_file). % Module
+-define(RAM_FILE, ram_file). % Module
+
+%% data types
+-type filename() :: string().
+-type io_device() :: pid() | #file_descriptor{}.
+-type location() :: integer() | {'bof', integer()} | {'cur', integer()}
+ | {'eof', integer()} | 'bof' | 'cur' | 'eof'.
+-type mode() :: 'read' | 'write' | 'append' | 'raw' | 'binary' |
+ {'delayed_write', non_neg_integer(), non_neg_integer()} |
+ 'delayed_write' | {'read_ahead', pos_integer()} |
+ 'read_ahead' | 'compressed'.
+-type bindings() :: any().
+
+%%%-----------------------------------------------------------------
+%%% General functions
+
+-spec format_error(Reason :: posix() | {integer(), atom(), any()}) ->
+ string().
+
+format_error({_Line, ?MODULE, undefined_script}) ->
+ "no value returned from script";
+format_error({Line, ?MODULE, {Class, Reason, Stacktrace}}) ->
+ io_lib:format("~w: evaluation failed with reason ~w:~w and stacktrace ~w",
+ [Line, Class, Reason, Stacktrace]);
+format_error({Line, ?MODULE, {Reason, Stacktrace}}) ->
+ io_lib:format("~w: evaluation failed with reason ~w and stacktrace ~w",
+ [Line, Reason, Stacktrace]);
+format_error({Line, Mod, Reason}) ->
+ io_lib:format("~w: ~s", [Line, Mod:format_error(Reason)]);
+format_error(badarg) ->
+ "bad argument";
+format_error(system_limit) ->
+ "a system limit was hit, probably not enough ports";
+format_error(terminated) ->
+ "the file server process is terminated";
+format_error(ErrorId) ->
+ erl_posix_msg:message(ErrorId).
+
+-spec pid2name(Pid :: pid()) -> {'ok', filename()} | 'undefined'.
+
+pid2name(Pid) when is_pid(Pid) ->
+ case whereis(?FILE_SERVER) of
+ undefined ->
+ undefined;
+ _ ->
+ case ets:lookup(?FILE_IO_SERVER_TABLE, Pid) of
+ [{_, Name} | _] ->
+ {ok, Name};
+ _ ->
+ undefined
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% File server functions.
+%%% Functions that do not operate on a single open file.
+%%% Stateless.
+-spec get_cwd() -> {'ok', filename()} | {'error', posix()}.
+
+get_cwd() ->
+ call(get_cwd, []).
+
+-spec get_cwd(Drive :: string()) -> {'ok', filename()} | {'error', posix()}.
+
+get_cwd(Drive) ->
+ check_and_call(get_cwd, [file_name(Drive)]).
+
+-spec set_cwd(Dirname :: name()) -> 'ok' | {'error', posix()}.
+
+set_cwd(Dirname) ->
+ check_and_call(set_cwd, [file_name(Dirname)]).
+
+-spec delete(Name :: name()) -> 'ok' | {'error', posix()}.
+
+delete(Name) ->
+ check_and_call(delete, [file_name(Name)]).
+
+-spec rename(From :: name(), To :: name()) -> 'ok' | {'error', posix()}.
+
+rename(From, To) ->
+ check_and_call(rename, [file_name(From), file_name(To)]).
+
+-spec make_dir(Name :: name()) -> 'ok' | {'error', posix()}.
+
+make_dir(Name) ->
+ check_and_call(make_dir, [file_name(Name)]).
+
+-spec del_dir(Name :: name()) -> 'ok' | {'error', posix()}.
+
+del_dir(Name) ->
+ check_and_call(del_dir, [file_name(Name)]).
+
+-spec read_file_info(Name :: name()) -> {'ok', #file_info{}} | {'error', posix()}.
+
+read_file_info(Name) ->
+ check_and_call(read_file_info, [file_name(Name)]).
+
+-spec altname(Name :: name()) -> any().
+
+altname(Name) ->
+ check_and_call(altname, [file_name(Name)]).
+
+-spec read_link_info(Name :: name()) -> {'ok', #file_info{}} | {'error', posix()}.
+
+read_link_info(Name) ->
+ check_and_call(read_link_info, [file_name(Name)]).
+
+-spec read_link(Name :: name()) -> {'ok', filename()} | {'error', posix()}.
+
+read_link(Name) ->
+ check_and_call(read_link, [file_name(Name)]).
+
+-spec write_file_info(Name :: name(), Info :: #file_info{}) ->
+ 'ok' | {'error', posix()}.
+
+write_file_info(Name, Info = #file_info{}) ->
+ check_and_call(write_file_info, [file_name(Name), Info]).
+
+-spec list_dir(Name :: name()) -> {'ok', [filename()]} | {'error', posix()}.
+
+list_dir(Name) ->
+ check_and_call(list_dir, [file_name(Name)]).
+
+-spec read_file(Name :: name()) -> {'ok', binary()} | {'error', posix()}.
+
+read_file(Name) ->
+ check_and_call(read_file, [file_name(Name)]).
+
+-spec make_link(Old :: name(), New :: name()) -> 'ok' | {'error', posix()}.
+
+make_link(Old, New) ->
+ check_and_call(make_link, [file_name(Old), file_name(New)]).
+
+-spec make_symlink(Old :: name(), New :: name()) -> 'ok' | {'error', posix()}.
+
+make_symlink(Old, New) ->
+ check_and_call(make_symlink, [file_name(Old), file_name(New)]).
+
+-spec write_file(Name :: name(), Bin :: binary()) -> 'ok' | {'error', posix()}.
+
+write_file(Name, Bin) ->
+ check_and_call(write_file, [file_name(Name), make_binary(Bin)]).
+
+%% This whole operation should be moved to the file_server and prim_file
+%% when it is time to change file server protocol again.
+%% Meanwhile, it is implemented here, slightly less efficiently.
+%%
+
+-spec write_file(Name :: name(), Bin :: binary(), Modes :: [mode()]) ->
+ 'ok' | {'error', posix()}.
+
+write_file(Name, Bin, ModeList) when is_list(ModeList) ->
+ case make_binary(Bin) of
+ B when is_binary(B) ->
+ case open(Name, [binary, write |
+ lists:delete(binary,
+ lists:delete(write, ModeList))]) of
+ {ok, Handle} ->
+ case write(Handle, B) of
+ ok ->
+ close(Handle);
+ E1 ->
+ close(Handle),
+ E1
+ end;
+ E2 ->
+ E2
+ end;
+ E3 ->
+ E3
+ end.
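+%% Illustrative usage (the file name is hypothetical):
+%%   file:write_file("trace.log", <<"event\n">>, [append])
+%% opens the file with [binary, write, append], writes the data and
+%% closes the handle again.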
+
+%% Obsolete, undocumented, local node only, don't use!
+%% XXX to be removed.
+raw_read_file_info(Name) ->
+ Args = [file_name(Name)],
+ case check_args(Args) of
+ ok ->
+ [FileName] = Args,
+ ?PRIM_FILE:read_file_info(FileName);
+ Error ->
+ Error
+ end.
+
+%% Obsolete, undocumented, local node only, don't use!
+%% XXX to be removed.
+raw_write_file_info(Name, #file_info{} = Info) ->
+ Args = [file_name(Name)],
+ case check_args(Args) of
+ ok ->
+ [FileName] = Args,
+ ?PRIM_FILE:write_file_info(FileName, Info);
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% File io server functions.
+%%% They operate on a single open file.
+%%% Stateful.
+
+%% Contemporary mode specification - list of options
+
+-spec open(Name :: name(), Modes :: [mode()]) ->
+ {'ok', io_device()} | {'error', posix()}.
+
+open(Item, ModeList) when is_list(ModeList) ->
+ case lists:member(raw, ModeList) of
+ %% Raw file, use ?PRIM_FILE to handle this file
+ true ->
+ %% check if raw file mode is disabled
+ case catch application:get_env(kernel, raw_files) of
+ {ok,false} ->
+ open(Item, lists:delete(raw, ModeList));
+ _ -> % undefined | {ok,true}
+ Args = [file_name(Item) | ModeList],
+ case check_args(Args) of
+ ok ->
+ [FileName | _] = Args,
+ %% We rely on the returned Handle (in {ok, Handle})
+ %% being a pid() or a #file_descriptor{}
+ ?PRIM_FILE:open(FileName, ModeList);
+ Error ->
+ Error
+ end
+ end;
+ false ->
+ case lists:member(ram, ModeList) of
+ %% RAM file, use ?RAM_FILE to handle this file
+ true ->
+ case check_args(ModeList) of
+ ok ->
+ ?RAM_FILE:open(Item, ModeList);
+ Error ->
+ Error
+ end;
+ %% File server file
+ false ->
+ Args = [file_name(Item) | ModeList],
+ case check_args(Args) of
+ ok ->
+ [FileName | _] = Args,
+ call(open, [FileName, ModeList]);
+ Error ->
+ Error
+ end
+ end
+ end;
+%% Old obsolete mode specification in atom or 2-tuple format
+open(Item, Mode) ->
+ open(Item, mode_list(Mode)).
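+%% Illustrative usage (the file names are hypothetical):
+%%   {ok, RawFd} = file:open("data.bin", [raw, binary, read]),
+%%   {ok, Fd}    = file:open("notes.txt", [read]),
+%% where the raw handle is served by prim_file directly and the second
+%% one goes through the file server, as described above.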
+
+%%%-----------------------------------------------------------------
+%%% The following interface functions operate on open files.
+%%% The File argument must be either a Pid or a handle
+%%% returned from ?PRIM_FILE:open.
+
+-spec close(File :: io_device()) -> 'ok' | {'error', posix()}.
+
+close(File) when is_pid(File) ->
+ R = file_request(File, close),
+ case wait_file_reply(File, R) of
+ {error, terminated} ->
+ ok;
+ Other ->
+ Other
+ end;
+%% unlink(File),
+%% exit(File, close),
+%% ok;
+close(#file_descriptor{module = Module} = Handle) ->
+ Module:close(Handle);
+close(_) ->
+ {error, badarg}.
+
+-spec read(File :: io_device(), Size :: non_neg_integer()) ->
+ 'eof' | {'ok', [char()] | binary()} | {'error', posix()}.
+
+read(File, Sz) when is_pid(File), is_integer(Sz), Sz >= 0 ->
+ case io:request(File, {get_chars, '', Sz}) of
+ Data when is_list(Data); is_binary(Data) ->
+ {ok, Data};
+ Other ->
+ Other
+ end;
+read(#file_descriptor{module = Module} = Handle, Sz)
+ when is_integer(Sz), Sz >= 0 ->
+ Module:read(Handle, Sz);
+read(_, _) ->
+ {error, badarg}.
+
+-spec read_line(File :: io_device()) ->
+ 'eof' | {'ok', [char()] | binary()} | {'error', posix()}.
+
+read_line(File) when is_pid(File) ->
+ case io:request(File, {get_line, ''}) of
+ Data when is_list(Data); is_binary(Data) ->
+ {ok, Data};
+ Other ->
+ Other
+ end;
+read_line(#file_descriptor{module = Module} = Handle) ->
+ Module:read_line(Handle);
+read_line(_) ->
+ {error, badarg}.
+
+-spec pread(File :: io_device(),
+ LocationNumbers :: [{location(), non_neg_integer()}]) ->
+ {'ok', [string() | binary() | 'eof']} | {'error', posix()}.
+
+pread(File, L) when is_pid(File), is_list(L) ->
+ pread_int(File, L, []);
+pread(#file_descriptor{module = Module} = Handle, L) when is_list(L) ->
+ Module:pread(Handle, L);
+pread(_, _) ->
+ {error, badarg}.
+
+pread_int(_File, [], R) ->
+ {ok, lists:reverse(R)};
+pread_int(File, [{At, Sz} | T], R) when is_integer(Sz), Sz >= 0 ->
+ case pread(File, At, Sz) of
+ {ok, Data} ->
+ pread_int(File, T, [Data | R]);
+ eof ->
+ pread_int(File, T, [eof | R]);
+ {error, _} = Error ->
+ Error
+ end;
+pread_int(_, _, _) ->
+ {error, badarg}.
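+%% Illustrative usage: file:pread(Fd, [{0, 4}, {100, 8}]) returns
+%% {ok, [D0, D1]} where each element is the data read at the
+%% corresponding {Location, Size} pair, or the atom 'eof' past the
+%% end of the file.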
+
+-spec pread(File :: io_device(),
+ Location :: location(),
+ Size :: non_neg_integer()) ->
+ 'eof' | {'ok', string() | binary()} | {'error', posix()}.
+
+pread(File, At, Sz) when is_pid(File), is_integer(Sz), Sz >= 0 ->
+ R = file_request(File, {pread, At, Sz}),
+ wait_file_reply(File, R);
+pread(#file_descriptor{module = Module} = Handle, Offs, Sz)
+ when is_integer(Sz), Sz >= 0 ->
+ Module:pread(Handle, Offs, Sz);
+pread(_, _, _) ->
+ {error, badarg}.
+
+-spec write(File :: io_device(), Byte :: iodata()) ->
+ 'ok' | {'error', posix()}.
+
+write(File, Bytes) when is_pid(File) ->
+ case make_binary(Bytes) of
+ Bin when is_binary(Bin) ->
+ io:request(File, {put_chars,Bin});
+ Error ->
+ Error
+ end;
+write(#file_descriptor{module = Module} = Handle, Bytes) ->
+ Module:write(Handle, Bytes);
+write(_, _) ->
+ {error, badarg}.
+
+-spec pwrite(File :: io_device(), L :: [{location(), iodata()}]) ->
+ 'ok' | {'error', {non_neg_integer(), posix()}}.
+
+pwrite(File, L) when is_pid(File), is_list(L) ->
+ pwrite_int(File, L, 0);
+pwrite(#file_descriptor{module = Module} = Handle, L) when is_list(L) ->
+ Module:pwrite(Handle, L);
+pwrite(_, _) ->
+ {error, badarg}.
+
+pwrite_int(_File, [], _R) ->
+ ok;
+pwrite_int(File, [{At, Bytes} | T], R) ->
+ case pwrite(File, At, Bytes) of
+ ok ->
+ pwrite_int(File, T, R+1);
+ {error, Reason} ->
+ {error, {R, Reason}}
+ end;
+pwrite_int(_, _, _) ->
+ {error, badarg}.
+
+-spec pwrite(File :: io_device(),
+ Location :: location(),
+ Bytes :: iodata()) ->
+ 'ok' | {'error', posix()}.
+
+pwrite(File, At, Bytes) when is_pid(File) ->
+ R = file_request(File, {pwrite, At, Bytes}),
+ wait_file_reply(File, R);
+pwrite(#file_descriptor{module = Module} = Handle, Offs, Bytes) ->
+ Module:pwrite(Handle, Offs, Bytes);
+pwrite(_, _, _) ->
+ {error, badarg}.
+
+-spec sync(File :: io_device()) -> 'ok' | {'error', posix()}.
+
+sync(File) when is_pid(File) ->
+ R = file_request(File, sync),
+ wait_file_reply(File, R);
+sync(#file_descriptor{module = Module} = Handle) ->
+ Module:sync(Handle);
+sync(_) ->
+ {error, badarg}.
+
+-spec position(File :: io_device(), Location :: location()) ->
+ {'ok',integer()} | {'error', posix()}.
+
+position(File, At) when is_pid(File) ->
+ R = file_request(File, {position,At}),
+ wait_file_reply(File, R);
+position(#file_descriptor{module = Module} = Handle, At) ->
+ Module:position(Handle, At);
+position(_, _) ->
+ {error, badarg}.
+
+-spec truncate(File :: io_device()) -> 'ok' | {'error', posix()}.
+
+truncate(File) when is_pid(File) ->
+ R = file_request(File, truncate),
+ wait_file_reply(File, R);
+truncate(#file_descriptor{module = Module} = Handle) ->
+ Module:truncate(Handle);
+truncate(_) ->
+ {error, badarg}.
+
+-spec copy(Source :: io_device() | name() | {name(), [mode()]},
+ Destination :: io_device() | name() | {name(), [mode()]}) ->
+ {'ok', non_neg_integer()} | {'error', posix()}.
+
+copy(Source, Dest) ->
+ copy_int(Source, Dest, infinity).
+
+-spec copy(Source :: io_device() | name() | {name(), [mode()]},
+ Destination :: io_device() | name() | {name(), [mode()]},
+ Length :: non_neg_integer() | 'infinity') ->
+ {'ok', non_neg_integer()} | {'error', posix()}.
+
+copy(Source, Dest, Length)
+ when is_integer(Length), Length >= 0;
+ is_atom(Length) ->
+ copy_int(Source, Dest, Length);
+copy(_, _, _) ->
+ {error, badarg}.
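+%% Illustrative usage (the names are hypothetical): either end point may
+%% be a filename, an open handle or a {Name, Modes} tuple, e.g.
+%%   file:copy("a.dat", "b.dat"),
+%%   file:copy(SrcFd, {"b.dat", [append]}, 1024).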
+
+%% Here we know that Length is either an atom or an integer >= 0
+%% (by the way, atoms > integers)
+%%
+%% Copy between open files.
+copy_int(Source, Dest, Length)
+ when is_pid(Source), is_pid(Dest);
+ is_pid(Source), is_record(Dest, file_descriptor);
+ is_record(Source, file_descriptor), is_pid(Dest) ->
+ copy_opened_int(Source, Dest, Length, 0);
+%% Copy between open raw files, both handled by the same module
+copy_int(#file_descriptor{module = Module} = Source,
+ #file_descriptor{module = Module} = Dest,
+ Length) ->
+ Module:copy(Source, Dest, Length);
+%% Copy between open raw files of different modules
+copy_int(#file_descriptor{} = Source,
+ #file_descriptor{} = Dest, Length) ->
+ copy_opened_int(Source, Dest, Length, 0);
+%% Copy between filenames, let the server do the copy
+copy_int({SourceName, SourceOpts}, {DestName, DestOpts}, Length)
+ when is_list(SourceOpts), is_list(DestOpts) ->
+ check_and_call(copy,
+ [file_name(SourceName), SourceOpts,
+ file_name(DestName), DestOpts,
+ Length]);
+%% Filename -> open file; must open Source and do client copy
+copy_int({SourceName, SourceOpts}, Dest, Length)
+ when is_list(SourceOpts), is_pid(Dest);
+ is_list(SourceOpts), is_record(Dest, file_descriptor) ->
+ case file_name(SourceName) of
+ {error, _} = Error ->
+ Error;
+ Source ->
+ case open(Source, [read | SourceOpts]) of
+ {ok, Handle} ->
+ Result = copy_opened_int(Handle, Dest, Length, 0),
+ close(Handle),
+ Result;
+ {error, _} = Error ->
+ Error
+ end
+ end;
+%% Open file -> filename; must open Dest and do client copy
+copy_int(Source, {DestName, DestOpts}, Length)
+ when is_pid(Source), is_list(DestOpts);
+ is_record(Source, file_descriptor), is_list(DestOpts) ->
+ case file_name(DestName) of
+ {error, _} = Error ->
+ Error;
+ Dest ->
+ case open(Dest, [write | DestOpts]) of
+ {ok, Handle} ->
+ Result = copy_opened_int(Source, Handle, Length, 0),
+ close(Handle),
+ Result;
+ {error, _} = Error ->
+ Error
+ end
+ end;
+%%
+%% That was all combinations of {Name, Opts} tuples
+%% and open files. At least one of Source and Dest has
+%% to be a bare filename.
+%%
+%% If Source is not a bare filename; Dest must be
+copy_int(Source, Dest, Length)
+ when is_pid(Source);
+ is_record(Source, file_descriptor) ->
+ copy_int(Source, {Dest, []}, Length);
+copy_int({_SourceName, SourceOpts} = Source, Dest, Length)
+ when is_list(SourceOpts) ->
+ copy_int(Source, {Dest, []}, Length);
+%% If Dest is not a bare filename; Source must be
+copy_int(Source, Dest, Length)
+ when is_pid(Dest);
+ is_record(Dest, file_descriptor) ->
+ copy_int({Source, []}, Dest, Length);
+copy_int(Source, {_DestName, DestOpts} = Dest, Length)
+ when is_list(DestOpts) ->
+ copy_int({Source, []}, Dest, Length);
+%% Both must be bare filenames. If they are not,
+%% the filename check in the copy operation will yell.
+copy_int(Source, Dest, Length) ->
+ copy_int({Source, []}, {Dest, []}, Length).
+
+
+
+copy_opened(Source, Dest, Length)
+ when is_integer(Length), Length >= 0;
+ is_atom(Length) ->
+ copy_opened_int(Source, Dest, Length);
+copy_opened(_, _, _) ->
+ {error, badarg}.
+
+%% Here we know that Length is either an atom or an integer >= 0
+%% (by the way, atoms > integers)
+
+copy_opened_int(Source, Dest, Length)
+ when is_pid(Source), is_pid(Dest) ->
+ copy_opened_int(Source, Dest, Length, 0);
+copy_opened_int(Source, Dest, Length)
+ when is_pid(Source), is_record(Dest, file_descriptor) ->
+ copy_opened_int(Source, Dest, Length, 0);
+copy_opened_int(Source, Dest, Length)
+ when is_record(Source, file_descriptor), is_pid(Dest) ->
+ copy_opened_int(Source, Dest, Length, 0);
+copy_opened_int(Source, Dest, Length)
+ when is_record(Source, file_descriptor), is_record(Dest, file_descriptor) ->
+ copy_opened_int(Source, Dest, Length, 0);
+copy_opened_int(_, _, _) ->
+ {error, badarg}.
+
+%% Here we know that Source and Dest are handles to open files, Length is
+%% as above, and Copied is an integer >= 0
+
+%% Copy loop in client process
+copy_opened_int(_, _, Length, Copied) when Length =< 0 -> % atom() > integer()
+ {ok, Copied};
+copy_opened_int(Source, Dest, Length, Copied) ->
+ N = if Length > 65536 -> 65536; true -> Length end, % atom() > integer() !
+ case read(Source, N) of
+ {ok, Data} ->
+ M = if is_binary(Data) -> byte_size(Data);
+ is_list(Data) -> length(Data)
+ end,
+ case write(Dest, Data) of
+ ok ->
+ if M < N ->
+ %% Got less than asked for - must be end of file
+ {ok, Copied+M};
+ true ->
+ %% Decrement Length (might be an atom (infinity))
+ NewLength = if is_atom(Length) -> Length;
+ true -> Length-M
+ end,
+ copy_opened_int(Source, Dest, NewLength, Copied+M)
+ end;
+ {error, _} = Error ->
+ Error
+ end;
+ eof ->
+ {ok, Copied};
+ {error, _} = Error ->
+ Error
+ end.
+
+
+%% Special indirect pread function. Introduced for Dets.
+%% Reads a header from pos 'Pos', the header is first a size encoded as
+%% 32 bit big endian unsigned and then a position also encoded as
+%% 32 bit big endian. Finally it preads the data from that pos and size
+%% in the file.
+
+ipread_s32bu_p32bu(File, Pos, MaxSize) when is_pid(File) ->
+ ipread_s32bu_p32bu_int(File, Pos, MaxSize);
+ipread_s32bu_p32bu(#file_descriptor{module = Module} = Handle, Pos, MaxSize) ->
+ Module:ipread_s32bu_p32bu(Handle, Pos, MaxSize);
+ipread_s32bu_p32bu(_, _, _) ->
+ {error, badarg}.
+
+ipread_s32bu_p32bu_int(File, Pos, Infinity) when is_atom(Infinity) ->
+ ipread_s32bu_p32bu_int(File, Pos, (1 bsl 31)-1);
+ipread_s32bu_p32bu_int(File, Pos, MaxSize)
+ when is_integer(MaxSize), MaxSize >= 0 ->
+ if
+ MaxSize < (1 bsl 31) ->
+ case pread(File, Pos, 8) of
+ {ok, Header} ->
+ ipread_s32bu_p32bu_2(File, Header, MaxSize);
+ Error ->
+ Error
+ end;
+ true ->
+ {error, einval}
+ end;
+ipread_s32bu_p32bu_int(_File, _Pos, _MaxSize) ->
+ {error, badarg}.
+
+ipread_s32bu_p32bu_2(_File,
+ <<0:32/big-unsigned, Pos:32/big-unsigned>>,
+ _MaxSize) ->
+ {ok, {0, Pos, eof}};
+ipread_s32bu_p32bu_2(File,
+ <<Size:32/big-unsigned, Pos:32/big-unsigned>>,
+ MaxSize)
+ when Size =< MaxSize ->
+ case pread(File, Pos, Size) of
+ {ok, Data} ->
+ {ok, {Size, Pos, Data}};
+ eof ->
+ {ok, {Size, Pos, eof}};
+ Error ->
+ Error
+ end;
+ipread_s32bu_p32bu_2(_File,
+ <<_:8/binary>>,
+ _MaxSize) ->
+ eof;
+ipread_s32bu_p32bu_2(_File,
+ <<_/binary>>,
+ _MaxSize) ->
+ eof;
+ipread_s32bu_p32bu_2(File,
+ Header,
+ MaxSize) when is_list(Header) ->
+ ipread_s32bu_p32bu_2(File, list_to_binary(Header), MaxSize).
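+%% Illustrative layout (not part of the original source): a matching entry
+%% can be produced with
+%%   file:pwrite(Fd, HeaderPos, <<byte_size(Data):32/big, DataPos:32/big>>),
+%%   file:pwrite(Fd, DataPos, Data)
+%% after which ipread_s32bu_p32bu(Fd, HeaderPos, infinity) returns
+%% {ok, {byte_size(Data), DataPos, Data}}; Fd, HeaderPos, DataPos and Data
+%% are hypothetical.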
+
+
+
+%%%-----------------------------------------------------------------
+%%% The following functions, built upon the other interface functions,
+%%% provide a higher-level interface to files.
+
+-spec consult(File :: name()) ->
+ {'ok', list()} | {'error', posix() | {integer(), atom(), any()}}.
+
+consult(File) ->
+ case open(File, [read]) of
+ {ok, Fd} ->
+ R = consult_stream(Fd),
+ close(Fd),
+ R;
+ Error ->
+ Error
+ end.
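+%% Illustrative usage (the file name and contents are hypothetical): for a
+%% file "nodes.conf" containing the dot-terminated terms
+%%   {host, "srv1"}.
+%%   {port, 4000}.
+%% file:consult("nodes.conf") returns {ok, [{host,"srv1"}, {port,4000}]}.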
+
+-spec path_consult(Paths :: [name()], File :: name()) ->
+ {'ok', list(), filename()} | {'error', posix() | {integer(), atom(), any()}}.
+
+path_consult(Path, File) ->
+ case path_open(Path, File, [read]) of
+ {ok, Fd, Full} ->
+ case consult_stream(Fd) of
+ {ok, List} ->
+ close(Fd),
+ {ok, List, Full};
+ E1 ->
+ close(Fd),
+ E1
+ end;
+ E2 ->
+ E2
+ end.
+
+-spec eval(File :: name()) -> 'ok' | {'error', posix()}.
+
+eval(File) ->
+ eval(File, erl_eval:new_bindings()).
+
+-spec eval(File :: name(), Bindings :: bindings()) ->
+ 'ok' | {'error', posix()}.
+
+eval(File, Bs) ->
+ case open(File, [read]) of
+ {ok, Fd} ->
+ R = eval_stream(Fd, ignore, Bs),
+ close(Fd),
+ R;
+ Error ->
+ Error
+ end.
+
+-spec path_eval(Paths :: [name()], File :: name()) ->
+ {'ok', filename()} | {'error', posix() | {integer(), atom(), any()}}.
+
+path_eval(Path, File) ->
+ path_eval(Path, File, erl_eval:new_bindings()).
+
+-spec path_eval(Paths :: [name()], File :: name(), Bindings :: bindings()) ->
+ {'ok', filename()} | {'error', posix() | {integer(), atom(), any()}}.
+
+path_eval(Path, File, Bs) ->
+ case path_open(Path, File, [read]) of
+ {ok, Fd, Full} ->
+ case eval_stream(Fd, ignore, Bs) of
+ ok ->
+ close(Fd),
+ {ok, Full};
+ E1 ->
+ close(Fd),
+ E1
+ end;
+ E2 ->
+ E2
+ end.
+
+-spec script(File :: name()) ->
+ {'ok', any()} | {'error', posix() | {integer(), atom(), any()}}.
+
+script(File) ->
+ script(File, erl_eval:new_bindings()).
+
+-spec script(File :: name(), Bindings :: bindings()) ->
+ {'ok', any()} | {'error', posix() | {integer(), atom(), any()}}.
+
+script(File, Bs) ->
+ case open(File, [read]) of
+ {ok, Fd} ->
+ R = eval_stream(Fd, return, Bs),
+ close(Fd),
+ R;
+ Error ->
+ Error
+ end.
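+%% Illustrative usage (the file name and contents are hypothetical): if
+%% "magic.script" contains the expressions
+%%   X = 6 * 7.
+%%   {answer, X}.
+%% then file:script("magic.script") returns {ok, {answer, 42}}, the value
+%% of the last expression.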
+
+-spec path_script(Paths :: [name()], File :: name()) ->
+ {'ok', term(), filename()} | {'error', posix() | {integer(), atom(), _}}.
+
+path_script(Path, File) ->
+ path_script(Path, File, erl_eval:new_bindings()).
+
+-spec path_script(Paths :: [name()],
+ File :: name(),
+ Bindings :: bindings()) ->
+ {'ok', term(), filename()} | {'error', posix() | {integer(), atom(), _}}.
+
+path_script(Path, File, Bs) ->
+ case path_open(Path, File, [read]) of
+ {ok,Fd,Full} ->
+ case eval_stream(Fd, return, Bs) of
+ {ok,R} ->
+ close(Fd),
+ {ok, R, Full};
+ E1 ->
+ close(Fd),
+ E1
+ end;
+ E2 ->
+ E2
+ end.
+
+
+%% path_open(Paths, Filename, Mode) ->
+%% {ok,FileDescriptor,FullName}
+%% {error,Reason}
+%%
+%% Searches the Paths for file Filename which can be opened with Mode.
+%% The path list is ignored if Filename contains an absolute path.
+
+-spec path_open(Paths :: [name()], Name :: name(), Modes :: [mode()]) ->
+ {'ok', io_device(), filename()} | {'error', posix()}.
+
+path_open(PathList, Name, Mode) ->
+ case file_name(Name) of
+ {error, _} = Error ->
+ Error;
+ FileName ->
+ case filename:pathtype(FileName) of
+ relative ->
+ path_open_first(PathList, FileName, Mode, enoent);
+ _ ->
+ case open(Name, Mode) of
+ {ok, Fd} ->
+ {ok, Fd, Name};
+ Error ->
+ Error
+ end
+ end
+ end.
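+%% Illustrative usage (the paths are hypothetical):
+%%   file:path_open(["./priv", "/etc"], "hosts", [read])
+%% tries "./priv/hosts" first and might return {ok, Fd, "/etc/hosts"}
+%% for the first directory in which the file could be opened.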
+
+-spec change_mode(Name :: name(), Mode :: integer()) ->
+ 'ok' | {'error', posix()}.
+
+change_mode(Name, Mode)
+ when is_integer(Mode) ->
+ write_file_info(Name, #file_info{mode=Mode}).
+
+-spec change_owner(Name :: name(), OwnerId :: integer()) ->
+ 'ok' | {'error', posix()}.
+
+change_owner(Name, OwnerId)
+ when is_integer(OwnerId) ->
+ write_file_info(Name, #file_info{uid=OwnerId}).
+
+-spec change_owner(Name :: name(),
+ OwnerId :: integer(),
+ GroupId :: integer()) ->
+ 'ok' | {'error', posix()}.
+
+change_owner(Name, OwnerId, GroupId)
+ when is_integer(OwnerId), is_integer(GroupId) ->
+ write_file_info(Name, #file_info{uid=OwnerId, gid=GroupId}).
+
+-spec change_group(Name :: name(), GroupId :: integer()) ->
+ 'ok' | {'error', posix()}.
+
+change_group(Name, GroupId)
+ when is_integer(GroupId) ->
+ write_file_info(Name, #file_info{gid=GroupId}).
+
+-spec change_time(Name :: name(), Time :: date_time()) ->
+ 'ok' | {'error', posix()}.
+
+change_time(Name, Time)
+ when is_tuple(Time) ->
+ write_file_info(Name, #file_info{mtime=Time}).
+
+-spec change_time(Name :: name(),
+ ATime :: date_time(),
+ MTime :: date_time()) ->
+ 'ok' | {'error', posix()}.
+
+change_time(Name, Atime, Mtime)
+ when is_tuple(Atime), is_tuple(Mtime) ->
+ write_file_info(Name, #file_info{atime=Atime, mtime=Mtime}).
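+%% Illustrative usage (the file name is hypothetical):
+%%   file:change_mode("run.sh", 8#755)
+%% is shorthand for write_file_info("run.sh", #file_info{mode = 8#755});
+%% the other change_* functions wrap write_file_info/2 the same way.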
+
+%%%-----------------------------------------------------------------
+%%% Helpers
+
+consult_stream(Fd) ->
+ consult_stream(Fd, 1, []).
+
+consult_stream(Fd, Line, Acc) ->
+ case io:read(Fd, '', Line) of
+ {ok,Term,EndLine} ->
+ consult_stream(Fd, EndLine, [Term|Acc]);
+ {error,Error,_Line} ->
+ {error,Error};
+ {eof,_Line} ->
+ {ok,lists:reverse(Acc)}
+ end.
+
+eval_stream(Fd, Handling, Bs) ->
+ eval_stream(Fd, Handling, 1, undefined, [], Bs).
+
+eval_stream(Fd, H, Line, Last, E, Bs) ->
+ eval_stream2(io:parse_erl_exprs(Fd, '', Line), Fd, H, Last, E, Bs).
+
+eval_stream2({ok,Form,EndLine}, Fd, H, Last, E, Bs0) ->
+ try erl_eval:exprs(Form, Bs0) of
+ {value,V,Bs} ->
+ eval_stream(Fd, H, EndLine, {V}, E, Bs)
+ catch Class:Reason ->
+ Error = {EndLine,?MODULE,{Class,Reason,erlang:get_stacktrace()}},
+ eval_stream(Fd, H, EndLine, Last, [Error|E], Bs0)
+ end;
+eval_stream2({error,What,EndLine}, Fd, H, Last, E, Bs) ->
+ eval_stream(Fd, H, EndLine, Last, [What | E], Bs);
+eval_stream2({eof,EndLine}, _Fd, H, Last, E, _Bs) ->
+ case {H, Last, E} of
+ {return, {Val}, []} ->
+ {ok, Val};
+ {return, undefined, E} ->
+ {error, hd(lists:reverse(E, [{EndLine,?MODULE,undefined_script}]))};
+ {ignore, _, []} ->
+ ok;
+ {_, _, [_|_] = E} ->
+ {error, hd(lists:reverse(E))}
+ end.
+
+path_open_first([Path|Rest], Name, Mode, LastError) ->
+ case file_name(Path) of
+ {error, _} = Error ->
+ Error;
+ FilePath ->
+ FileName = filename:join(FilePath, Name),
+ case open(FileName, Mode) of
+ {ok, Fd} ->
+ {ok, Fd, FileName};
+ {error, enoent} ->
+ path_open_first(Rest, Name, Mode, LastError);
+ Error ->
+ Error
+ end
+ end;
+path_open_first([], _Name, _Mode, LastError) ->
+ {error, LastError}.
+
+%%%-----------------------------------------------------------------
+%%% Utility functions.
+
+%% file_name(FileName)
+%% Generates a flat file name from a deep list of atoms and
+%% characters (integers).
+
+file_name(N) ->
+ try
+ file_name_1(N)
+ catch Reason ->
+ {error, Reason}
+ end.
+
+file_name_1([C|T]) when is_integer(C), C > 0, C =< 255 ->
+ [C|file_name_1(T)];
+file_name_1([H|T]) ->
+ file_name_1(H) ++ file_name_1(T);
+file_name_1([]) ->
+ [];
+file_name_1(N) when is_atom(N) ->
+ atom_to_list(N);
+file_name_1(_) ->
+ throw(badarg).
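+%% Illustrative example: file_name(["/tmp/", dir, ["/", data]]) flattens
+%% the atoms and nested lists into "/tmp/dir/data", while any element
+%% that is not a character, atom or list makes it return {error, badarg}.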
+
+make_binary(Bin) when is_binary(Bin) ->
+ Bin;
+make_binary(List) ->
+ %% Convert the list to a binary in order to avoid copying a list
+ %% to the file server.
+ try
+ erlang:iolist_to_binary(List)
+ catch error:Reason ->
+ {error, Reason}
+ end.
+
+mode_list(read) ->
+ [read];
+mode_list(write) ->
+ [write];
+mode_list(read_write) ->
+ [read, write];
+mode_list({binary, Mode}) when is_atom(Mode) ->
+ [binary | mode_list(Mode)];
+mode_list({character, Mode}) when is_atom(Mode) ->
+ mode_list(Mode);
+mode_list(_) ->
+ [{error, badarg}].
+
+%%-----------------------------------------------------------------
+%% Functions for communicating with the file server
+
+call(Command, Args) when is_list(Args) ->
+ gen_server:call(?FILE_SERVER, list_to_tuple([Command | Args]), infinity).
+
+check_and_call(Command, Args) when is_list(Args) ->
+ case check_args(Args) of
+ ok ->
+ call(Command, Args);
+ Error ->
+ Error
+ end.
+
+check_args([{error, _}=Error|_Rest]) ->
+ Error;
+check_args([_Name|Rest]) ->
+ check_args(Rest);
+check_args([]) ->
+ ok.
+
+%%-----------------------------------------------------------------
+%% Functions for communicating with a file io server.
+%% The messages sent have the following formats:
+%%
+%% {file_request,From,ReplyAs,Request}
+%% {file_reply,ReplyAs,Reply}
+
+file_request(Io, Request) ->
+ R = erlang:monitor(process, Io),
+ Io ! {file_request,self(),Io,Request},
+ R.
+
+wait_file_reply(From, Ref) ->
+ receive
+ {file_reply,From,Reply} ->
+ erlang:demonitor(Ref),
+ receive {'DOWN', Ref, _, _, _} -> ok after 0 -> ok end,
+ %% receive {'EXIT', From, _} -> ok after 0 -> ok end,
+ Reply;
+ {'DOWN', Ref, _, _, _} ->
+ %% receive {'EXIT', From, _} -> ok after 0 -> ok end,
+ {error, terminated}
+ end.
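+%% Illustrative sketch (not part of the module): a caller pairs the two
+%% functions above, e.g. (hypothetical helper)
+%%
+%%   request_sync(Io, Request) ->
+%%       Ref = file_request(Io, Request),
+%%       wait_file_reply(Io, Ref).
+%%
+%% The monitor set up by file_request/2 ensures that the caller is not
+%% left waiting if the io server terminates before it replies.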
diff --git a/lib/kernel/src/file_io_server.erl b/lib/kernel/src/file_io_server.erl
new file mode 100644
index 0000000000..37e803c493
--- /dev/null
+++ b/lib/kernel/src/file_io_server.erl
@@ -0,0 +1,882 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2000-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(file_io_server).
+
+%% An I/O server handling one open file instance per server instance.
+
+-export([format_error/1]).
+-export([start/3, start_link/3]).
+
+-export([count_and_find/3]).
+
+-record(state, {handle,owner,mref,buf,read_mode,unic}).
+
+-define(PRIM_FILE, prim_file).
+-define(READ_SIZE_LIST, 128).
+-define(READ_SIZE_BINARY, (8*1024)).
+
+-define(eat_message(M, T), receive M -> M after T -> timeout end).
+
+%%%-----------------------------------------------------------------
+%%% Exported functions
+
+format_error({_Line, ?MODULE, Reason}) ->
+ io_lib:format("~w", [Reason]);
+format_error({_Line, Mod, Reason}) ->
+ Mod:format_error(Reason);
+format_error(ErrorId) ->
+ erl_posix_msg:message(ErrorId).
+
+start(Owner, FileName, ModeList)
+ when is_pid(Owner), is_list(FileName), is_list(ModeList) ->
+ do_start(spawn, Owner, FileName, ModeList).
+
+start_link(Owner, FileName, ModeList)
+ when is_pid(Owner), is_list(FileName), is_list(ModeList) ->
+ do_start(spawn_link, Owner, FileName, ModeList).
+
+%%%-----------------------------------------------------------------
+%%% Server starter, dispatcher and helpers
+
+do_start(Spawn, Owner, FileName, ModeList) ->
+ Self = self(),
+ Ref = make_ref(),
+ Pid =
+ erlang:Spawn(
+ fun() ->
+ %% process_flag(trap_exit, true),
+ case parse_options(ModeList) of
+ {ReadMode, UnicodeMode, Opts} ->
+ case ?PRIM_FILE:open(FileName, Opts) of
+ {error, Reason} = Error ->
+ Self ! {Ref, Error},
+ exit(Reason);
+ {ok, Handle} ->
+ %% XXX must I handle R6 nodes here?
+ M = erlang:monitor(process, Owner),
+ Self ! {Ref, ok},
+ server_loop(
+ #state{handle = Handle,
+ owner = Owner,
+ mref = M,
+ buf = <<>>,
+ read_mode = ReadMode,
+ unic = UnicodeMode})
+ end;
+ {error,Reason1} = Error1 ->
+ Self ! {Ref, Error1},
+ exit(Reason1)
+ end
+ end),
+ Mref = erlang:monitor(process, Pid),
+ receive
+ {Ref, {error, _Reason} = Error} ->
+ erlang:demonitor(Mref),
+ receive {'DOWN', Mref, _, _, _} -> ok after 0 -> ok end,
+ Error;
+ {Ref, ok} ->
+ erlang:demonitor(Mref),
+ receive
+ {'DOWN', Mref, _, _, Reason} ->
+ {error, Reason}
+ after 0 ->
+ {ok, Pid}
+ end;
+ {'DOWN', Mref, _, _, Reason} ->
+ {error, Reason}
+ end.
+
+%%% Returns {ReadMode, UnicodeMode, RealOpts}
+parse_options(List) ->
+ parse_options(expand_encoding(List), list, latin1, []).
+
+parse_options([],list,Uni,Acc) ->
+ {list,Uni,[binary|lists:reverse(Acc)]};
+parse_options([],binary,Uni,Acc) ->
+ {binary,Uni,lists:reverse(Acc)};
+parse_options([{encoding, Encoding}|T],RMode,_,Acc) ->
+ case valid_enc(Encoding) of
+ {ok, ExpandedEnc} ->
+ parse_options(T,RMode,ExpandedEnc,Acc);
+ {error,Reason} ->
+ {error,Reason}
+ end;
+parse_options([binary|T],_,Uni,Acc) ->
+ parse_options(T,binary,Uni,[binary|Acc]);
+parse_options([H|T],R,U,Acc) ->
+ parse_options(T,R,U,[H|Acc]).
+
+expand_encoding([]) ->
+ [];
+expand_encoding([latin1 | T]) ->
+ [{encoding,latin1} | expand_encoding(T)];
+expand_encoding([unicode | T]) ->
+ [{encoding,unicode} | expand_encoding(T)];
+expand_encoding([H|T]) ->
+ [H|expand_encoding(T)].
+
+valid_enc(latin1) ->
+ {ok,latin1};
+valid_enc(utf8) ->
+ {ok,unicode};
+valid_enc(unicode) ->
+ {ok,unicode};
+valid_enc(utf16) ->
+ {ok,{utf16,big}};
+valid_enc({utf16,big}) ->
+ {ok,{utf16,big}};
+valid_enc({utf16,little}) ->
+ {ok,{utf16,little}};
+valid_enc(utf32) ->
+ {ok,{utf32,big}};
+valid_enc({utf32,big}) ->
+ {ok,{utf32,big}};
+valid_enc({utf32,little}) ->
+ {ok,{utf32,little}};
+valid_enc(_Other) ->
+ {error,badarg}.
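+%% Illustrative sketch (not part of the module): what parse_options/1
+%% returns for a couple of typical (hypothetical) mode lists:
+%%
+%%   parse_options([read])                           -> {list,   latin1,  [binary,read]}
+%%   parse_options([read, binary, {encoding, utf8}]) -> {binary, unicode, [read,binary]}
+%%
+%% Note that 'binary' is always added to the options actually passed to
+%% ?PRIM_FILE:open/2; the io server itself converts to lists when needed.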
+
+
+
+server_loop(#state{mref = Mref} = State) ->
+ receive
+ {file_request, From, ReplyAs, Request} when is_pid(From) ->
+ case file_request(Request, State) of
+ {reply, Reply, NewState} ->
+ file_reply(From, ReplyAs, Reply),
+ server_loop(NewState);
+ {error, Reply, NewState} ->
+ %% error is the same as reply, except that
+ %% it breaks the io_request_loop further down
+ file_reply(From, ReplyAs, Reply),
+ server_loop(NewState);
+ {stop, Reason, Reply, _NewState} ->
+ file_reply(From, ReplyAs, Reply),
+ exit(Reason)
+ end;
+ {io_request, From, ReplyAs, Request} when is_pid(From) ->
+ case io_request(Request, State) of
+ {reply, Reply, NewState} ->
+ io_reply(From, ReplyAs, Reply),
+ server_loop(NewState);
+ {error, Reply, NewState} ->
+ %% error is the same as reply, except that
+ %% it breaks the io_request_loop further down
+ io_reply(From, ReplyAs, Reply),
+ server_loop(NewState);
+ {stop, Reason, Reply, _NewState} ->
+ io_reply(From, ReplyAs, Reply),
+ exit(Reason)
+ end;
+ {'DOWN', Mref, _, _, Reason} ->
+ exit(Reason);
+ _ ->
+ server_loop(State)
+ end.
+
+file_reply(From, ReplyAs, Reply) ->
+ From ! {file_reply, ReplyAs, Reply}.
+
+io_reply(From, ReplyAs, Reply) ->
+ From ! {io_reply, ReplyAs, Reply}.
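+%% Illustrative sketch (not part of the module): the server speaks the
+%% standard Erlang I/O protocol, so a raw request can be sent directly to
+%% the io server pid (Pid is hypothetical, for a file opened for writing):
+%%
+%%   Ref = make_ref(),
+%%   Pid ! {io_request, self(), Ref, {put_chars, unicode, <<"hello\n">>}},
+%%   receive {io_reply, Ref, Reply} -> Reply end.
+%%
+%% Normally this is done via the io module, e.g. io:put_chars(Pid, "hello\n").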
+
+%%%-----------------------------------------------------------------
+%%% file requests
+
+file_request({pread,At,Sz},
+ #state{handle=Handle,buf=Buf,read_mode=ReadMode}=State) ->
+ case position(Handle, At, Buf) of
+ {ok,_Offs} ->
+ case ?PRIM_FILE:read(Handle, Sz) of
+ {ok,Bin} when ReadMode =:= list ->
+ std_reply({ok,binary_to_list(Bin)}, State);
+ Reply ->
+ std_reply(Reply, State)
+ end;
+ Reply ->
+ std_reply(Reply, State)
+ end;
+file_request({pwrite,At,Data},
+ #state{handle=Handle,buf=Buf}=State) ->
+ case position(Handle, At, Buf) of
+ {ok,_Offs} ->
+ std_reply(?PRIM_FILE:write(Handle, Data), State);
+ Reply ->
+ std_reply(Reply, State)
+ end;
+file_request(sync,
+ #state{handle=Handle}=State) ->
+ case ?PRIM_FILE:sync(Handle) of
+ {error,_}=Reply ->
+ {stop,normal,Reply,State};
+ Reply ->
+ {reply,Reply,State}
+ end;
+file_request(close,
+ #state{handle=Handle}=State) ->
+ {stop,normal,?PRIM_FILE:close(Handle),State#state{buf= <<>>}};
+file_request({position,At},
+ #state{handle=Handle,buf=Buf}=State) ->
+ std_reply(position(Handle, At, Buf), State);
+file_request(truncate,
+ #state{handle=Handle}=State) ->
+ case ?PRIM_FILE:truncate(Handle) of
+ {error,_Reason}=Reply ->
+ {stop,normal,Reply,State#state{buf= <<>>}};
+ Reply ->
+ {reply,Reply,State}
+ end;
+file_request(Unknown,
+ #state{}=State) ->
+ Reason = {request, Unknown},
+ {error,{error,Reason},State}.
+
+std_reply({error,_}=Reply, State) ->
+ {error,Reply,State#state{buf= <<>>}};
+std_reply(Reply, State) ->
+ {reply,Reply,State#state{buf= <<>>}}.
+
+%%%-----------------------------------------------------------------
+%%% I/O request
+
+%% New protocol with encoding tags (R13)
+io_request({put_chars, Enc, Chars},
+ #state{buf= <<>>}=State) ->
+ put_chars(Chars, Enc, State);
+io_request({put_chars, Enc, Chars},
+ #state{handle=Handle,buf=Buf}=State) ->
+ case position(Handle, cur, Buf) of
+ {error,_}=Reply ->
+ {stop,normal,Reply,State#state{buf= <<>>}};
+ _ ->
+ put_chars(Chars, Enc, State#state{buf= <<>>})
+ end;
+io_request({put_chars,Enc,Mod,Func,Args},
+ #state{}=State) ->
+ case catch apply(Mod, Func, Args) of
+ Chars when is_list(Chars); is_binary(Chars) ->
+ io_request({put_chars,Enc,Chars}, State);
+ _ ->
+ {error,{error,Func},State}
+ end;
+
+
+io_request({get_until,Enc,_Prompt,Mod,Func,XtraArgs},
+ #state{}=State) ->
+ get_chars(io_lib, get_until, {Mod, Func, XtraArgs}, Enc, State);
+io_request({get_chars,Enc,_Prompt,N},
+ #state{}=State) ->
+ get_chars(N, Enc, State);
+
+%%
+%% This optimization gives almost nothing - it needs more work.
+%% Disabled for now. /PaN
+%%
+%% io_request({get_line,Enc,_Prompt},
+%% #state{unic=latin1}=State) ->
+%% get_line(Enc,State);
+
+io_request({get_line,Enc,_Prompt},
+ #state{}=State) ->
+ get_chars(io_lib, collect_line, [], Enc, State);
+
+
+io_request({setopts, Opts},
+ #state{}=State) when is_list(Opts) ->
+ setopts(Opts, State);
+
+io_request(getopts,
+ #state{}=State) ->
+ getopts(State);
+
+%% BC with pre-R13 nodes
+io_request({put_chars, Chars},#state{}=State) ->
+ io_request({put_chars, latin1, Chars},State);
+io_request({put_chars,Mod,Func,Args}, #state{}=State) ->
+ io_request({put_chars,latin1,Mod,Func,Args}, State);
+io_request({get_until,_Prompt,Mod,Func,XtraArgs}, #state{}=State) ->
+ io_request({get_until,latin1,_Prompt,Mod,Func,XtraArgs}, State);
+io_request({get_chars,_Prompt,N}, #state{}=State) ->
+ io_request({get_chars,latin1,_Prompt,N}, State);
+io_request({get_line,_Prompt}, #state{}=State) ->
+ io_request({get_line,latin1,_Prompt}, State);
+
+io_request({requests,Requests},
+ #state{}=State) when is_list(Requests) ->
+ io_request_loop(Requests, {reply,ok,State});
+io_request(Unknown,
+ #state{}=State) ->
+ Reason = {request,Unknown},
+ {error,{error,Reason},State}.
+
+
+
+%% Process a list of requests as long as the results are ok.
+
+io_request_loop([], Result) ->
+ Result;
+io_request_loop([_Request|_Tail],
+ {stop,_Reason,_Reply,_State}=Result) ->
+ Result;
+io_request_loop([_Request|_Tail],
+ {error,_Reply,_State}=Result) ->
+ Result;
+io_request_loop([Request|Tail],
+ {reply,_Reply,State}) ->
+ io_request_loop(Tail, io_request(Request, State)).
+
+
+
+%% I/O request put_chars
+%%
+put_chars(Chars, latin1, #state{handle=Handle, unic=latin1}=State) ->
+ case ?PRIM_FILE:write(Handle, Chars) of
+ {error,_}=Reply ->
+ {stop,normal,Reply,State};
+ Reply ->
+ {reply,Reply,State}
+ end;
+put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) ->
+ case unicode:characters_to_binary(Chars,InEncoding,OutEncoding) of
+ Bin when is_binary(Bin) ->
+ case ?PRIM_FILE:write(Handle, Bin) of
+ {error,_}=Reply ->
+ {stop,normal,Reply,State};
+ Reply ->
+ {reply,Reply,State}
+ end;
+ {error,_,_} ->
+ {stop,normal,{error,{no_translation, InEncoding, OutEncoding}},State}
+ end.
+
+%%
+%% Special-case processing of the get_line I/O request for latin1-encoded files.
+%% Unfortunately this optimization gains almost nothing and needs more work,
+%% so it is disabled for now. /PaN
+%%
+%% srch(<<>>,_,_) ->
+%% nomatch;
+%% srch(<<X:8,_/binary>>,X,N) ->
+%% {match,N};
+%% srch(<<_:8,T/binary>>,X,N) ->
+%% srch(T,X,N+1).
+%% get_line(OutEnc, #state{handle=Handle,buf = <<>>,unic=latin1}=State) ->
+%% case ?PRIM_FILE:read(Handle,?READ_SIZE_BINARY) of
+%% {ok, B} ->
+%% get_line(OutEnc, State#state{buf = B});
+%% eof ->
+%% {reply,eof,State};
+%% {error,Reason}=Error ->
+%% {stop,Reason,Error,State}
+%% end;
+%% get_line(OutEnc, #state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=latin1}=State) ->
+%% case srch(Buf,$\n,0) of
+%% nomatch ->
+%% case ?PRIM_FILE:read(Handle,?READ_SIZE_BINARY) of
+%% {ok, B} ->
+%% get_line(OutEnc,State#state{buf = <<Buf/binary,B/binary>>});
+%% eof ->
+%% std_reply(cast(Buf, ReadMode,latin1,OutEnc), State);
+%% {error,Reason}=Error ->
+%% {stop,Reason,Error,State#state{buf= <<>>}}
+%% end;
+%% {match,Pos} when Pos >= 1->
+%% PosP1 = Pos + 1,
+%% <<Res0:PosP1/binary,NewBuf/binary>> = Buf,
+%% PosM1 = Pos - 1,
+%% Res = case Res0 of
+%% <<Chomped:PosM1/binary,$\r:8,$\n:8>> ->
+%% cat(Chomped, <<"\n">>, ReadMode,latin1,OutEnc);
+%% _Other ->
+%% cast(Res0, ReadMode,latin1,OutEnc)
+%% end,
+%% {reply,Res,State#state{buf=NewBuf}};
+%% {match,Pos} ->
+%% PosP1 = Pos + 1,
+%% <<Res:PosP1/binary,NewBuf/binary>> = Buf,
+%% {reply,Res,State#state{buf=NewBuf}}
+%% end;
+%% get_line(_, #state{}=State) ->
+%% {error,{error,get_line},State}.
+
+%%
+%% Process the I/O request get_chars
+%%
+get_chars(0, Enc, #state{read_mode=ReadMode,unic=InEncoding}=State) ->
+ {reply,cast(<<>>, ReadMode,InEncoding, Enc),State};
+get_chars(N, Enc, #state{buf=Buf,read_mode=ReadMode,unic=latin1}=State)
+ when is_integer(N), N > 0, N =< byte_size(Buf) ->
+ {B1,B2} = split_binary(Buf, N),
+ {reply,cast(B1, ReadMode,latin1,Enc),State#state{buf=B2}};
+get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=latin1}=State)
+ when is_integer(N), N > 0 ->
+ BufSize = byte_size(Buf),
+ NeedSize = N-BufSize,
+ Size = erlang:max(NeedSize, ?READ_SIZE_BINARY),
+ case ?PRIM_FILE:read(Handle, Size) of
+ {ok, B} ->
+ if BufSize+byte_size(B) < N ->
+ std_reply(cat(Buf, B, ReadMode,latin1,OutEnc), State);
+ true ->
+ {B1,B2} = split_binary(B, NeedSize),
+ {reply,cat(Buf, B1, ReadMode, latin1,OutEnc),State#state{buf=B2}}
+ end;
+ eof when BufSize =:= 0 ->
+ {reply,eof,State};
+ eof ->
+ std_reply(cast(Buf, ReadMode,latin1,OutEnc), State);
+ {error,Reason}=Error ->
+ {stop,Reason,Error,State#state{buf= <<>>}}
+ end;
+get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=InEncoding}=State)
+ when is_integer(N), N > 0 ->
+ try
+ %% This is rather tricky, we need to count the actual number of characters
+ %% in the buffer first as unicode characters are not constant in length
+ {BufCount, SplitPos} = count_and_find(Buf,N,InEncoding),
+ case BufCount >= N of
+ true ->
+ {B1,B2} = case SplitPos of
+ none -> {Buf,<<>>};
+ _ ->split_binary(Buf,SplitPos)
+ end,
+ {reply,cast(B1, ReadMode,InEncoding,OutEnc),State#state{buf=B2}};
+ false ->
+ %% Need more, Try to read 4*needed in bytes...
+ NeedSize = (N - BufCount) * 4,
+ Size = erlang:max(NeedSize, ?READ_SIZE_BINARY),
+ case ?PRIM_FILE:read(Handle, Size) of
+ {ok, B} ->
+ NewBuf = list_to_binary([Buf,B]),
+ {NewCount,NewSplit} = count_and_find(NewBuf,N,InEncoding),
+ case NewCount >= N of
+ true ->
+ {B01,B02} = case NewSplit of
+ none -> {NewBuf,<<>>};
+ _ ->split_binary(NewBuf, NewSplit)
+ end,
+ {reply,cast(B01, ReadMode,InEncoding,OutEnc),
+ State#state{buf=B02}};
+ false ->
+ %% Reached end of file
+ std_reply(cast(NewBuf, ReadMode,InEncoding,OutEnc),
+ State#state{buf = <<>>})
+ end;
+ eof when BufCount =:= 0 ->
+ {reply,eof,State};
+ eof ->
+ std_reply(cast(Buf, ReadMode,InEncoding,OutEnc), State#state{buf = <<>>});
+ {error,Reason}=Error ->
+ {stop,Reason,Error,State#state{buf = <<>>}}
+ end
+ end
+ catch
+ exit:ExError ->
+ {stop,ExError,{error,ExError},State#state{buf= <<>>}}
+ end;
+
+get_chars(_N, _, #state{}=State) ->
+ {error,{error,get_chars},State}.
+
+get_chars(Mod, Func, XtraArg, OutEnc, #state{buf= <<>>}=State) ->
+ get_chars_empty(Mod, Func, XtraArg, start, OutEnc, State);
+get_chars(Mod, Func, XtraArg, OutEnc, #state{buf=Buf}=State) ->
+ get_chars_apply(Mod, Func, XtraArg, start, OutEnc, State#state{buf= <<>>}, Buf).
+
+get_chars_empty(Mod, Func, XtraArg, S, latin1,
+ #state{handle=Handle,read_mode=ReadMode, unic=latin1}=State) ->
+ case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ {ok,Bin} ->
+ get_chars_apply(Mod, Func, XtraArg, S, latin1, State, Bin);
+ eof ->
+ get_chars_apply(Mod, Func, XtraArg, S, latin1, State, eof);
+ {error,Reason}=Error ->
+ {stop,Reason,Error,State}
+ end;
+get_chars_empty(Mod, Func, XtraArg, S, OutEnc,
+ #state{handle=Handle,read_mode=ReadMode}=State) ->
+ case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ {ok,Bin} ->
+ get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, Bin);
+ eof ->
+ get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, eof);
+ {error,Reason}=Error ->
+ {stop,Reason,Error,State}
+ end.
+get_chars_notempty(Mod, Func, XtraArg, S, OutEnc,
+ #state{handle=Handle,read_mode=ReadMode,buf = B}=State) ->
+ case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ {ok,Bin} ->
+ get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, list_to_binary([B,Bin]));
+ eof ->
+ case B of
+ <<>> ->
+ get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, eof);
+ _ ->
+ {stop,invalid_unicode,{error,invalid_unicode},State}
+ end;
+ {error,Reason}=Error ->
+ {stop,Reason,Error,State}
+ end.
+
+
+get_chars_apply(Mod, Func, XtraArg, S0, latin1,
+ #state{read_mode=ReadMode,unic=latin1}=State, Data0) ->
+ Data1 = case ReadMode of
+ list when is_binary(Data0) -> binary_to_list(Data0);
+ _ -> Data0
+ end,
+ case catch Mod:Func(S0, Data1, latin1, XtraArg) of
+ {stop,Result,Buf} ->
+ {reply,Result,State#state{buf=cast_binary(Buf)}};
+ {'EXIT',Reason} ->
+ {stop,Reason,{error,err_func(Mod, Func, XtraArg)},State};
+ S1 ->
+ get_chars_empty(Mod, Func, XtraArg, S1, latin1, State)
+ end;
+get_chars_apply(Mod, Func, XtraArg, S0, OutEnc,
+ #state{read_mode=ReadMode,unic=InEnc}=State, Data0) ->
+ try
+ {Data1,NewBuff} = case ReadMode of
+ list when is_binary(Data0) ->
+ case unicode:characters_to_list(Data0,InEnc) of
+ {Tag,Decoded,Rest} when Decoded =/= [], Tag =:= error; Decoded =/= [], Tag =:= incomplete ->
+ {Decoded,erlang:iolist_to_binary(Rest)};
+ {error, [], _} ->
+ exit(invalid_unicode);
+ {incomplete, [], R} ->
+ {[],R};
+ List when is_list(List) ->
+ {List,<<>>}
+ end;
+ binary when is_binary(Data0) ->
+ case unicode:characters_to_binary(Data0,InEnc,OutEnc) of
+ {Tag2,Decoded2,Rest2} when Decoded2 =/= <<>>, Tag2 =:= error; Decoded2 =/= <<>>, Tag2 =:= incomplete ->
+ {Decoded2,erlang:iolist_to_binary(Rest2)};
+ {error, <<>>, _} ->
+ exit(invalid_unicode);
+ {incomplete, <<>>, R} ->
+ {<<>>,R};
+ Binary when is_binary(Binary) ->
+ {Binary,<<>>}
+ end;
+ _ -> %i.e. eof
+ {Data0,<<>>}
+ end,
+ case catch Mod:Func(S0, Data1, OutEnc, XtraArg) of
+ {stop,Result,Buf} ->
+ {reply,Result,State#state{buf = (if
+ is_binary(Buf) ->
+ list_to_binary([unicode:characters_to_binary(Buf,OutEnc,InEnc),NewBuff]);
+ is_list(Buf) ->
+ list_to_binary([unicode:characters_to_binary(Buf,unicode,InEnc),NewBuff]);
+ true ->
+ NewBuff
+ end)}};
+ {'EXIT',Reason} ->
+ {stop,Reason,{error,err_func(Mod, Func, XtraArg)},State};
+ S1 ->
+ get_chars_notempty(Mod, Func, XtraArg, S1, OutEnc, State#state{buf=NewBuff})
+ end
+ catch
+ exit:ExReason ->
+ {stop,ExReason,{error,err_func(Mod, Func, XtraArg)},State};
+ error:ErrReason ->
+ {stop,ErrReason,{error,err_func(Mod, Func, XtraArg)},State}
+ end.
+
+
+
+%% Convert error code to make it look as before
+err_func(io_lib, get_until, {_,F,_}) ->
+ F;
+err_func(_, F, _) ->
+ F.
+
+
+
+%% Process the I/O request setopts
+%%
+%% setopts
+setopts(Opts0,State) ->
+ Opts = proplists:unfold(
+ proplists:substitute_negations(
+ [{list,binary}],
+ expand_encoding(Opts0))),
+ case check_valid_opts(Opts) of
+ true ->
+ do_setopts(Opts,State);
+ false ->
+ {error,{error,enotsup},State}
+ end.
+check_valid_opts([]) ->
+ true;
+check_valid_opts([{binary,_}|T]) ->
+ check_valid_opts(T);
+check_valid_opts([{encoding,_Enc}|T]) ->
+ check_valid_opts(T);
+check_valid_opts(_) ->
+ false.
+do_setopts(Opts, State) ->
+ case valid_enc(proplists:get_value(encoding, Opts, State#state.unic)) of
+ {ok,NewUnic} ->
+ case proplists:get_value(binary, Opts) of
+ true ->
+ {reply,ok,State#state{read_mode=binary, unic=NewUnic}};
+ false ->
+ {reply,ok,State#state{read_mode=list, unic=NewUnic}};
+ undefined ->
+ {reply,ok,State#state{unic=NewUnic}}
+ end;
+ _ ->
+ {error,{error,badarg},State}
+ end.
+
+getopts(#state{read_mode=RM, unic=Unic} = State) ->
+ Bin = {binary, case RM of
+ binary ->
+ true;
+ _ ->
+ false
+ end},
+ Uni = {encoding, Unic},
+ {reply,[Bin,Uni],State}.
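+%% Illustrative sketch (not part of the module): these clauses serve the
+%% io:setopts/2 and io:getopts/1 calls on a file opened via file:open/2
+%% (the file name is hypothetical):
+%%
+%%   {ok, Fd} = file:open("data.txt", [read]),
+%%   ok = io:setopts(Fd, [binary, {encoding, unicode}]),
+%%   io:getopts(Fd).
+%%   %% -> [{binary,true},{encoding,unicode}]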
+
+
+%% Concatenate two binaries and convert the result to list or binary
+cat(B1, B2, binary,latin1,latin1) ->
+ list_to_binary([B1,B2]);
+cat(B1, B2, binary,InEncoding,OutEncoding) ->
+ case unicode:characters_to_binary([B1,B2],InEncoding,OutEncoding) of
+ Good when is_binary(Good) ->
+ Good;
+ _ ->
+ exit({no_translation,InEncoding,OutEncoding})
+ end;
+%% Dialyzer finds this is never used...
+%% cat(B1, B2, list, InEncoding, OutEncoding) when InEncoding =/= latin1 ->
+%% % Catch i.e. unicode -> latin1 errors by using the outencoding although otherwise
+%% % irrelevant for lists...
+%% try
+%% unicode:characters_to_list(unicode:characters_to_binary([B1,B2],InEncoding,OutEncoding),
+%% OutEncoding)
+%% catch
+%% error:_ ->
+%% exit({no_translation,InEncoding,OutEncoding})
+%% end.
+cat(B1, B2, list, latin1,_) ->
+ binary_to_list(B1)++binary_to_list(B2).
+
+%% Cast binary to list or binary
+cast(B, binary, latin1, latin1) ->
+ B;
+cast(B, binary, InEncoding, OutEncoding) ->
+ case unicode:characters_to_binary(B,InEncoding,OutEncoding) of
+ Good when is_binary(Good) ->
+ Good;
+ _ ->
+ exit({no_translation,InEncoding,OutEncoding})
+ end;
+cast(B, list, latin1, _) ->
+ binary_to_list(B);
+cast(B, list, InEncoding, OutEncoding) ->
+ try
+ unicode:characters_to_list(unicode:characters_to_binary(B,InEncoding,OutEncoding),
+ OutEncoding)
+ catch
+ error:_ ->
+ exit({no_translation,InEncoding,OutEncoding})
+ end.
+
+%% Convert buffer to binary
+cast_binary(Binary) when is_binary(Binary) ->
+ Binary;
+cast_binary(List) when is_list(List) ->
+ list_to_binary(List);
+cast_binary(_EOF) ->
+ <<>>.
+
+%% Read size for different read modes
+read_size(binary) ->
+ ?READ_SIZE_BINARY;
+read_size(list) ->
+ ?READ_SIZE_LIST.
+
+%% Utf utility
+count_and_find(Bin,N,Encoding) ->
+ cafu(Bin,N,0,0,none,case Encoding of
+ unicode -> utf8;
+ Oth -> Oth
+ end).
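+%% Illustrative sketch (not part of the module):
+%%
+%%   count_and_find(<<"abc">>, 2, unicode) -> {3, 2}
+%%
+%% i.e. the buffer holds 3 characters in total and the first 2 of them
+%% end at byte offset 2, so split_binary(Buf, 2) yields exactly the
+%% requested characters.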
+
+cafu(<<>>,0,Count,ByteCount,_SavePos,_) ->
+ {Count,ByteCount};
+cafu(<<>>,_N,Count,_ByteCount,SavePos,_) ->
+ {Count,SavePos};
+cafu(<<_/utf8,Rest/binary>>, 0, Count, ByteCount, _SavePos, utf8) ->
+ cafu(Rest,-1,Count+1,0,ByteCount,utf8);
+cafu(<<_/utf8,Rest/binary>>, N, Count, _ByteCount, SavePos, utf8) when N < 0 ->
+ cafu(Rest,-1,Count+1,0,SavePos,utf8);
+cafu(<<_/utf8,Rest/binary>> = Whole, N, Count, ByteCount, SavePos, utf8) ->
+ Delta = byte_size(Whole) - byte_size(Rest),
+ cafu(Rest,N-1,Count+1,ByteCount+Delta,SavePos,utf8);
+cafu(<<_/utf16-big,Rest/binary>>, 0, Count, ByteCount, _SavePos, {utf16,big}) ->
+ cafu(Rest,-1,Count+1,0,ByteCount,{utf16,big});
+cafu(<<_/utf16-big,Rest/binary>>, N, Count, _ByteCount, SavePos, {utf16,big}) when N < 0 ->
+ cafu(Rest,-1,Count+1,0,SavePos,{utf16,big});
+cafu(<<_/utf16-big,Rest/binary>> = Whole, N, Count, ByteCount, SavePos, {utf16,big}) ->
+ Delta = byte_size(Whole) - byte_size(Rest),
+ cafu(Rest,N-1,Count+1,ByteCount+Delta,SavePos,{utf16,big});
+cafu(<<_/utf16-little,Rest/binary>>, 0, Count, ByteCount, _SavePos, {utf16,little}) ->
+ cafu(Rest,-1,Count+1,0,ByteCount,{utf16,little});
+cafu(<<_/utf16-little,Rest/binary>>, N, Count, _ByteCount, SavePos, {utf16,little}) when N < 0 ->
+ cafu(Rest,-1,Count+1,0,SavePos,{utf16,little});
+cafu(<<_/utf16-little,Rest/binary>> = Whole, N, Count, ByteCount, SavePos, {utf16,little}) ->
+ Delta = byte_size(Whole) - byte_size(Rest),
+ cafu(Rest,N-1,Count+1,ByteCount+Delta,SavePos,{utf16,little});
+cafu(<<_/utf32-big,Rest/binary>>, 0, Count, ByteCount, _SavePos, {utf32,big}) ->
+ cafu(Rest,-1,Count+1,0,ByteCount,{utf32,big});
+cafu(<<_/utf32-big,Rest/binary>>, N, Count, _ByteCount, SavePos, {utf32,big}) when N < 0 ->
+ cafu(Rest,-1,Count+1,0,SavePos,{utf32,big});
+cafu(<<_/utf32-big,Rest/binary>> = Whole, N, Count, ByteCount, SavePos, {utf32,big}) ->
+ Delta = byte_size(Whole) - byte_size(Rest),
+ cafu(Rest,N-1,Count+1,ByteCount+Delta,SavePos,{utf32,big});
+cafu(<<_/utf32-little,Rest/binary>>, 0, Count, ByteCount, _SavePos, {utf32,little}) ->
+ cafu(Rest,-1,Count+1,0,ByteCount,{utf32,little});
+cafu(<<_/utf32-little,Rest/binary>>, N, Count, _ByteCount, SavePos, {utf32,little}) when N < 0 ->
+ cafu(Rest,-1,Count+1,0,SavePos,{utf32,little});
+cafu(<<_/utf32-little,Rest/binary>> = Whole, N, Count, ByteCount, SavePos, {utf32,little}) ->
+ Delta = byte_size(Whole) - byte_size(Rest),
+ cafu(Rest,N-1,Count+1,ByteCount+Delta,SavePos,{utf32,little});
+cafu(_Other,0,Count,ByteCount,_,_) -> % Non Unicode character,
+ % but found our point, OK this time
+ {Count,ByteCount};
+cafu(Other,_N,Count,0,SavePos,Enc) -> % Not enough, but valid chomped unicode
+ % at end.
+ case cbv(Enc,Other) of
+ false ->
+ exit(invalid_unicode);
+ _ ->
+ {Count,SavePos}
+ end;
+cafu(Other,_N,Count,ByteCount,none,Enc) -> % Return what we've got so far
+ % although not complete,
+ % it's not (yet) in error
+ case cbv(Enc,Other) of
+ false ->
+ exit(invalid_unicode);
+ _ ->
+ {Count,ByteCount}
+ end;
+cafu(Other,_N,Count,_ByteCount,SavePos,Enc) -> % As above but we have
+ % found a position
+ case cbv(Enc,Other) of
+ false ->
+ exit(invalid_unicode);
+ _ ->
+ {Count,SavePos}
+ end.
+
+%%
+%% Bluntly stolen from stdlib/unicode.erl (cbv means can be valid?)
+%%
+cbv(utf8,<<1:1,1:1,0:1,_:5>>) ->
+ 1;
+cbv(utf8,<<1:1,1:1,1:1,0:1,_:4,R/binary>>) ->
+ case R of
+ <<>> ->
+ 2;
+ <<1:1,0:1,_:6>> ->
+ 1;
+ _ ->
+ false
+ end;
+cbv(utf8,<<1:1,1:1,1:1,1:1,0:1,_:3,R/binary>>) ->
+ case R of
+ <<>> ->
+ 3;
+ <<1:1,0:1,_:6>> ->
+ 2;
+ <<1:1,0:1,_:6,1:1,0:1,_:6>> ->
+ 1;
+ _ ->
+ false
+ end;
+cbv(utf8,_) ->
+ false;
+
+cbv({utf16,big},<<A:8>>) when A =< 215; A >= 224 ->
+ 1;
+cbv({utf16,big},<<54:6,_:2>>) ->
+ 3;
+cbv({utf16,big},<<54:6,_:10>>) ->
+ 2;
+cbv({utf16,big},<<54:6,_:10,55:6,_:2>>) ->
+ 1;
+cbv({utf16,big},_) ->
+ false;
+cbv({utf16,little},<<_:8>>) ->
+ 1; % or 3, we'll see
+cbv({utf16,little},<<_:8,54:6,_:2>>) ->
+ 2;
+cbv({utf16,little},<<_:8,54:6,_:2,_:8>>) ->
+ 1;
+cbv({utf16,little},_) ->
+ false;
+
+
+cbv({utf32,big}, <<0:8>>) ->
+ 3;
+cbv({utf32,big}, <<0:8,X:8>>) when X =< 16 ->
+ 2;
+cbv({utf32,big}, <<0:8,X:8,Y:8>>)
+ when X =< 16, ((X > 0) or ((Y =< 215) or (Y >= 224))) ->
+ 1;
+cbv({utf32,big},_) ->
+ false;
+cbv({utf32,little},<<_:8>>) ->
+ 3;
+cbv({utf32,little},<<_:8,_:8>>) ->
+ 2;
+cbv({utf32,little},<<X:8,255:8,0:8>>) when X =:= 254; X =:= 255 ->
+ false;
+cbv({utf32,little},<<_:8,Y:8,X:8>>)
+ when X =< 16, ((X > 0) or ((Y =< 215) or (Y >= 224))) ->
+ 1;
+cbv({utf32,little},_) ->
+ false.
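+%% Illustrative sketch (not part of the module): cbv/2 answers whether a
+%% trailing byte sequence can still become a valid character and, if so,
+%% how many bytes are missing, e.g.
+%%
+%%   cbv(utf8, <<16#C3>>) -> 1      % one continuation byte missing
+%%   cbv(utf8, <<16#FF>>) -> false  % cannot start a valid UTF-8 character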
+
+
+%%%-----------------------------------------------------------------
+%%% ?PRIM_FILE helpers
+
+%% Compensates ?PRIM_FILE:position/2 for the number of bytes
+%% we have buffered
+
+position(Handle, cur, Buf) ->
+ position(Handle, {cur, 0}, Buf);
+position(Handle, {cur, Offs}, Buf) when is_binary(Buf) ->
+ ?PRIM_FILE:position(Handle, {cur, Offs-byte_size(Buf)});
+position(Handle, At, _Buf) ->
+ ?PRIM_FILE:position(Handle, At).
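+%% Illustrative example: if 100 bytes have been read ahead into Buf,
+%% position(Handle, cur, Buf) asks the driver for {cur, -100}, so the
+%% reported offset reflects what the client has consumed rather than
+%% what has been read from the file.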
+
diff --git a/lib/kernel/src/file_server.erl b/lib/kernel/src/file_server.erl
new file mode 100644
index 0000000000..74f2fb94a9
--- /dev/null
+++ b/lib/kernel/src/file_server.erl
@@ -0,0 +1,325 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2000-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%----------------------------------------------------------------------
+%%% File : file_server.erl
+%%% Author : Raimo Niskanen <[email protected]>
+%%% Purpose : A simple file server
+%%% Created : 13 Oct 2000 by Raimo Niskanen <[email protected]>
+%%%----------------------------------------------------------------------
+
+-module(file_server).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([format_error/1]).
+-export([start/0, start_link/0, stop/0]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(FILE_IO_SERVER_TABLE, file_io_servers).
+
+-define(FILE_SERVER, file_server_2). % Registered name
+-define(FILE_IO_SERVER, file_io_server). % Module
+-define(PRIM_FILE, prim_file). % Module
+
+%%%----------------------------------------------------------------------
+%%% API
+%%%----------------------------------------------------------------------
+format_error({_Line, ?MODULE, Reason}) ->
+ io_lib:format("~w", [Reason]);
+format_error({_Line, Mod, Reason}) ->
+ Mod:format_error(Reason);
+format_error(ErrorId) ->
+ erl_posix_msg:message(ErrorId).
+
+start() -> do_start(start).
+start_link() -> do_start(start_link).
+
+stop() ->
+ gen_server:call(?FILE_SERVER, stop, infinity).
+
+%%%----------------------------------------------------------------------
+%%% Callback functions from gen_server
+%%%----------------------------------------------------------------------
+
+%%----------------------------------------------------------------------
+%% Func: init/1
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%%----------------------------------------------------------------------
+init([]) ->
+ process_flag(trap_exit, true),
+ case ?PRIM_FILE:start() of
+ {ok, Handle} ->
+ ets:new(?FILE_IO_SERVER_TABLE, [named_table]),
+ {ok, Handle};
+ {error, Reason} ->
+ {stop, Reason}
+ end.
+
+%%----------------------------------------------------------------------
+%% Func: handle_call/3
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, State} (terminate/2 is called)
+%%----------------------------------------------------------------------
+handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle)
+ when is_list(ModeList) ->
+ Child = ?FILE_IO_SERVER:start_link(Pid, Name, ModeList),
+ case Child of
+ {ok, P} when is_pid(P) ->
+ ets:insert(?FILE_IO_SERVER_TABLE, {P, Name});
+ _ ->
+ ok
+ end,
+ {reply, Child, Handle};
+
+handle_call({open, _Name, _Mode}, _From, Handle) ->
+ {reply, {error, einval}, Handle};
+
+handle_call({read_file, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:read_file(Name), Handle};
+
+handle_call({write_file, Name, Bin}, _From, Handle) ->
+ {reply, ?PRIM_FILE:write_file(Name, Bin), Handle};
+
+handle_call({set_cwd, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:set_cwd(Handle, Name), Handle};
+
+handle_call({delete, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:delete(Handle, Name), Handle};
+
+handle_call({rename, Fr, To}, _From, Handle) ->
+ {reply, ?PRIM_FILE:rename(Handle, Fr, To), Handle};
+
+handle_call({make_dir, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:make_dir(Handle, Name), Handle};
+
+handle_call({del_dir, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:del_dir(Handle, Name), Handle};
+
+handle_call({list_dir, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:list_dir(Handle, Name), Handle};
+
+handle_call(get_cwd, _From, Handle) ->
+ {reply, ?PRIM_FILE:get_cwd(Handle), Handle};
+handle_call({get_cwd}, _From, Handle) ->
+ {reply, ?PRIM_FILE:get_cwd(Handle), Handle};
+handle_call({get_cwd, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:get_cwd(Handle, Name), Handle};
+
+handle_call({read_file_info, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:read_file_info(Handle, Name), Handle};
+
+handle_call({altname, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:altname(Handle, Name), Handle};
+
+handle_call({write_file_info, Name, Info}, _From, Handle) ->
+ {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info), Handle};
+
+handle_call({read_link_info, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:read_link_info(Handle, Name), Handle};
+
+handle_call({read_link, Name}, _From, Handle) ->
+ {reply, ?PRIM_FILE:read_link(Handle, Name), Handle};
+
+handle_call({make_link, Old, New}, _From, Handle) ->
+ {reply, ?PRIM_FILE:make_link(Handle, Old, New), Handle};
+
+handle_call({make_symlink, Old, New}, _From, Handle) ->
+ {reply, ?PRIM_FILE:make_symlink(Handle, Old, New), Handle};
+
+handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length},
+ _From, Handle) ->
+ Reply =
+ case ?PRIM_FILE:open(SourceName, [read, binary | SourceOpts]) of
+ {ok, Source} ->
+ SourceReply =
+ case ?PRIM_FILE:open(DestName,
+ [write, binary | DestOpts]) of
+ {ok, Dest} ->
+ DestReply =
+ ?PRIM_FILE:copy(Source, Dest, Length),
+ ?PRIM_FILE:close(Dest),
+ DestReply;
+ {error, _} = Error ->
+ Error
+ end,
+ ?PRIM_FILE:close(Source),
+ SourceReply;
+ {error, _} = Error ->
+ Error
+ end,
+ {reply, Reply, Handle};
+
+handle_call(stop, _From, Handle) ->
+ {stop, normal, stopped, Handle};
+
+handle_call(Request, From, Handle) ->
+ error_logger:error_msg("handle_call(~p, ~p, _)", [Request, From]),
+ {noreply, Handle}.
+
+%%----------------------------------------------------------------------
+%% Func: handle_cast/2
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%----------------------------------------------------------------------
+handle_cast(Msg, State) ->
+ error_logger:error_msg("handle_cast(~p, _)", [Msg]),
+ {noreply, State}.
+
+%%----------------------------------------------------------------------
+%% Func: handle_info/2
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%----------------------------------------------------------------------
+
+handle_info({'EXIT', Pid, _Reason}, Handle) when is_pid(Pid) ->
+ ets:delete(?FILE_IO_SERVER_TABLE, Pid),
+ {noreply, Handle};
+
+handle_info({'EXIT', Handle, _Reason}, Handle) ->
+ error_logger:error_msg("Port controlling ~w terminated in ~w",
+ [?FILE_SERVER, ?MODULE]),
+ {stop, normal, Handle};
+
+handle_info(Info, State) ->
+ error_logger:error_msg("handle_info(~p, _)", [Info]),
+ {noreply, State}.
+
+%%----------------------------------------------------------------------
+%% Func: terminate/2
+%% Purpose: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%----------------------------------------------------------------------
+terminate(_Reason, Handle) ->
+ ?PRIM_FILE:stop(Handle).
+
+%%----------------------------------------------------------------------
+%% Func: code_change/3
+%% Purpose: Convert process state when code is changed
+%% Returns: {ok, NewState}
+%%----------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%----------------------------------------------------------------------
+%%% Internal functions
+%%%----------------------------------------------------------------------
+
+%%% The basic file server and start-up.
+%%%
+%%% The file server just handles the open command/message and acts as a
+%%% router for messages between the port and the file processes. If a
+%%% file process terminates we close the associated file.
+
+%% Start = start | start_link
+do_start(Start) ->
+ case init:get_argument(master) of
+ error ->
+ gen_server:Start({local,?FILE_SERVER}, ?MODULE, [], []);
+ {ok, [[Node]]} ->
+ do_start(Start, list_to_atom(Node), ?FILE_SERVER);
+ X ->
+ {error, {get_argument, master, X}}
+ end.
+
+%% Should mimic gen_server:Start
+do_start(Start, Node, Name) ->
+ case rpc:call(Node, erlang, whereis, [Name]) of
+ Filer when is_pid(Filer); Filer =:= undefined ->
+ case catch do_start_slave(Start, Filer, Name) of
+ {'EXIT', Reason} ->
+ {error, Reason};
+ Result ->
+ Result
+ end;
+ Other ->
+ {error, {no_master, Other}}
+ end.
+
+%% May exit upon failure, return {ok, SlavePid} if all well.
+do_start_slave(start_link, Filer, Name) ->
+ Self = self(),
+ Token = make_ref(),
+ Slave = spawn_link(fun() -> relay_start(Self, Token, Filer, Name) end),
+ receive
+ {started, Token} ->
+ {ok, Slave}
+ end;
+do_start_slave(start, Filer, Name) ->
+ Self = self(),
+ Token = make_ref(),
+ Slave = spawn(fun() -> relay_start(Self, Token, Filer, Name) end),
+ SlaveMonitor = erlang:monitor(process, Slave),
+ receive
+ {started, Token} ->
+ erlang:demonitor(SlaveMonitor),
+ receive {'DOWN', SlaveMonitor, _, _, _} -> ok after 0 -> ok end,
+ {ok, Slave};
+ {'DOWN', SlaveMonitor, _, _, Reason} ->
+ exit(Reason)
+ end.
+
+%% We have the relay process file internal.
+%% We do not need to load slave as a mandatory module
+%% during system startup.
+
+relay_start(Parent, Token, Filer, Name) when is_pid(Filer) ->
+ case catch register(Name, self()) of
+ true ->
+ ok;
+ _ ->
+ exit({already_started, whereis(Name)})
+ end,
+ %% This will fail towards an R5 node or older, Filer is a pid()
+ FilerMonitor = erlang:monitor(process, Filer),
+ process_flag(trap_exit, true),
+ Parent ! {started, Token},
+ relay_loop(Parent, Filer, FilerMonitor);
+relay_start(Parent, Token, undefined, _Name) ->
+ %% Dummy process to keep kernel supervisor happy
+ process_flag(trap_exit, true),
+ Parent ! {started, Token},
+ receive
+ {'EXIT', Parent, Reason} ->
+ exit(Reason)
+ end.
+
+relay_loop(Parent, Filer, FilerMonitor) ->
+ receive
+ {'DOWN', FilerMonitor, _, _, Reason} ->
+ exit(Reason);
+ {'EXIT', Parent, Reason} ->
+ exit(Reason);
+ Msg ->
+ Filer ! Msg
+ end,
+ relay_loop(Parent, Filer, FilerMonitor).
diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl
new file mode 100644
index 0000000000..fcd1d1564a
--- /dev/null
+++ b/lib/kernel/src/gen_sctp.erl
@@ -0,0 +1,230 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(gen_sctp).
+
+%% This module provides functions for communicating with
+%% sockets using the SCTP protocol. The implementation assumes that
+%% the OS kernel supports SCTP and provides the user-level SCTP Socket API:
+%% http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13
+
+-include("inet_sctp.hrl").
+
+-export([open/0,open/1,open/2,close/1]).
+-export([listen/2,connect/4,connect/5]).
+-export([eof/2,abort/2]).
+-export([send/3,send/4,recv/1,recv/2]).
+-export([error_string/1]).
+-export([controlling_process/2]).
+
+
+
+open() ->
+ open([]).
+
+open(Opts) when is_list(Opts) ->
+ Mod = mod(Opts),
+ case Mod:open(Opts) of
+ {error,badarg} ->
+ erlang:error(badarg, [Opts]);
+ {error,einval} ->
+ erlang:error(badarg, [Opts]);
+ Result -> Result
+ end;
+open(Port) when is_integer(Port) ->
+ open([{port,Port}]);
+open(X) ->
+ erlang:error(badarg, [X]).
+
+open(Port, Opts) when is_integer(Port), is_list(Opts) ->
+ open([{port,Port}|Opts]);
+open(Port, Opts) ->
+ erlang:error(badarg, [Port,Opts]).
+
+close(S) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:close(S);
+ {error,closed} -> ok
+ end;
+close(S) ->
+ erlang:error(badarg, [S]).
+
+
+
+listen(S, Flag) when is_port(S), is_boolean(Flag) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:listen(S, Flag);
+ Error -> Error
+ end;
+listen(S, Flag) ->
+ erlang:error(badarg, [S,Flag]).
+
+connect(S, Addr, Port, Opts) ->
+ connect(S, Addr, Port, Opts, infinity).
+
+connect(S, Addr, Port, Opts, Timeout) when is_port(S), is_list(Opts) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ case Mod:getserv(Port) of
+ {ok,Port} ->
+ try inet:start_timer(Timeout) of
+ Timer ->
+ try Mod:getaddr(Addr, Timer) of
+ {ok,IP} ->
+ Mod:connect(S, IP, Port, Opts, Timer);
+ Error -> Error
+ after
+ inet:stop_timer(Timer)
+ end
+ catch
+ error:badarg ->
+ erlang:error(badarg, [S,Addr,Port,Opts,Timeout])
+ end;
+ Error -> Error
+ end;
+ Error -> Error
+ end;
+connect(S, Addr, Port, Opts, Timeout) ->
+ erlang:error(badarg, [S,Addr,Port,Opts,Timeout]).
+
+
+
+eof(S, #sctp_assoc_change{assoc_id=AssocId}) when is_port(S) ->
+ eof_or_abort(S, AssocId, eof);
+eof(S, Assoc) ->
+ erlang:error(badarg, [S,Assoc]).
+
+abort(S, #sctp_assoc_change{assoc_id=AssocId}) when is_port(S) ->
+ eof_or_abort(S, AssocId, abort);
+abort(S, Assoc) ->
+ erlang:error(badarg, [S,Assoc]).
+
+eof_or_abort(S, AssocId, Action) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:sendmsg(S, #sctp_sndrcvinfo{assoc_id = AssocId,
+ flags = [Action]},
+ <<>>);
+ Error -> Error
+ end.
+
+
+
+%% Full-featured send. Rarely needed.
+send(S, #sctp_sndrcvinfo{}=SRI, Data) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:sendmsg(S, SRI, Data);
+ Error -> Error
+ end;
+send(S, SRI, Data) ->
+ erlang:error(badarg, [S,SRI,Data]).
+
+send(S, #sctp_assoc_change{assoc_id=AssocId}, Stream, Data)
+ when is_port(S), is_integer(Stream) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:sendmsg(S, #sctp_sndrcvinfo{
+ stream = Stream,
+ assoc_id = AssocId}, Data);
+ Error -> Error
+ end;
+send(S, AssocId, Stream, Data)
+ when is_port(S), is_integer(AssocId), is_integer(Stream) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:sendmsg(S, #sctp_sndrcvinfo{
+ stream = Stream,
+ assoc_id = AssocId}, Data);
+ Error -> Error
+ end;
+send(S, AssocChange, Stream, Data) ->
+ erlang:error(badarg, [S,AssocChange,Stream,Data]).
+
+recv(S) ->
+ recv(S, infinity).
+
+recv(S, Timeout) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} ->
+ Mod:recv(S, Timeout);
+ Error -> Error
+ end;
+recv(S, Timeout) ->
+ erlang:error(badarg, [S,Timeout]).
+
+
+
+error_string(0) ->
+ ok;
+error_string(1) ->
+ "Invalid Stream Identifier";
+error_string(2) ->
+ "Missing Mandatory Parameter";
+error_string(3) ->
+ "Stale Cookie Error";
+error_string(4) ->
+ "Out of Resource";
+error_string(5) ->
+ "Unresolvable Address";
+error_string(6) ->
+ "Unrecognized Chunk Type";
+error_string(7) ->
+ "Invalid Mandatory Parameter";
+error_string(8) ->
+ "Unrecognized Parameters";
+error_string(9) ->
+ "No User Data";
+error_string(10) ->
+ "Cookie Received While Shutting Down";
+error_string(11) ->
+ "User Initiated Abort";
+%% For the remaining SCTP error causes, see the SCTP specification (RFC 4960).
+error_string(N) when is_integer(N) ->
+ unknown_error;
+error_string(X) ->
+ erlang:error(badarg, [X]).
+
+
+
+controlling_process(S, Pid) when is_port(S), is_pid(Pid) ->
+ inet:udp_controlling_process(S, Pid);
+controlling_process(S, Pid) ->
+ erlang:error(badarg, [S,Pid]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Utilities
+%%
+
+%% Get the SCTP module
+mod() -> inet_db:sctp_module().
+
+%% Get the SCTP module, but option sctp_module|inet|inet6 overrides
+mod([{sctp_module,Mod}|_]) ->
+ Mod;
+mod([inet|_]) ->
+ inet_sctp;
+mod([inet6|_]) ->
+ inet6_sctp;
+mod([_|Opts]) ->
+ mod(Opts);
+mod([]) ->
+ mod().
diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl
new file mode 100644
index 0000000000..7401b06a64
--- /dev/null
+++ b/lib/kernel/src/gen_tcp.erl
@@ -0,0 +1,192 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(gen_tcp).
+
+
+-export([connect/3, connect/4, listen/2, accept/1, accept/2,
+ shutdown/2, close/1]).
+-export([send/2, recv/2, recv/3, unrecv/2]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-include("inet_int.hrl").
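+%% Illustrative usage sketch (not part of the module); host, port and
+%% options are hypothetical and assume a listening peer:
+%%
+%%   {ok, S} = gen_tcp:connect("localhost", 5678, [binary, {packet, 0}, {active, false}]),
+%%   ok = gen_tcp:send(S, <<"hello">>),
+%%   {ok, _Reply} = gen_tcp:recv(S, 0, 5000),
+%%   ok = gen_tcp:close(S).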
+
+%%
+%% Connect a socket
+%%
+connect(Address, Port, Opts) ->
+ connect(Address,Port,Opts,infinity).
+
+connect(Address, Port, Opts, Time) ->
+ Timer = inet:start_timer(Time),
+ Res = (catch connect1(Address,Port,Opts,Timer)),
+ inet:stop_timer(Timer),
+ case Res of
+ {ok,S} -> {ok,S};
+ {error, einval} -> exit(badarg);
+ {'EXIT',Reason} -> exit(Reason);
+ Error -> Error
+ end.
+
+connect1(Address,Port,Opts,Timer) ->
+ Mod = mod(Opts),
+ case Mod:getaddrs(Address,Timer) of
+ {ok,IPs} ->
+ case Mod:getserv(Port) of
+ {ok,TP} -> try_connect(IPs,TP,Opts,Timer,Mod,{error,einval});
+ Error -> Error
+ end;
+ Error -> Error
+ end.
+
+try_connect([IP|IPs], Port, Opts, Timer, Mod, _) ->
+ Time = inet:timeout(Timer),
+ case Mod:connect(IP, Port, Opts, Time) of
+ {ok,S} -> {ok,S};
+ {error,einval} -> {error, einval};
+ {error,timeout} -> {error,timeout};
+ Err1 -> try_connect(IPs, Port, Opts, Timer, Mod, Err1)
+ end;
+try_connect([], _Port, _Opts, _Timer, _Mod, Err) ->
+ Err.
+
+
+
+%%
+%% Listen on a tcp port
+%%
+listen(Port, Opts) ->
+ Mod = mod(Opts),
+ case Mod:getserv(Port) of
+ {ok,TP} ->
+ Mod:listen(TP, Opts);
+ {error,einval} ->
+ exit(badarg);
+ Other -> Other
+ end.
+
+%%
+%% Generic tcp accept
+%%
+accept(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:accept(S);
+ Error ->
+ Error
+ end.
+
+accept(S, Time) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:accept(S, Time);
+ Error ->
+ Error
+ end.
+
+%%
+%% Generic tcp shutdown
+%%
+shutdown(S, How) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:shutdown(S, How);
+ Error ->
+ Error
+ end.
+
+%%
+%% Close
+%%
+close(S) ->
+ inet:tcp_close(S).
+
+%%
+%% Send
+%%
+send(S, Packet) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:send(S, Packet);
+ Error ->
+ Error
+ end.
+
+%%
+%% Receive data from a socket (passive mode)
+%%
+recv(S, Length) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:recv(S, Length);
+ Error ->
+ Error
+ end.
+
+recv(S, Length, Time) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:recv(S, Length, Time);
+ Error ->
+ Error
+ end.
+
+unrecv(S, Data) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:unrecv(S, Data);
+ Error ->
+ Error
+ end.
+
+%%
+%% Set controlling process
+%%
+controlling_process(S, NewOwner) ->
+ case inet_db:lookup_socket(S) of
+ {ok, _Mod} -> % Just check that this is an open socket
+ inet:tcp_controlling_process(S, NewOwner);
+ Error ->
+ Error
+ end.
+
+
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ Mod = mod(Opts),
+ Mod:fdopen(Fd, Opts).
+
+%% Get the tcp_module
+mod() -> inet_db:tcp_module().
+
+%% Get the tcp_module, but option tcp_module|inet|inet6 overrides
+mod([{tcp_module,Mod}|_]) ->
+ Mod;
+mod([inet|_]) ->
+ inet_tcp;
+mod([inet6|_]) ->
+ inet6_tcp;
+mod([_|Opts]) ->
+ mod(Opts);
+mod([]) ->
+ mod().
diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl
new file mode 100644
index 0000000000..6bded4bda6
--- /dev/null
+++ b/lib/kernel/src/gen_udp.erl
@@ -0,0 +1,117 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(gen_udp).
+
+-export([open/1, open/2, close/1]).
+-export([send/2, send/4, recv/2, recv/3, connect/3]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-include("inet_int.hrl").
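+%% Illustrative usage sketch (not part of the module); the destination
+%% port and payload are hypothetical, and the recv assumes a peer replies:
+%%
+%%   {ok, S} = gen_udp:open(0, [binary, {active, false}]),
+%%   ok = gen_udp:send(S, {127,0,0,1}, 5678, <<"ping">>),
+%%   {ok, {_IP, _Port, _Packet}} = gen_udp:recv(S, 0, 1000),
+%%   gen_udp:close(S).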
+
+open(Port) ->
+ open(Port, []).
+
+open(Port, Opts) ->
+ Mod = mod(Opts),
+ {ok,UP} = Mod:getserv(Port),
+ Mod:open(UP, Opts).
+
+close(S) ->
+ inet:udp_close(S).
+
+send(S, Address, Port, Packet) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ case Mod:getaddr(Address) of
+ {ok,IP} ->
+ case Mod:getserv(Port) of
+ {ok,UP} -> Mod:send(S, IP, UP, Packet);
+ {error,einval} -> exit(badarg);
+ Error -> Error
+ end;
+ {error,einval} -> exit(badarg);
+ Error -> Error
+ end;
+ Error ->
+ Error
+ end.
+
+send(S, Packet) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:send(S, Packet);
+ Error ->
+ Error
+ end.
+
+recv(S,Len) when is_port(S), is_integer(Len) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:recv(S, Len);
+ Error ->
+ Error
+ end.
+
+recv(S,Len,Time) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ Mod:recv(S, Len,Time);
+ Error ->
+ Error
+ end.
+
+connect(S, Address, Port) when is_port(S) ->
+ case inet_db:lookup_socket(S) of
+ {ok, Mod} ->
+ case Mod:getaddr(Address) of
+ {ok, IP} ->
+ Mod:connect(S, IP, Port);
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+controlling_process(S, NewOwner) ->
+ inet:udp_controlling_process(S, NewOwner).
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ Mod = mod(),
+ Mod:fdopen(Fd, Opts).
+
+
+%% Get the udp_module
+mod() -> inet_db:udp_module().
+
+%% Get the udp_module, but option udp_module|inet|inet6 overrides
+mod([{udp_module,Mod}|_]) ->
+ Mod;
+mod([inet|_]) ->
+ inet_udp;
+mod([inet6|_]) ->
+ inet6_udp;
+mod([_|Opts]) ->
+ mod(Opts);
+mod([]) ->
+ mod().
diff --git a/lib/kernel/src/global.erl b/lib/kernel/src/global.erl
new file mode 100644
index 0000000000..cc0402da73
--- /dev/null
+++ b/lib/kernel/src/global.erl
@@ -0,0 +1,2244 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(global).
+-behaviour(gen_server).
+
+%% Global provides global registration of process names. The names are
+%% dynamically kept up to date with the entire network. Global can
+%% operate in two modes: in a fully connected network, or in a
+%% non-fully connected network. In the latter case, the name
+%% registration mechanism won't work.
+%% As a separate service Global also provides global locks.
+
+%% External exports
+-export([start/0, start_link/0, stop/0, sync/0, sync/1,
+ safe_whereis_name/1, whereis_name/1, register_name/2,
+ register_name/3, register_name_external/2, register_name_external/3,
+ unregister_name_external/1,re_register_name/2, re_register_name/3,
+ unregister_name/1, registered_names/0, send/2, node_disconnected/1,
+ set_lock/1, set_lock/2, set_lock/3,
+ del_lock/1, del_lock/2,
+ trans/2, trans/3, trans/4,
+ random_exit_name/3, random_notify_name/3, notify_all_name/3]).
+
+%% Internal exports
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3, resolve_it/4]).
+
+-export([info/0]).
+
+-include_lib("stdlib/include/ms_transform.hrl").
+
+%% Set this variable to 'allow' to allow several names for a process.
+%% This is for backward compatibility only; the functionality is broken.
+-define(WARN_DUPLICATED_NAME, global_multi_name_action).
+
+%% Undocumented Kernel variable. Set this to 0 (zero) to get the old
+%% behaviour.
+-define(N_CONNECT_RETRIES, global_connect_retries).
+-define(DEFAULT_N_CONNECT_RETRIES, 5).
+
+%%% In certain places in the server, calling io:format hangs everything,
+%%% so we'd better use erlang:display/1.
+%%% my_tracer is used in testsuites
+-define(trace(_), ok).
+
+%-define(trace(T), (catch my_tracer ! {node(), {line,?LINE}, T})).
+
+%-define(trace(T), erlang:display({format, node(), cs(), T})).
+%cs() ->
+% {_Big, Small, Tiny} = now(),
+% (Small rem 100) * 100 + (Tiny div 10000).
+
+%% These are the protocol versions:
+%% Vsn 1 is the original protocol.
+%% Vsn 2 is enhanced with code to take care of registration of names from
+%% non erlang nodes, e.g. C-nodes.
+%% Vsn 3 is enhanced with a tag in the synch messages to distinguish
+%% different synch sessions from each other, see OTP-2766.
+%% Vsn 4 uses a single, permanent, locker process, but works like vsn 3
+%% when communicating with vsn 3 nodes. (-R10B)
+%% Vsn 5 uses an ordered list of self() and HisTheLocker when locking
+%% nodes in the own partition. (R11B-)
+
+%% Current version of global does not support vsn 4 or earlier.
+
+-define(vsn, 5).
+
+%%-----------------------------------------------------------------
+%% connect_all = boolean() - true if we are supposed to set up a
+%% fully connected net
+%% known = [Node] - all nodes known to us
+%% synced = [Node] - all nodes that have the same names as us
+%% resolvers = [{Node, MyTag, Resolver}] -
+%% the tag separating different synch sessions,
+%% and the pid of the name resolver process
+%% syncers = [pid()] - all current syncers processes
+%% node_name = atom() - our node name (can change if distribution
+%% is started/stopped dynamically)
+%%
+%% In addition to these, we keep info about messages arrived in
+%% the process dictionary:
+%% {pre_connect, Node} = {Vsn, InitMsg} - init_connect msgs that
+%% arrived before nodeup
+%% {wait_lock, Node} = {exchange, NameList, _NamelistExt} | lock_is_set
+%% - see comment below (handle_cast)
+%% {save_ops, Node} = {resolved, HisKnown, NamesExt, Res} | [operation()]
+%% - save the ops between exchange and resolved
+%% {prot_vsn, Node} = Vsn - the exchange protocol version (not used now)
+%% {sync_tag_my, Node} = My tag, used at synchronization with Node
+%% {sync_tag_his, Node} = The Node's tag, used at synchronization
+%% {lock_id, Node} = The resource locking the partitions
+%%-----------------------------------------------------------------
+-record(state, {connect_all :: boolean(),
+ known = [] :: [node()],
+ synced = [] :: [node()],
+ resolvers = [],
+ syncers = [] :: [pid()],
+ node_name = node() :: node(),
+ the_locker, the_deleter, the_registrar, trace,
+ global_lock_down = false
+ }).
+
+%%% There are also ETS tables used for bookkeeping of locks and names
+%%% (the first position is the key):
+%%%
+%%% global_locks (set): {ResourceId, LockRequesterId, [{Pid,RPid,ref()}]}
+%%% Pid is locking ResourceId, ref() is the monitor ref.
+%%% RPid =/= Pid if there is an extra process calling erlang:monitor().
+%%% global_names (set): {Name, Pid, Method, RPid, ref()}
+%%% Registered names. ref() is the monitor ref.
+%%% RPid =/= Pid if there is an extra process calling erlang:monitor().
+%%% global_names_ext (set): {Name, Pid, RegNode}
+%%% External registered names (C-nodes).
+%%% (The RPid:s can be removed when/if erlang:monitor() returns before
+%%% trying to connect to the other node.)
+%%%
+%%% Helper tables:
+%%% global_pid_names (bag): {Pid, Name} | {ref(), Name}
+%%% Name(s) registered for Pid.
+%%% There is one {Pid, Name} and one {ref(), Name} for every Pid.
+%%% ref() is the same ref() as in global_names.
+%%% global_pid_ids (bag): {Pid, ResourceId} | {ref(), ResourceId}
+%%% Resources locked by Pid.
+%%% ref() is the same ref() as in global_locks.
+%%%
+%%% global_pid_names is a 'bag' for backward compatibility.
+%%% (Before vsn 5 more than one name could be registered for a process.)
+%%%
+%%% R11B-3 (OTP-6341): The list of pids in the table 'global_locks'
+%%% was replaced by a list of {Pid, Ref}, where Ref is a monitor ref.
+%%% It was necessary to use monitors to fix bugs regarding locks that
+%%% were never removed. The signal {async_del_lock, ...} has been
+%%% kept for backward compatibility. It can be removed later.
+%%%
+%%% R11B-4 (OTP-6428): Monitors are used for registered names.
+%%% The signal {delete_name, ...} has been kept for backward compatibility.
+%%% It can be removed later as can the deleter process.
+%%% An extra process calling erlang:monitor() is sometimes created.
+%%% The new_nodes message has been augmented with the global lock id.
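+%%%
+%%% Illustration only (a made-up helper, not part of this module's API):
+%%% the tables above fit together so that the names currently registered
+%%% for a Pid can be listed by consulting global_pid_names first and then
+%%% checking that each entry is still present in global_names.
+
+example_names_of_pid(Pid) when is_pid(Pid) ->
+    [Name || {_, Name} <- ets:lookup(global_pid_names, Pid),
+             ets:member(global_names, Name)].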
+
+start() ->
+ gen_server:start({local, global_name_server}, ?MODULE, [], []).
+
+start_link() ->
+ gen_server:start_link({local, global_name_server}, ?MODULE, [], []).
+
+stop() ->
+ gen_server:call(global_name_server, stop, infinity).
+
+-spec sync() -> 'ok' | {'error', term()}.
+sync() ->
+ case check_sync_nodes() of
+ {error, _} = Error ->
+ Error;
+ SyncNodes ->
+ gen_server:call(global_name_server, {sync, SyncNodes}, infinity)
+ end.
+
+-spec sync([node()]) -> 'ok' | {'error', term()}.
+sync(Nodes) ->
+ case check_sync_nodes(Nodes) of
+ {error, _} = Error ->
+ Error;
+ SyncNodes ->
+ gen_server:call(global_name_server, {sync, SyncNodes}, infinity)
+ end.
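+
+%% Illustration only: sync/0 is typically called after new connections
+%% have been set up, to wait for the global name server to finish
+%% synchronizing with the nodes it knows about. The wrapper below is
+%% made up for this sketch; net_adm:ping/1 is the usual way to
+%% establish the connection first.
+example_connect_and_sync(Node) ->
+    case net_adm:ping(Node) of
+        pong -> sync();
+        pang -> {error, {not_reachable, Node}}
+    end.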
+
+-spec send(term(), term()) -> pid().
+send(Name, Msg) ->
+ case whereis_name(Name) of
+ Pid when is_pid(Pid) ->
+ Pid ! Msg,
+ Pid;
+ undefined ->
+ exit({badarg, {Name, Msg}})
+ end.
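+
+%% Illustration only: send/2 above exits with {badarg, {Name, Msg}} when
+%% Name is not registered, so a caller that prefers a return value can
+%% combine whereis_name/1 with an ordinary send. The wrapper name is
+%% made up for this sketch.
+example_safe_send(Name, Msg) ->
+    case whereis_name(Name) of
+        Pid when is_pid(Pid) ->
+            Pid ! Msg,
+            {ok, Pid};
+        undefined ->
+            {error, not_registered}
+    end.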
+
+%% See OTP-3737.
+-spec whereis_name(term()) -> pid() | 'undefined'.
+whereis_name(Name) ->
+ where(Name).
+
+-spec safe_whereis_name(term()) -> pid() | 'undefined'.
+safe_whereis_name(Name) ->
+ gen_server:call(global_name_server, {whereis, Name}, infinity).
+
+node_disconnected(Node) ->
+ global_name_server ! {nodedown, Node}.
+
+%%-----------------------------------------------------------------
+%% Method = function(Name, Pid1, Pid2) -> Pid1 | Pid2 | none
+%% Method is called if a name conflict is detected when two nodes
+%% are connecting to each other. It is supposed to return one of
+%% the Pids or 'none'. If a pid is returned, that pid is
+%% registered as Name on all nodes. If 'none' is returned, the
+%% Name is unregistered on all nodes. If anything else is returned,
+%% the Name is unregistered as well.
+%% Method is called only once, at one of the nodes where the processes
+%% reside. If different Methods are used for the same name, it is
+%% undefined which one of them is used.
+%% Method blocks the name registration, but does not affect global locking.
+%%-----------------------------------------------------------------
+-spec register_name(term(), pid()) -> 'yes' | 'no'.
+register_name(Name, Pid) when is_pid(Pid) ->
+ register_name(Name, Pid, fun random_exit_name/3).
+
+-type method() :: fun((term(), pid(), pid()) -> pid() | 'none').
+
+-spec register_name(term(), pid(), method()) -> 'yes' | 'no'.
+register_name(Name, Pid, Method) when is_pid(Pid) ->
+ Fun = fun(Nodes) ->
+ case (where(Name) =:= undefined) andalso check_dupname(Name, Pid) of
+ true ->
+ gen_server:multi_call(Nodes,
+ global_name_server,
+ {register, Name, Pid, Method}),
+ yes;
+ _ ->
+ no
+ end
+ end,
+ ?trace({register_name, self(), Name, Pid, Method}),
+ gen_server:call(global_name_server, {registrar, Fun}, infinity).
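+
+%% Illustration only: a minimal sketch of a user-supplied resolve Method
+%% of the method() type above (random_exit_name/3 is the default). This
+%% one logs the conflict and keeps Pid1; the function name
+%% example_keep_first/3 is made up for this sketch.
+example_keep_first(Name, Pid1, Pid2) ->
+    error_logger:warning_msg("global: name conflict for ~w: "
+                             "keeping ~w, dropping ~w\n",
+                             [Name, Pid1, Pid2]),
+    Pid1.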
+
+check_dupname(Name, Pid) ->
+ case ets:lookup(global_pid_names, Pid) of
+ [] ->
+ true;
+ PidNames ->
+ case application:get_env(kernel, ?WARN_DUPLICATED_NAME) of
+ {ok, allow} ->
+ true;
+ _ ->
+ S = "global: ~w registered under several names: ~w\n",
+ Names = [Name | [Name1 || {_Pid, Name1} <- PidNames]],
+ error_logger:error_msg(S, [Pid, Names]),
+ false
+ end
+ end.
+
+-spec unregister_name(term()) -> _.
+unregister_name(Name) ->
+ case where(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Fun = fun(Nodes) ->
+ gen_server:multi_call(Nodes,
+ global_name_server,
+ {unregister, Name}),
+ ok
+ end,
+ ?trace({unregister_name, self(), Name}),
+ gen_server:call(global_name_server, {registrar, Fun}, infinity)
+ end.
+
+-spec re_register_name(term(), pid()) -> _.
+re_register_name(Name, Pid) when is_pid(Pid) ->
+ re_register_name(Name, Pid, fun random_exit_name/3).
+
+-spec re_register_name(term(), pid(), method()) -> _.
+re_register_name(Name, Pid, Method) when is_pid(Pid) ->
+ Fun = fun(Nodes) ->
+ gen_server:multi_call(Nodes,
+ global_name_server,
+ {register, Name, Pid, Method}),
+ yes
+ end,
+ ?trace({re_register_name, self(), Name, Pid, Method}),
+ gen_server:call(global_name_server, {registrar, Fun}, infinity).
+
+-spec registered_names() -> [term()].
+registered_names() ->
+ MS = ets:fun2ms(fun({Name,_Pid,_M,_RP,_R}) -> Name end),
+ ets:select(global_names, MS).
+
+%%-----------------------------------------------------------------
+%% The external node (e.g. a C-node) registers the name on an Erlang
+%% node which links to the process (an Erlang node has to be used
+%% since there is no global_name_server on the C-node). If the Erlang
+%% node dies the name is to be unregistered on all nodes. Normally
+%% node(Pid) is compared to the node that died, but that does not work
+%% for external nodes (the process does not run on the Erlang node
+%% that died). Therefore a table of all names registered by external
+%% nodes is kept up-to-date on all nodes.
+%%
+%% Note: if the Erlang node dies an EXIT signal is also sent to the
+%% C-node due to the link between the global_name_server and the
+%% registered process. [This is why the link has been kept despite
+%% the fact that monitors do the job now.]
+%%-----------------------------------------------------------------
+register_name_external(Name, Pid) when is_pid(Pid) ->
+ register_name_external(Name, Pid, fun random_exit_name/3).
+
+register_name_external(Name, Pid, Method) when is_pid(Pid) ->
+ Fun = fun(Nodes) ->
+ case where(Name) of
+ undefined ->
+ gen_server:multi_call(Nodes,
+ global_name_server,
+ {register_ext, Name, Pid,
+ Method, node()}),
+ yes;
+ _Pid -> no
+ end
+ end,
+ ?trace({register_name_external, self(), Name, Pid, Method}),
+ gen_server:call(global_name_server, {registrar, Fun}, infinity).
+
+unregister_name_external(Name) ->
+ unregister_name(Name).
+
+-type id() :: {term(), term()}.
+
+-spec set_lock(id()) -> boolean().
+set_lock(Id) ->
+ set_lock(Id, [node() | nodes()], infinity, 1).
+
+-type retries() :: non_neg_integer() | 'infinity'.
+
+-spec set_lock(id(), [node()]) -> boolean().
+set_lock(Id, Nodes) ->
+ set_lock(Id, Nodes, infinity, 1).
+
+-spec set_lock(id(), [node()], retries()) -> boolean().
+set_lock(Id, Nodes, Retries) when is_integer(Retries), Retries >= 0 ->
+ set_lock(Id, Nodes, Retries, 1);
+set_lock(Id, Nodes, infinity) ->
+ set_lock(Id, Nodes, infinity, 1).
+
+set_lock({_ResourceId, _LockRequesterId}, [], _Retries, _Times) ->
+ true;
+set_lock({_ResourceId, _LockRequesterId} = Id, Nodes, Retries, Times) ->
+ ?trace({set_lock,{me,self()},Id,{nodes,Nodes},
+ {retries,Retries}, {times,Times}}),
+ case set_lock_on_nodes(Id, Nodes) of
+ true ->
+ ?trace({set_lock_true, Id}),
+ true;
+ false=Reply when Retries =:= 0 ->
+ Reply;
+ false ->
+ random_sleep(Times),
+ set_lock(Id, Nodes, dec(Retries), Times+1)
+ end.
+
+-spec del_lock(id()) -> 'true'.
+del_lock(Id) ->
+ del_lock(Id, [node() | nodes()]).
+
+-spec del_lock(id(), [node()]) -> 'true'.
+del_lock({_ResourceId, _LockRequesterId} = Id, Nodes) ->
+ ?trace({del_lock, {me,self()}, Id, {nodes,Nodes}}),
+ gen_server:multi_call(Nodes, global_name_server, {del_lock, Id}),
+ true.
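+
+%% Illustration only: a sketch of how the two elements of an id() behave.
+%% ResourceId names what is locked and LockRequesterId says on whose
+%% behalf; a second set_lock on the same ResourceId succeeds only when
+%% the LockRequesterId matches the one already holding it (see
+%% can_set_lock/1 below). The resource and requester names are made up
+%% for this sketch.
+example_lock_ids() ->
+    Res = example_resource,
+    true  = set_lock({Res, self()}, [node()]),
+    true  = set_lock({Res, self()}, [node()]),            % same requester
+    false = set_lock({Res, other_requester}, [node()], 0),
+    true  = del_lock({Res, self()}, [node()]),
+    ok.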
+
+-type trans_fun() :: function() | {module(), atom()}.
+
+-spec trans(id(), trans_fun()) -> term().
+trans(Id, Fun) -> trans(Id, Fun, [node() | nodes()], infinity).
+
+-spec trans(id(), trans_fun(), [node()]) -> term().
+trans(Id, Fun, Nodes) -> trans(Id, Fun, Nodes, infinity).
+
+-spec trans(id(), trans_fun(), [node()], retries()) -> term().
+trans(Id, Fun, Nodes, Retries) ->
+ case set_lock(Id, Nodes, Retries) of
+ true ->
+ try
+ Fun()
+ after
+ del_lock(Id, Nodes)
+ end;
+ false ->
+ aborted
+ end.
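+
+%% Illustration only: trans/2-4 above is the usual way to run a critical
+%% section under a lock; the lock is released even if the fun fails, and
+%% 'aborted' is returned if the lock cannot be taken within the given
+%% number of retries. The resource name and wrapper are made up for
+%% this sketch.
+example_in_critical_section(Fun) ->
+    trans({example_critical_section, self()}, Fun, [node() | nodes()], 5).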
+
+info() ->
+ gen_server:call(global_name_server, info, infinity).
+
+%%%-----------------------------------------------------------------
+%%% Call-back functions from gen_server
+%%%-----------------------------------------------------------------
+init([]) ->
+ process_flag(trap_exit, true),
+ _ = ets:new(global_locks, [set, named_table, protected]),
+ _ = ets:new(global_names, [set, named_table, protected]),
+ _ = ets:new(global_names_ext, [set, named_table, protected]),
+
+ _ = ets:new(global_pid_names, [bag, named_table, protected]),
+ _ = ets:new(global_pid_ids, [bag, named_table, protected]),
+
+ %% This is for troubleshooting only.
+ DoTrace = os:getenv("GLOBAL_HIGH_LEVEL_TRACE") =:= "TRUE",
+ T0 = case DoTrace of
+ true ->
+ send_high_level_trace(),
+ [];
+ false ->
+ no_trace
+ end,
+
+ S = #state{the_locker = start_the_locker(DoTrace),
+ trace = T0,
+ the_deleter = start_the_deleter(self()),
+ the_registrar = start_the_registrar()},
+ S1 = trace_message(S, {init, node()}, []),
+
+ case init:get_argument(connect_all) of
+ {ok, [["false"]]} ->
+ {ok, S1#state{connect_all = false}};
+ _ ->
+ {ok, S1#state{connect_all = true}}
+ end.
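+
+%% Illustration only: besides the GLOBAL_HIGH_LEVEL_TRACE environment
+%% variable checked above, the high-level trace can be toggled at run
+%% time through the high_level_trace_* calls served by handle_call/3
+%% below. A made-up wrapper that collects a trace for Millis ms:
+example_collect_trace(Millis) ->
+    ok = gen_server:call(global_name_server, high_level_trace_start,
+                         infinity),
+    timer:sleep(Millis),
+    gen_server:call(global_name_server, high_level_trace_stop, infinity).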
+
+%%-----------------------------------------------------------------
+%% Connection algorithm
+%% ====================
+%% This algorithm solves the problem with partitioned nets as well.
+%%
+%% The main idea in the algorithm is that when two nodes connect, they
+%% try to set a lock in their own partition (i.e. all nodes already
+%% known to them; partitions are not necessarily disjoint). When the
+%% lock is set in each partition, these two nodes send each other a
+%% list of all registered names in the respective partition (*). If no conflict
+%% is found, the name tables are just updated. If a conflict is found,
+%% a resolve function is called once for each conflict. The result of
+%% the resolving is sent to the other node. When the names are
+%% exchanged, all other nodes in each partition are informed of the
+%% other nodes, and they ping each other to form a fully connected
+%% net.
+%%
+%% A few remarks:
+%%
+%% (*) When this information is being exchanged, no one is allowed to
+%% change the global register table. All calls to register etc are
+%% protected by a lock. If a registered process dies during this
+%% phase the name is unregistered on the local node immediately,
+%% but the unregistration on other nodes will take place when the
+%% deleter manages to acquire the lock. This is necessary to
+%% prevent names from spreading to nodes where they cannot be
+%% deleted.
+%%
+%% - It is assumed that nodeups and nodedowns arrive in an orderly
+%% fashion: for every node, nodeup is followed by nodedown, and vice
+%% versa. "Double" nodeups and nodedowns must never occur. It is
+%% the responsibility of net_kernel to assure this.
+%%
+%% - There is always a delay between the termination of a registered
+%% process and the removal of the name from Global's tables. This
+%% delay can sometimes be quite substantial. Global guarantees that
+%% the name will eventually be removed, but there is no
+%% synchronization between nodes; the name can be removed from some
+%% node(s) long before it is removed from other nodes. Using
+%% safe_whereis_name is no cure.
+%%
+%% - Global cannot handle problems with the distribution very well.
+%% Depending on the value of the kernel variable 'net_ticktime' long
+%% delays may occur. This does not affect the handling of locks but
+%% will block name registration.
+%%
+%% - Old synch session messages may linger on in the message queue of
+%% global_name_server after the sending node has died. The tags of
+%% such messages do not match the current tag (if there is one),
+%% which makes it possible to discard those messages and cancel the
+%% corresponding lock.
+%%
+%% Suppose nodes A and B connect, and C is connected to A.
+%% Here's the algorithm's flow:
+%%
+%% Node A
+%% ------
+%% << {nodeup, B}
+%% TheLocker ! {nodeup, ..., Node, ...} (there is one locker per node)
+%% B ! {init_connect, ..., {..., TheLockerAtA, ...}}
+%% << {init_connect, TheLockerAtB}
+%% [The lockers try to set the lock]
+%% << {lock_is_set, B, ...}
+%% [Now, lock is set in both partitions]
+%% B ! {exchange, A, Names, ...}
+%% << {exchange, B, Names, ...}
+%% [solve conflict]
+%% B ! {resolved, A, ResolvedA, KnownAtA, ...}
+%% << {resolved, B, ResolvedB, KnownAtB, ...}
+%% C ! {new_nodes, ResolvedAandB, [B]}
+%%
+%% Node C
+%% ------
+%% << {new_nodes, ResolvedOps, NewNodes}
+%% [insert Ops]
+%% ping(NewNodes)
+%% << {nodeup, B}
+%% <ignore this one>
+%%
+%% Several things can disturb this picture.
+%%
+%% First, the init_connect message may arrive _before_ the nodeup
+%% message due to delay in net_kernel. We handle this by storing such
+%% messages under {pre_connect, Node} in the process dictionary.
+%%
+%% Of course we must handle that some node goes down during the
+%% connection.
+%%
+%%-----------------------------------------------------------------
+%% Messages in the protocol
+%% ========================
+%% 1. Between global_name_servers on connecting nodes
+%% {init_connect, Vsn, Node, InitMsg}
+%% InitMsg = {locker, _Unused, HisKnown, HisTheLocker}
+%% {exchange, Node, ListOfNames, _ListOfNamesExt, Tag}
+%% {resolved, Node, HisOps, HisKnown, _Unused, ListOfNamesExt, Tag}
+%% HisKnown = list of known nodes in Node's partition
+%% 2. Between lockers on connecting nodes
+%% {his_locker, Pid} (from our global)
+%% {lock, Bool} loop until both lockers have lock = true,
+%% then send to global_name_server {lock_is_set, Node, Tag}
+%% 3. Connecting node's global_name_server informs other nodes in the same
+%% partition about hitherto unknown nodes in the other partition
+%% {new_nodes, Node, Ops, ListOfNamesExt, NewNodes, ExtraInfo}
+%% 4. Between global_name_server and resolver
+%% {resolve, NameList, Node} to resolver
+%% {exchange_ops, Node, Tag, Ops, Resolved} from resolver
+%% 5. sync protocol, between global_name_servers in different partitions
+%% {in_sync, Node, IsKnown}
+%% sent by each node to all new nodes (Node becomes known to them)
+%%-----------------------------------------------------------------
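+
+%% Illustration only: the outcome of the protocol above can be observed
+%% through the get_known and get_synced calls served below; the wrapper
+%% name example_sync_status/0 is made up for this sketch.
+example_sync_status() ->
+    {gen_server:call(global_name_server, get_known, infinity),
+     gen_server:call(global_name_server, get_synced, infinity)}.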
+
+handle_call({whereis, Name}, From, S) ->
+ do_whereis(Name, From),
+ {noreply, S};
+
+handle_call({registrar, Fun}, From, S) ->
+ S#state.the_registrar ! {trans_all_known, Fun, From},
+ {noreply, S};
+
+%% The pattern {register,'_','_','_'} is traced by the inviso
+%% application. Do not change.
+handle_call({register, Name, Pid, Method}, {FromPid, _Tag}, S0) ->
+ S = ins_name(Name, Pid, Method, FromPid, [], S0),
+ {reply, yes, S};
+
+handle_call({unregister, Name}, _From, S0) ->
+ S = delete_global_name2(Name, S0),
+ {reply, ok, S};
+
+handle_call({register_ext, Name, Pid, Method, RegNode}, {FromPid,_Tag}, S0) ->
+ S = ins_name_ext(Name, Pid, Method, RegNode, FromPid, [], S0),
+ {reply, yes, S};
+
+handle_call({set_lock, Lock}, {Pid, _Tag}, S0) ->
+ {Reply, S} = handle_set_lock(Lock, Pid, S0),
+ {reply, Reply, S};
+
+handle_call({del_lock, Lock}, {Pid, _Tag}, S0) ->
+ S = handle_del_lock(Lock, Pid, S0),
+ {reply, true, S};
+
+handle_call(get_known, _From, S) ->
+ {reply, S#state.known, S};
+
+handle_call(get_synced, _From, S) ->
+ {reply, S#state.synced, S};
+
+handle_call({sync, Nodes}, From, S) ->
+ %% If we have several global groups, this won't work, since we will
+ %% do start_sync on a nonempty list of nodes even if the system
+ %% is quiet.
+ Pid = start_sync(lists:delete(node(), Nodes) -- S#state.synced, From),
+ {noreply, S#state{syncers = [Pid | S#state.syncers]}};
+
+handle_call(get_protocol_version, _From, S) ->
+ {reply, ?vsn, S};
+
+handle_call(get_names_ext, _From, S) ->
+ {reply, get_names_ext(), S};
+
+handle_call(info, _From, S) ->
+ {reply, S, S};
+
+%% "High level trace". For troubleshooting only.
+handle_call(high_level_trace_start, _From, S) ->
+ S#state.the_locker ! {do_trace, true},
+ send_high_level_trace(),
+ {reply, ok, trace_message(S#state{trace = []}, {init, node()}, [])};
+handle_call(high_level_trace_stop, _From, S) ->
+ #state{the_locker = TheLocker, trace = Trace} = S,
+ TheLocker ! {do_trace, false},
+ wait_high_level_trace(),
+ {reply, Trace, S#state{trace = no_trace}};
+handle_call(high_level_trace_get, _From, #state{trace = Trace}=S) ->
+ {reply, Trace, S#state{trace = []}};
+
+handle_call(stop, _From, S) ->
+ {stop, normal, stopped, S};
+
+handle_call(Request, From, S) ->
+ error_logger:warning_msg("The global_name_server "
+ "received an unexpected message:\n"
+ "handle_call(~p, ~p, _)\n",
+ [Request, From]),
+ {noreply, S}.
+
+%%========================================================================
+%% init_connect
+%%
+%%========================================================================
+handle_cast({init_connect, Vsn, Node, InitMsg}, S) ->
+ %% Sent from global_name_server at Node.
+ ?trace({'####', init_connect, {vsn, Vsn}, {node,Node},{initmsg,InitMsg}}),
+ case Vsn of
+ %% It is always the responsibility of newer versions to understand
+ %% older versions of the protocol.
+ {HisVsn, HisTag} when HisVsn > ?vsn ->
+ init_connect(?vsn, Node, InitMsg, HisTag, S#state.resolvers, S);
+ {HisVsn, HisTag} ->
+ init_connect(HisVsn, Node, InitMsg, HisTag, S#state.resolvers, S);
+ %% To be future compatible
+ Tuple when is_tuple(Tuple) ->
+ List = tuple_to_list(Tuple),
+ [_HisVsn, HisTag | _] = List,
+ %% use own version handling if his is newer.
+ init_connect(?vsn, Node, InitMsg, HisTag, S#state.resolvers, S);
+ _ ->
+ Txt = io_lib:format("Illegal global protocol version ~p Node: ~p\n",
+ [Vsn, Node]),
+ error_logger:info_report(lists:flatten(Txt))
+ end,
+ {noreply, S};
+
+%%=======================================================================
+%% lock_is_set
+%%
+%% Ok, the lock is now set on both partitions. Send our names to other node.
+%%=======================================================================
+handle_cast({lock_is_set, Node, MyTag, LockId}, S) ->
+ %% Sent from the_locker at node().
+ ?trace({'####', lock_is_set , {node,Node}}),
+ case get({sync_tag_my, Node}) of
+ MyTag ->
+ lock_is_set(Node, S#state.resolvers, LockId),
+ {noreply, S};
+ _ -> %% Illegal tag, delete the old sync session.
+ NewS = cancel_locker(Node, S, MyTag),
+ {noreply, NewS}
+ end;
+
+%%========================================================================
+%% exchange
+%%
+%% Here the names are checked to detect name clashes.
+%%========================================================================
+handle_cast({exchange, Node, NameList, _NameExtList, MyTag}, S) ->
+ %% Sent from global_name_server at Node.
+ case get({sync_tag_my, Node}) of
+ MyTag ->
+ exchange(Node, NameList, S#state.resolvers),
+ {noreply, S};
+ _ -> %% Illegal tag, delete the old sync session.
+ NewS = cancel_locker(Node, S, MyTag),
+ {noreply, NewS}
+ end;
+
+%% {exchange_ops, ...} is sent by the resolver process (which then
+%% dies). It could happen that {resolved, ...} has already arrived
+%% from the other node. In that case we can go ahead and run the
+%% resolve operations. Otherwise we have to save the operations and
+%% wait for {resolve, ...}. This is very much like {lock_is_set, ...}
+%% and {exchange, ...}.
+handle_cast({exchange_ops, Node, MyTag, Ops, Resolved}, S0) ->
+ %% Sent from the resolver for Node at node().
+ ?trace({exchange_ops, {node,Node}, {ops,Ops},{resolved,Resolved},
+ {mytag,MyTag}}),
+ S = trace_message(S0, {exit_resolver, Node}, [MyTag]),
+ case get({sync_tag_my, Node}) of
+ MyTag ->
+ Known = S#state.known,
+ gen_server:cast({global_name_server, Node},
+ {resolved, node(), Resolved, Known,
+ Known,get_names_ext(),get({sync_tag_his,Node})}),
+ case get({save_ops, Node}) of
+ {resolved, HisKnown, Names_ext, HisResolved} ->
+ put({save_ops, Node}, Ops),
+ NewS = resolved(Node, HisResolved, HisKnown, Names_ext,S),
+ {noreply, NewS};
+ undefined ->
+ put({save_ops, Node}, Ops),
+ {noreply, S}
+ end;
+ _ -> %% Illegal tag, delete the old sync session.
+ NewS = cancel_locker(Node, S, MyTag),
+ {noreply, NewS}
+ end;
+
+%%========================================================================
+%% resolved
+%%
+%% Here the name clashes are resolved.
+%%========================================================================
+handle_cast({resolved, Node, HisResolved, HisKnown, _HisKnown_v2,
+ Names_ext, MyTag}, S) ->
+ %% Sent from global_name_server at Node.
+ ?trace({'####', resolved, {his_resolved,HisResolved}, {node,Node}}),
+ case get({sync_tag_my, Node}) of
+ MyTag ->
+            %% See the comment at handle_cast({exchange_ops, ...}).
+ case get({save_ops, Node}) of
+ Ops when is_list(Ops) ->
+ NewS = resolved(Node, HisResolved, HisKnown, Names_ext, S),
+ {noreply, NewS};
+ undefined ->
+ Resolved = {resolved, HisKnown, Names_ext, HisResolved},
+ put({save_ops, Node}, Resolved),
+ {noreply, S}
+ end;
+ _ -> %% Illegal tag, delete the old sync session.
+ NewS = cancel_locker(Node, S, MyTag),
+ {noreply, NewS}
+ end;
+
+%%========================================================================
+%% new_nodes
+%%
+%% We get to know the other node's known nodes.
+%%========================================================================
+handle_cast({new_nodes, Node, Ops, Names_ext, Nodes, ExtraInfo}, S) ->
+ %% Sent from global_name_server at Node.
+ ?trace({new_nodes, {node,Node},{ops,Ops},{nodes,Nodes},{x,ExtraInfo}}),
+ NewS = new_nodes(Ops, Node, Names_ext, Nodes, ExtraInfo, S),
+ {noreply, NewS};
+
+%%========================================================================
+%% in_sync
+%%
+%% We are in sync with this node (from the other node's known world).
+%%========================================================================
+handle_cast({in_sync, Node, _IsKnown}, S) ->
+ %% Sent from global_name_server at Node (in the other partition).
+ ?trace({'####', in_sync, {Node, _IsKnown}}),
+ lists:foreach(fun(Pid) -> Pid ! {synced, [Node]} end, S#state.syncers),
+ NewS = cancel_locker(Node, S, get({sync_tag_my, Node})),
+ reset_node_state(Node),
+ NSynced = case lists:member(Node, Synced = NewS#state.synced) of
+ true -> Synced;
+ false -> [Node | Synced]
+ end,
+ {noreply, NewS#state{synced = NSynced}};
+
+%% Called when Pid on other node crashed
+handle_cast({async_del_name, _Name, _Pid}, S) ->
+    %% Sent from the_deleter at some other node in the partition (not node()).
+ %% The DOWN message deletes the name.
+ {noreply, S};
+
+handle_cast({async_del_lock, _ResourceId, _Pid}, S) ->
+    %% Sent from global_name_server at some other node in the
+    %% partition (not node()).
+ %% The DOWN message deletes the lock.
+ {noreply, S};
+
+handle_cast(Request, S) ->
+ error_logger:warning_msg("The global_name_server "
+ "received an unexpected message:\n"
+ "handle_cast(~p, _)\n", [Request]),
+ {noreply, S}.
+
+handle_info({'EXIT', Deleter, _Reason}=Exit, #state{the_deleter=Deleter}=S) ->
+ {stop, {deleter_died,Exit}, S#state{the_deleter=undefined}};
+handle_info({'EXIT', Locker, _Reason}=Exit, #state{the_locker=Locker}=S) ->
+ {stop, {locker_died,Exit}, S#state{the_locker=undefined}};
+handle_info({'EXIT', Registrar, _}=Exit, #state{the_registrar=Registrar}=S) ->
+ {stop, {registrar_died,Exit}, S#state{the_registrar=undefined}};
+handle_info({'EXIT', Pid, _Reason}, S) when is_pid(Pid) ->
+ ?trace({global_EXIT,_Reason,Pid}),
+ %% The process that died was a synch process started by start_sync
+ %% or a registered process running on an external node (C-node).
+ %% Links to external names are ignored here (there are also DOWN
+ %% signals).
+ Syncers = lists:delete(Pid, S#state.syncers),
+ {noreply, S#state{syncers = Syncers}};
+
+handle_info({nodedown, Node}, S) when Node =:= S#state.node_name ->
+ %% Somebody stopped the distribution dynamically - change
+ %% references to old node name (Node) to new node name ('nonode@nohost')
+ {noreply, change_our_node_name(node(), S)};
+
+handle_info({nodedown, Node}, S0) ->
+ ?trace({'####', nodedown, {node,Node}}),
+ S1 = trace_message(S0, {nodedown, Node}, []),
+ S = handle_nodedown(Node, S1),
+ {noreply, S};
+
+handle_info({extra_nodedown, Node}, S0) ->
+ ?trace({'####', extra_nodedown, {node,Node}}),
+ S1 = trace_message(S0, {extra_nodedown, Node}, []),
+ S = handle_nodedown(Node, S1),
+ {noreply, S};
+
+handle_info({nodeup, Node}, S) when Node =:= node() ->
+ ?trace({'####', local_nodeup, {node, Node}}),
+ %% Somebody started the distribution dynamically - change
+ %% references to old node name ('nonode@nohost') to Node.
+ {noreply, change_our_node_name(Node, S)};
+
+handle_info({nodeup, _Node}, S) when not S#state.connect_all ->
+ {noreply, S};
+
+handle_info({nodeup, Node}, S0) when S0#state.connect_all ->
+ IsKnown = lists:member(Node, S0#state.known) or
+ %% This one is only for double nodeups (shouldn't occur!)
+ lists:keymember(Node, 1, S0#state.resolvers),
+ ?trace({'####', nodeup, {node,Node}, {isknown,IsKnown}}),
+ S1 = trace_message(S0, {nodeup, Node}, []),
+ case IsKnown of
+ true ->
+ {noreply, S1};
+ false ->
+ resend_pre_connect(Node),
+
+ %% now() is used as a tag to separate different synch sessions
+            %% from each other. Global could be confused by bursty nodeups
+ %% because it couldn't separate the messages between the different
+ %% synch sessions started by a nodeup.
+ MyTag = now(),
+ put({sync_tag_my, Node}, MyTag),
+ ?trace({sending_nodeup_to_locker, {node,Node},{mytag,MyTag}}),
+ S1#state.the_locker ! {nodeup, Node, MyTag},
+
+ %% In order to be compatible with unpatched R7 a locker
+ %% process was spawned. Vsn 5 is no longer compatible with
+ %% vsn 3 nodes, so the locker process is no longer needed.
+ %% The permanent locker takes its place.
+ NotAPid = no_longer_a_pid,
+ Locker = {locker, NotAPid, S1#state.known, S1#state.the_locker},
+ InitC = {init_connect, {?vsn, MyTag}, node(), Locker},
+ Rs = S1#state.resolvers,
+ ?trace({casting_init_connect, {node,Node},{initmessage,InitC},
+ {resolvers,Rs}}),
+ gen_server:cast({global_name_server, Node}, InitC),
+ Resolver = start_resolver(Node, MyTag),
+ S = trace_message(S1, {new_resolver, Node}, [MyTag, Resolver]),
+ {noreply, S#state{resolvers = [{Node, MyTag, Resolver} | Rs]}}
+ end;
+
+handle_info({whereis, Name, From}, S) ->
+ do_whereis(Name, From),
+ {noreply, S};
+
+handle_info(known, S) ->
+ io:format(">>>> ~p\n",[S#state.known]),
+ {noreply, S};
+
+%% "High level trace". For troubleshooting only.
+handle_info(high_level_trace, S) ->
+ case S of
+ #state{trace = [{Node, _Time, _M, Nodes, _X} | _]} ->
+ send_high_level_trace(),
+ CNode = node(),
+ CNodes = nodes(),
+ case {CNode, CNodes} of
+ {Node, Nodes} ->
+ {noreply, S};
+ _ ->
+ {New, _, Old} =
+ sofs:symmetric_partition(sofs:set([CNode|CNodes]),
+ sofs:set([Node|Nodes])),
+ M = {nodes_changed, {sofs:to_external(New),
+ sofs:to_external(Old)}},
+ {noreply, trace_message(S, M, [])}
+ end;
+ _ ->
+ {noreply, S}
+ end;
+handle_info({trace_message, M}, S) ->
+ {noreply, trace_message(S, M, [])};
+handle_info({trace_message, M, X}, S) ->
+ {noreply, trace_message(S, M, X)};
+
+handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S0) ->
+ S1 = delete_lock(MonitorRef, S0),
+ S = del_name(MonitorRef, S1),
+ {noreply, S};
+
+handle_info(Message, S) ->
+ error_logger:warning_msg("The global_name_server "
+ "received an unexpected message:\n"
+ "handle_info(~p, _)\n", [Message]),
+ {noreply, S}.
+
+
+%%========================================================================
+%%========================================================================
+%%=============================== Internal Functions =====================
+%%========================================================================
+%%========================================================================
+
+-define(HIGH_LEVEL_TRACE_INTERVAL, 500). % ms
+
+wait_high_level_trace() ->
+ receive
+ high_level_trace ->
+ ok
+ after ?HIGH_LEVEL_TRACE_INTERVAL+1 ->
+ ok
+ end.
+
+send_high_level_trace() ->
+ erlang:send_after(?HIGH_LEVEL_TRACE_INTERVAL, self(), high_level_trace).
+
+-define(GLOBAL_RID, global).
+
+%% Similar to trans(Id, Fun), but always uses global's own lock
+%% on all nodes known to global, making sure that no new nodes have
+%% become known while we got the list of known nodes.
+trans_all_known(Fun) ->
+ Id = {?GLOBAL_RID, self()},
+ Nodes = set_lock_known(Id, 0),
+ try
+ Fun(Nodes)
+ after
+ delete_global_lock(Id, Nodes)
+ end.
+
+set_lock_known(Id, Times) ->
+ Known = get_known(),
+ Nodes = [node() | Known],
+ Boss = the_boss(Nodes),
+ %% Use the same convention (a boss) as lock_nodes_safely. Optimization.
+ case set_lock_on_nodes(Id, [Boss]) of
+ true ->
+ case lock_on_known_nodes(Id, Known, Nodes) of
+ true ->
+ Nodes;
+ false ->
+ del_lock(Id, [Boss]),
+ random_sleep(Times),
+ set_lock_known(Id, Times+1)
+ end;
+ false ->
+ random_sleep(Times),
+ set_lock_known(Id, Times+1)
+ end.
+
+lock_on_known_nodes(Id, Known, Nodes) ->
+ case set_lock_on_nodes(Id, Nodes) of
+ true ->
+ (get_known() -- Known) =:= [];
+ false ->
+ false
+ end.
+
+set_lock_on_nodes(_Id, []) ->
+ true;
+set_lock_on_nodes(Id, Nodes) ->
+ case local_lock_check(Id, Nodes) of
+ true ->
+ Msg = {set_lock, Id},
+ {Replies, _} =
+ gen_server:multi_call(Nodes, global_name_server, Msg),
+ ?trace({set_lock,{me,self()},Id,{nodes,Nodes},{replies,Replies}}),
+ check_replies(Replies, Id, Replies);
+ false=Reply ->
+ Reply
+ end.
+
+%% Probe lock on local node to see if one should go on trying other nodes.
+local_lock_check(_Id, [_] = _Nodes) ->
+ true;
+local_lock_check(Id, Nodes) ->
+ not lists:member(node(), Nodes) orelse (can_set_lock(Id) =/= false).
+
+check_replies([{_Node, true} | T], Id, Replies) ->
+ check_replies(T, Id, Replies);
+check_replies([{_Node, false=Reply} | _T], _Id, [_]) ->
+ Reply;
+check_replies([{_Node, false=Reply} | _T], Id, Replies) ->
+ TrueReplyNodes = [N || {N, true} <- Replies],
+ ?trace({check_replies, {true_reply_nodes, TrueReplyNodes}}),
+ gen_server:multi_call(TrueReplyNodes, global_name_server, {del_lock, Id}),
+ Reply;
+check_replies([], _Id, _Replies) ->
+ true.
+
+%%========================================================================
+%% Another node wants to synchronize its registered names with us.
+%% Both nodes must have a lock before they are allowed to continue.
+%%========================================================================
+init_connect(Vsn, Node, InitMsg, HisTag, Resolvers, S) ->
+ %% It is always the responsibility of newer versions to understand
+ %% older versions of the protocol.
+ put({prot_vsn, Node}, Vsn),
+ put({sync_tag_his, Node}, HisTag),
+ case lists:keyfind(Node, 1, Resolvers) of
+ {Node, MyTag, _Resolver} ->
+ MyTag = get({sync_tag_my, Node}), % assertion
+ {locker, _NoLongerAPid, _HisKnown0, HisTheLocker} = InitMsg,
+ ?trace({init_connect,{histhelocker,HisTheLocker}}),
+ HisKnown = [],
+ S#state.the_locker ! {his_the_locker, HisTheLocker,
+ {Vsn,HisKnown}, S#state.known};
+ false ->
+ ?trace({init_connect,{pre_connect,Node},{histag,HisTag}}),
+ put({pre_connect, Node}, {Vsn, InitMsg, HisTag})
+ end.
+
+%%========================================================================
+%% In the simple case, we'll get lock_is_set before we get exchange,
+%% but we may get exchange before we get lock_is_set from our locker.
+%% If that's the case, we'll have to remember the exchange info, and
+%% handle it when we get the lock_is_set. We do this by using the
+%% process dictionary - when the lock_is_set msg is received, we store
+%% this info. When exchange is received, we can check the dictionary
+%% if the lock_is_set has been received. If not, we store info about
+%% the exchange instead. In the lock_is_set we must first check if
+%% exchange info is stored, in that case we take care of it.
+%%========================================================================
+lock_is_set(Node, Resolvers, LockId) ->
+ gen_server:cast({global_name_server, Node},
+ {exchange, node(), get_names(), _ExtNames = [],
+ get({sync_tag_his, Node})}),
+ put({lock_id, Node}, LockId),
+ %% If both have the lock, continue with exchange.
+ case get({wait_lock, Node}) of
+ {exchange, NameList} ->
+ put({wait_lock, Node}, lock_is_set),
+ exchange(Node, NameList, Resolvers);
+ undefined ->
+ put({wait_lock, Node}, lock_is_set)
+ end.
+
+%%========================================================================
+%% exchange
+%%========================================================================
+exchange(Node, NameList, Resolvers) ->
+ ?trace({'####', exchange, {node,Node}, {namelist,NameList},
+ {resolvers, Resolvers}}),
+ case erase({wait_lock, Node}) of
+ lock_is_set ->
+ {Node, _Tag, Resolver} = lists:keyfind(Node, 1, Resolvers),
+ Resolver ! {resolve, NameList, Node};
+ undefined ->
+ put({wait_lock, Node}, {exchange, NameList})
+ end.
+
+resolved(Node, HisResolved, HisKnown, Names_ext, S0) ->
+ Ops = erase({save_ops, Node}) ++ HisResolved,
+ %% Known may have shrunk since the lock was taken (due to nodedowns).
+ Known = S0#state.known,
+ Synced = S0#state.synced,
+ NewNodes = [Node | HisKnown],
+ sync_others(HisKnown),
+ ExtraInfo = [{vsn,get({prot_vsn, Node})}, {lock, get({lock_id, Node})}],
+ S = do_ops(Ops, node(), Names_ext, ExtraInfo, S0),
+ %% I am synced with Node, but not with HisKnown yet
+ lists:foreach(fun(Pid) -> Pid ! {synced, [Node]} end, S#state.syncers),
+ S3 = lists:foldl(fun(Node1, S1) ->
+ F = fun(Tag) -> cancel_locker(Node1,S1,Tag) end,
+ cancel_resolved_locker(Node1, F)
+ end, S, HisKnown),
+ %% The locker that took the lock is asked to send
+ %% the {new_nodes, ...} message. This ensures that
+ %% {del_lock, ...} is received after {new_nodes, ...}
+ %% (except when abcast spawns process(es)...).
+ NewNodesF = fun() ->
+ gen_server:abcast(Known, global_name_server,
+ {new_nodes, node(), Ops, Names_ext,
+ NewNodes, ExtraInfo})
+ end,
+ F = fun(Tag) -> cancel_locker(Node, S3, Tag, NewNodesF) end,
+ S4 = cancel_resolved_locker(Node, F),
+ %% See (*) below... we're node b in that description
+ AddedNodes = (NewNodes -- Known),
+ NewKnown = Known ++ AddedNodes,
+ S4#state.the_locker ! {add_to_known, AddedNodes},
+ NewS = trace_message(S4, {added, AddedNodes},
+ [{new_nodes, NewNodes}, {abcast, Known}, {ops,Ops}]),
+ NewS#state{known = NewKnown, synced = [Node | Synced]}.
+
+cancel_resolved_locker(Node, CancelFun) ->
+ Tag = get({sync_tag_my, Node}),
+ ?trace({calling_cancel_locker,Tag,get()}),
+ S = CancelFun(Tag),
+ reset_node_state(Node),
+ S.
+
+new_nodes(Ops, ConnNode, Names_ext, Nodes, ExtraInfo, S0) ->
+ Known = S0#state.known,
+ %% (*) This one requires some thought...
+ %% We're node a, other nodes b and c:
+ %% The problem is that {in_sync, a} may arrive before {resolved, [a]} to
+ %% b from c, leading to b sending {new_nodes, [a]} to us (node a).
+ %% Therefore, we make sure we never get duplicates in Known.
+ AddedNodes = lists:delete(node(), Nodes -- Known),
+ sync_others(AddedNodes),
+ S = do_ops(Ops, ConnNode, Names_ext, ExtraInfo, S0),
+ ?trace({added_nodes_in_sync,{added_nodes,AddedNodes}}),
+ S#state.the_locker ! {add_to_known, AddedNodes},
+ S1 = trace_message(S, {added, AddedNodes}, [{ops,Ops}]),
+ S1#state{known = Known ++ AddedNodes}.
+
+do_whereis(Name, From) ->
+ case is_global_lock_set() of
+ false ->
+ gen_server:reply(From, where(Name));
+ true ->
+ send_again({whereis, Name, From})
+ end.
+
+terminate(_Reason, _S) ->
+ true = ets:delete(global_names),
+ true = ets:delete(global_names_ext),
+ true = ets:delete(global_locks),
+ true = ets:delete(global_pid_names),
+ true = ets:delete(global_pid_ids).
+
+code_change(_OldVsn, S, _Extra) ->
+ {ok, S}.
+
+%% The resolver runs exchange_names in a separate process. The effect
+%% is that locks can be used at the same time as name resolution takes
+%% place.
+start_resolver(Node, MyTag) ->
+ spawn(fun() -> resolver(Node, MyTag) end).
+
+resolver(Node, Tag) ->
+ receive
+ {resolve, NameList, Node} ->
+ ?trace({resolver, {me,self()}, {node,Node}, {namelist,NameList}}),
+ {Ops, Resolved} = exchange_names(NameList, Node, [], []),
+ Exchange = {exchange_ops, Node, Tag, Ops, Resolved},
+ gen_server:cast(global_name_server, Exchange),
+ exit(normal);
+ _ -> % Ignore garbage.
+ resolver(Node, Tag)
+ end.
+
+resend_pre_connect(Node) ->
+ case erase({pre_connect, Node}) of
+ {Vsn, InitMsg, HisTag} ->
+ gen_server:cast(self(),
+ {init_connect, {Vsn, HisTag}, Node, InitMsg});
+ _ ->
+ ok
+ end.
+
+ins_name(Name, Pid, Method, FromPidOrNode, ExtraInfo, S0) ->
+ ?trace({ins_name,insert,{name,Name},{pid,Pid}}),
+ S1 = delete_global_name_keep_pid(Name, S0),
+ S = trace_message(S1, {ins_name, node(Pid)}, [Name, Pid]),
+ insert_global_name(Name, Pid, Method, FromPidOrNode, ExtraInfo, S).
+
+ins_name_ext(Name, Pid, Method, RegNode, FromPidOrNode, ExtraInfo, S0) ->
+ ?trace({ins_name_ext, {name,Name}, {pid,Pid}}),
+ S1 = delete_global_name_keep_pid(Name, S0),
+ dolink_ext(Pid, RegNode),
+ S = trace_message(S1, {ins_name_ext, node(Pid)}, [Name, Pid]),
+ true = ets:insert(global_names_ext, {Name, Pid, RegNode}),
+ insert_global_name(Name, Pid, Method, FromPidOrNode, ExtraInfo, S).
+
+where(Name) ->
+ case ets:lookup(global_names, Name) of
+ [{_Name, Pid, _Method, _RPid, _Ref}] -> Pid;
+ [] -> undefined
+ end.
+
+handle_set_lock(Id, Pid, S) ->
+ ?trace({handle_set_lock, Id, Pid}),
+ case can_set_lock(Id) of
+ {true, PidRefs} ->
+ case pid_is_locking(Pid, PidRefs) of
+ true ->
+ {true, S};
+ false ->
+ {true, insert_lock(Id, Pid, PidRefs, S)}
+ end;
+ false=Reply ->
+ {Reply, S}
+ end.
+
+can_set_lock({ResourceId, LockRequesterId}) ->
+ case ets:lookup(global_locks, ResourceId) of
+ [{ResourceId, LockRequesterId, PidRefs}] ->
+ {true, PidRefs};
+ [{ResourceId, _LockRequesterId2, _PidRefs}] ->
+ false;
+ [] ->
+ {true, []}
+ end.
+
+insert_lock({ResourceId, LockRequesterId}=Id, Pid, PidRefs, S) ->
+ {RPid, Ref} = do_monitor(Pid),
+ true = ets:insert(global_pid_ids, {Pid, ResourceId}),
+ true = ets:insert(global_pid_ids, {Ref, ResourceId}),
+ Lock = {ResourceId, LockRequesterId, [{Pid,RPid,Ref} | PidRefs]},
+ true = ets:insert(global_locks, Lock),
+ trace_message(S, {ins_lock, node(Pid)}, [Id, Pid]).
+
+is_global_lock_set() ->
+ is_lock_set(?GLOBAL_RID).
+
+is_lock_set(ResourceId) ->
+ ets:member(global_locks, ResourceId).
+
+handle_del_lock({ResourceId, LockReqId}, Pid, S0) ->
+ ?trace({handle_del_lock, {pid,Pid},{id,{ResourceId, LockReqId}}}),
+ case ets:lookup(global_locks, ResourceId) of
+ [{ResourceId, LockReqId, PidRefs}]->
+ remove_lock(ResourceId, LockReqId, Pid, PidRefs, false, S0);
+ _ -> S0
+ end.
+
+remove_lock(ResourceId, LockRequesterId, Pid, [{Pid,RPid,Ref}], Down, S0) ->
+ ?trace({remove_lock_1, {id,ResourceId},{pid,Pid}}),
+ true = erlang:demonitor(Ref, [flush]),
+ kill_monitor_proc(RPid, Pid),
+ true = ets:delete(global_locks, ResourceId),
+ true = ets:delete_object(global_pid_ids, {Pid, ResourceId}),
+ true = ets:delete_object(global_pid_ids, {Ref, ResourceId}),
+ S = case ResourceId of
+ ?GLOBAL_RID -> S0#state{global_lock_down = Down};
+ _ -> S0
+ end,
+ trace_message(S, {rem_lock, node(Pid)},
+ [{ResourceId, LockRequesterId}, Pid]);
+remove_lock(ResourceId, LockRequesterId, Pid, PidRefs0, _Down, S) ->
+ ?trace({remove_lock_2, {id,ResourceId},{pid,Pid}}),
+ PidRefs = case lists:keyfind(Pid, 1, PidRefs0) of
+ {Pid, RPid, Ref} ->
+ true = erlang:demonitor(Ref, [flush]),
+ kill_monitor_proc(RPid, Pid),
+ true = ets:delete_object(global_pid_ids,
+ {Ref, ResourceId}),
+ lists:keydelete(Pid, 1, PidRefs0);
+ false ->
+ PidRefs0
+ end,
+ Lock = {ResourceId, LockRequesterId, PidRefs},
+ true = ets:insert(global_locks, Lock),
+ true = ets:delete_object(global_pid_ids, {Pid, ResourceId}),
+ trace_message(S, {rem_lock, node(Pid)},
+ [{ResourceId, LockRequesterId}, Pid]).
+
+kill_monitor_proc(Pid, Pid) ->
+ ok;
+kill_monitor_proc(RPid, _Pid) ->
+ exit(RPid, kill).
+
+do_ops(Ops, ConnNode, Names_ext, ExtraInfo, S0) ->
+ ?trace({do_ops, {ops,Ops}}),
+
+ XInserts = [{Name, Pid, RegNode, Method} ||
+ {Name2, Pid2, RegNode} <- Names_ext,
+ {insert, {Name, Pid, Method}} <- Ops,
+ Name =:= Name2, Pid =:= Pid2],
+ S1 = lists:foldl(fun({Name, Pid, RegNode, Method}, S1) ->
+ ins_name_ext(Name, Pid, Method, RegNode,
+ ConnNode, ExtraInfo, S1)
+ end, S0, XInserts),
+
+ XNames = [Name || {Name, _Pid, _RegNode, _Method} <- XInserts],
+ Inserts = [{Name, Pid, node(Pid), Method} ||
+ {insert, {Name, Pid, Method}} <- Ops,
+ not lists:member(Name, XNames)],
+ S2 = lists:foldl(fun({Name, Pid, _RegNode, Method}, S2) ->
+ ins_name(Name, Pid, Method, ConnNode,
+ ExtraInfo, S2)
+ end, S1, Inserts),
+
+ DelNames = [Name || {delete, Name} <- Ops],
+ lists:foldl(fun(Name, S) -> delete_global_name2(Name, S)
+ end, S2, DelNames).
+
+%% It is possible that a node that was up and running when the
+%% operations were assembled has since died. The final {in_sync,...}
+%% messages do not generate nodedown messages for such nodes. To
+%% compensate, "artificial" nodedown messages are created. Since
+%% monitor_node may take some time, processes are spawned to avoid
+%% locking up the global_name_server. Should double nodedown messages
+%% somehow occur (one of them artificial), nothing bad can happen
+%% (the second nodedown is a no-op). It is assumed that there cannot
+%% be a nodeup before the artificial nodedown.
+%%
+%% The extra nodedown messages generated here also take care of the
+%% case that a nodedown message is received _before_ the operations
+%% are run.
+sync_others(Nodes) ->
+ N = case application:get_env(kernel, ?N_CONNECT_RETRIES) of
+ {ok, NRetries} when is_integer(NRetries),
+ NRetries >= 0 -> NRetries;
+ _ -> ?DEFAULT_N_CONNECT_RETRIES
+ end,
+ lists:foreach(fun(Node) ->
+ spawn(fun() -> sync_other(Node, N) end)
+ end, Nodes).
+
+sync_other(Node, N) ->
+ erlang:monitor_node(Node, true, [allow_passive_connect]),
+ receive
+ {nodedown, Node} when N > 0 ->
+ sync_other(Node, N - 1);
+ {nodedown, Node} ->
+ ?trace({missing_nodedown, {node, Node}}),
+ error_logger:warning_msg("global: ~w failed to connect to ~w\n",
+ [node(), Node]),
+ global_name_server ! {extra_nodedown, Node}
+ after 0 ->
+ gen_server:cast({global_name_server,Node}, {in_sync,node(),true})
+ end.
+ % monitor_node(Node, false),
+ % exit(normal).
+
+insert_global_name(Name, Pid, Method, FromPidOrNode, ExtraInfo, S) ->
+ {RPid, Ref} = do_monitor(Pid),
+ true = ets:insert(global_names, {Name, Pid, Method, RPid, Ref}),
+ true = ets:insert(global_pid_names, {Pid, Name}),
+ true = ets:insert(global_pid_names, {Ref, Name}),
+ case lock_still_set(FromPidOrNode, ExtraInfo, S) of
+ true ->
+ S;
+ false ->
+ %% The node that took the lock has gone down and then up
+ %% again. The {register, ...} or {new_nodes, ...} message
+ %% was delayed and arrived after nodeup (maybe it caused
+ %% the nodeup). The DOWN signal from the monitor of the
+ %% lock has removed the lock.
+ %% Note: it is assumed here that the DOWN signal arrives
+ %% _before_ nodeup and any message that caused nodeup.
+ %% This is true of Erlang/OTP.
+ delete_global_name2(Name, S)
+ end.
+
+lock_still_set(PidOrNode, ExtraInfo, S) ->
+ case ets:lookup(global_locks, ?GLOBAL_RID) of
+ [{?GLOBAL_RID, _LockReqId, PidRefs}] when is_pid(PidOrNode) ->
+ %% Name registration.
+ lists:keymember(PidOrNode, 1, PidRefs);
+ [{?GLOBAL_RID, LockReqId, PidRefs}] when is_atom(PidOrNode) ->
+ case extra_info(lock, ExtraInfo) of
+ {?GLOBAL_RID, LockId} -> % R11B-4 or later
+ LockReqId =:= LockId;
+ undefined ->
+ lock_still_set_old(PidOrNode, LockReqId, PidRefs)
+ end;
+ [] ->
+ %% If the global lock was not removed by a DOWN message
+            %% then we have a node that does not monitor locking pids
+ %% (pre R11B-3), or an R11B-3 node (which does not ensure
+ %% that {new_nodes, ...} arrives before {del_lock, ...}).
+ not S#state.global_lock_down
+ end.
+
+%%% The following is probably overkill. It is possible that this node
+%%% has been locked again, but it is a rare occasion.
+lock_still_set_old(_Node, ReqId, _PidRefs) when is_pid(ReqId) ->
+ %% Cannot do better than return true.
+ true;
+lock_still_set_old(Node, ReqId, PidRefs) when is_list(ReqId) ->
+ %% Connection, version > 4, but before R11B-4.
+ [P || {P, _RPid, _Ref} <- PidRefs, node(P) =:= Node] =/= [].
+
+extra_info(Tag, ExtraInfo) ->
+ %% ExtraInfo used to be a list of nodes (vsn 2).
+ case catch lists:keyfind(Tag, 1, ExtraInfo) of
+ {Tag, Info} ->
+ Info;
+ _ ->
+ undefined
+ end.
+
+del_name(Ref, S) ->
+ NameL = [{Name, Pid} ||
+ {_, Name} <- ets:lookup(global_pid_names, Ref),
+ {_, Pid, _Method, _RPid, Ref1} <-
+ ets:lookup(global_names, Name),
+ Ref1 =:= Ref],
+ ?trace({async_del_name, self(), NameL, Ref}),
+ case NameL of
+ [{Name, Pid}] ->
+ _ = del_names(Name, Pid, S),
+ delete_global_name2(Name, S);
+ [] ->
+ S
+ end.
+
+%% Send {async_del_name, ...} to old nodes (pre R11B-3).
+del_names(Name, Pid, S) ->
+ Send = case ets:lookup(global_names_ext, Name) of
+ [{Name, Pid, RegNode}] ->
+ RegNode =:= node();
+ [] ->
+ node(Pid) =:= node()
+ end,
+ if
+ Send ->
+ ?trace({del_names, {pid,Pid}, {name,Name}}),
+ S#state.the_deleter ! {delete_name, self(), Name, Pid};
+ true ->
+ ok
+ end.
+
+%% Keeps the entry in global_names for whereis_name/1.
+delete_global_name_keep_pid(Name, S) ->
+ case ets:lookup(global_names, Name) of
+ [{Name, Pid, _Method, RPid, Ref}] ->
+ delete_global_name2(Name, Pid, RPid, Ref, S);
+ [] ->
+ S
+ end.
+
+delete_global_name2(Name, S) ->
+ case ets:lookup(global_names, Name) of
+ [{Name, Pid, _Method, RPid, Ref}] ->
+ true = ets:delete(global_names, Name),
+ delete_global_name2(Name, Pid, RPid, Ref, S);
+ [] ->
+ S
+ end.
+
+delete_global_name2(Name, Pid, RPid, Ref, S) ->
+ true = erlang:demonitor(Ref, [flush]),
+ kill_monitor_proc(RPid, Pid),
+ delete_global_name(Name, Pid),
+ ?trace({delete_global_name,{item,Name},{pid,Pid}}),
+ true = ets:delete_object(global_pid_names, {Pid, Name}),
+ true = ets:delete_object(global_pid_names, {Ref, Name}),
+ case ets:lookup(global_names_ext, Name) of
+ [{Name, Pid, RegNode}] ->
+ true = ets:delete(global_names_ext, Name),
+ ?trace({delete_global_name, {name,Name,{pid,Pid},{RegNode,Pid}}}),
+ dounlink_ext(Pid, RegNode);
+ [] ->
+ ?trace({delete_global_name,{name,Name,{pid,Pid},{node(Pid),Pid}}}),
+ ok
+ end,
+ trace_message(S, {del_name, node(Pid)}, [Name, Pid]).
+
+%% delete_global_name/2 is traced by the inviso application.
+%% Do not change.
+delete_global_name(_Name, _Pid) ->
+ ok.
+
+%%-----------------------------------------------------------------
+%% The locker is a satellite process to global_name_server. When a
+%% nodeup is received from a new node the global_name_server sends a
+%% message to the locker. The locker tries to set a lock in our
+%% partition, i.e. on all nodes known to us. When the lock is set, it
+%% tells global_name_server about it, and keeps the lock set.
+%% global_name_server sends a cancel message to the locker when the
+%% partitions are connected.
+
+%% There are two versions of the protocol between lockers on two nodes:
+%% Version 1: used by unpatched R7.
+%% Version 2: the messages exchanged between the lockers include the known
+%% nodes (see OTP-3576).
+%%-----------------------------------------------------------------
+
+-define(locker_vsn, 2).
+
+-record(multi,
+ {local = [], % Requests from nodes on the local host.
+ remote = [], % Other requests.
+ known = [], % Copy of global_name_server's known nodes. It's
+ % faster to keep a copy of known than asking
+ % for it when needed.
+ the_boss, % max([node() | 'known'])
+ just_synced = false, % true if node() synced just a moment ago
+ %% Statistics:
+ do_trace % bool()
+ }).
+
+-record(him, {node, locker, vsn, my_tag}).
+
+start_the_locker(DoTrace) ->
+ spawn_link(fun() -> init_the_locker(DoTrace) end).
+
+init_the_locker(DoTrace) ->
+ process_flag(trap_exit, true), % needed?
+ S0 = #multi{do_trace = DoTrace},
+ S1 = update_locker_known({add, get_known()}, S0),
+ loop_the_locker(S1),
+ erlang:error(locker_exited).
+
+loop_the_locker(S) ->
+ ?trace({loop_the_locker,S}),
+ receive
+ Message when element(1, Message) =/= nodeup ->
+ the_locker_message(Message, S)
+ after 0 ->
+ Timeout =
+ case {S#multi.local, S#multi.remote} of
+ {[],[]} ->
+ infinity;
+ _ ->
+                    %% It is important that the timeout is greater
+                    %% than zero; otherwise, if this node fails after
+                    %% setting the lock, the chance that some other
+                    %% node in the partition gets to set the lock is
+                    %% very slim.
+ if
+ S#multi.just_synced ->
+ 0; % no reason to wait after success
+ S#multi.known =:= [] ->
+ 200; % just to get started
+ true ->
+ erlang:min(1000 + 100*length(S#multi.known),
+ 3000)
+ end
+ end,
+ S1 = S#multi{just_synced = false},
+ receive
+ Message when element(1, Message) =/= nodeup ->
+ the_locker_message(Message, S1)
+ after Timeout ->
+ case is_global_lock_set() of
+ true ->
+ loop_the_locker(S1);
+ false ->
+ select_node(S1)
+ end
+ end
+ end.
+
+the_locker_message({his_the_locker, HisTheLocker, HisKnown0, _MyKnown}, S) ->
+ ?trace({his_the_locker, HisTheLocker, {node,node(HisTheLocker)}}),
+ {HisVsn, _HisKnown} = HisKnown0,
+ true = HisVsn > 4,
+ receive
+ {nodeup, Node, MyTag} when node(HisTheLocker) =:= Node ->
+ ?trace({the_locker_nodeup, {node,Node},{mytag,MyTag}}),
+ Him = #him{node = node(HisTheLocker), my_tag = MyTag,
+ locker = HisTheLocker, vsn = HisVsn},
+ loop_the_locker(add_node(Him, S));
+ {cancel, Node, _Tag, no_fun} when node(HisTheLocker) =:= Node ->
+ loop_the_locker(S)
+ after 60000 ->
+ ?trace({nodeupnevercame, node(HisTheLocker)}),
+ error_logger:error_msg("global: nodeup never came ~w ~w\n",
+ [node(), node(HisTheLocker)]),
+ loop_the_locker(S#multi{just_synced = false})
+ end;
+the_locker_message({cancel, _Node, undefined, no_fun}, S) ->
+ ?trace({cancel_the_locker, undefined, {node,_Node}}),
+ %% If we actually cancel something when a cancel message with the
+ %% tag 'undefined' arrives, we may be acting on an old nodedown,
+ %% to cancel a new nodeup, so we can't do that.
+ loop_the_locker(S);
+the_locker_message({cancel, Node, Tag, no_fun}, S) ->
+ ?trace({the_locker, cancel, {multi,S}, {tag,Tag},{node,Node}}),
+ receive
+ {nodeup, Node, Tag} ->
+ ?trace({cancelnodeup2, {node,Node},{tag,Tag}}),
+ ok
+ after 0 ->
+ ok
+ end,
+ loop_the_locker(remove_node(Node, S));
+the_locker_message({lock_set, _Pid, false, _}, S) ->
+ ?trace({the_locker, spurious, {node,node(_Pid)}}),
+ loop_the_locker(S);
+the_locker_message({lock_set, Pid, true, _HisKnown}, S) ->
+ Node = node(Pid),
+ ?trace({the_locker, self(), spontaneous, {node,Node}}),
+ case find_node_tag(Node, S) of
+ {true, MyTag, HisVsn} ->
+ LockId = locker_lock_id(Pid, HisVsn),
+ {IsLockSet, S1} = lock_nodes_safely(LockId, [], S),
+ Pid ! {lock_set, self(), IsLockSet, S1#multi.known},
+ Known2 = [node() | S1#multi.known],
+ ?trace({the_locker, spontaneous, {known2, Known2},
+ {node,Node}, {is_lock_set,IsLockSet}}),
+ case IsLockSet of
+ true ->
+ gen_server:cast(global_name_server,
+ {lock_is_set, Node, MyTag, LockId}),
+ ?trace({lock_sync_done, {pid,Pid},
+ {node,node(Pid)}, {me,self()}}),
+ %% Wait for global to tell us to remove lock.
+ %% Should the other locker's node die,
+ %% global_name_server will receive nodedown, and
+ %% then send {cancel, Node, Tag}.
+ receive
+ {cancel, Node, _Tag, Fun} ->
+ ?trace({cancel_the_lock,{node,Node}}),
+ call_fun(Fun),
+ delete_global_lock(LockId, Known2)
+ end,
+ S2 = S1#multi{just_synced = true},
+ loop_the_locker(remove_node(Node, S2));
+ false ->
+ loop_the_locker(S1#multi{just_synced = false})
+ end;
+ false ->
+ ?trace({the_locker, not_there, {node,Node}}),
+ Pid ! {lock_set, self(), false, S#multi.known},
+ loop_the_locker(S)
+ end;
+the_locker_message({add_to_known, Nodes}, S) ->
+ S1 = update_locker_known({add, Nodes}, S),
+ loop_the_locker(S1);
+the_locker_message({remove_from_known, Node}, S) ->
+ S1 = update_locker_known({remove, Node}, S),
+ loop_the_locker(S1);
+the_locker_message({do_trace, DoTrace}, S) ->
+ loop_the_locker(S#multi{do_trace = DoTrace});
+the_locker_message(Other, S) ->
+ unexpected_message(Other, locker),
+ ?trace({the_locker, {other_msg, Other}}),
+ loop_the_locker(S).
+
+%% Requests from nodes on the local host are chosen before requests
+%% from other nodes. This should be a safe optimization.
+select_node(S) ->
+ UseRemote = S#multi.local =:= [],
+ Others1 = if UseRemote -> S#multi.remote; true -> S#multi.local end,
+ Others2 = exclude_known(Others1, S#multi.known),
+ S1 = if
+ UseRemote -> S#multi{remote = Others2};
+ true -> S#multi{local = Others2}
+ end,
+ if
+ Others2 =:= [] ->
+ loop_the_locker(S1);
+ true ->
+ Him = random_element(Others2),
+ #him{locker = HisTheLocker, vsn = HisVsn,
+ node = Node, my_tag = MyTag} = Him,
+ HisNode = [Node],
+ Us = [node() | HisNode],
+ LockId = locker_lock_id(HisTheLocker, HisVsn),
+ ?trace({select_node, self(), {us, Us}}),
+ %% HisNode = [Node] prevents deadlock:
+ {IsLockSet, S2} = lock_nodes_safely(LockId, HisNode, S1),
+ case IsLockSet of
+ true ->
+ Known1 = Us ++ S2#multi.known,
+ ?trace({sending_lock_set, self(), {his,HisTheLocker}}),
+ HisTheLocker ! {lock_set, self(), true, S2#multi.known},
+ S3 = lock_is_set(S2, Him, MyTag, Known1, LockId),
+ loop_the_locker(S3);
+ false ->
+ loop_the_locker(S2)
+ end
+ end.
+
+%% Version 5: Both sides use the same requester id. Thereby the nodes
+%% common to both sides are locked by both locker processes. This
+%% means that the lock is still there when the 'new_nodes' message is
+%% received even if the other side has deleted the lock.
+locker_lock_id(Pid, Vsn) when Vsn > 4 ->
+ {?GLOBAL_RID, lists:sort([self(), Pid])}.
+
+lock_nodes_safely(LockId, Extra, S0) ->
+ %% Locking node() could stop some node that has already locked the
+ %% boss, so just check if it is possible to lock node().
+ First = delete_nonode([S0#multi.the_boss]),
+ case ([node()] =:= First) orelse (can_set_lock(LockId) =/= false) of
+ true ->
+ %% Locking the boss first is an optimization.
+ case set_lock(LockId, First, 0) of
+ true ->
+ S = update_locker_known(S0),
+ %% The boss may have changed, but don't bother.
+ Second = delete_nonode([node() | Extra] -- First),
+ case set_lock(LockId, Second, 0) of
+ true ->
+ Known = S#multi.known,
+ case set_lock(LockId, Known -- First, 0) of
+ true ->
+ _ = locker_trace(S, ok, {First, Known}),
+ {true, S};
+ false ->
+ %% Since the boss is locked we
+ %% should have gotten the lock, at
+ %% least if no one else is locking
+ %% 'global'. Calling set_lock with
+ %% Retries > 0 does not seem to
+ %% speed things up.
+ SoFar = First ++ Second,
+ del_lock(LockId, SoFar),
+ _ = locker_trace(S, not_ok, {Known,SoFar}),
+ {false, S}
+ end;
+ false ->
+ del_lock(LockId, First),
+ _ = locker_trace(S, not_ok, {Second, First}),
+ {false, S}
+ end;
+ false ->
+ _ = locker_trace(S0, not_ok, {First, []}),
+ {false, S0}
+ end;
+ false ->
+ {false, S0}
+ end.
+
+delete_nonode(L) ->
+ lists:delete(nonode@nohost, L).
+
+%% Let the server add timestamp.
+locker_trace(#multi{do_trace = false}, _, _Nodes) ->
+ ok;
+locker_trace(#multi{do_trace = true}, ok, Ns) ->
+ global_name_server ! {trace_message, {locker_succeeded, node()}, Ns};
+locker_trace(#multi{do_trace = true}, not_ok, Ns) ->
+ global_name_server ! {trace_message, {locker_failed, node()}, Ns};
+locker_trace(#multi{do_trace = true}, rejected, Ns) ->
+ global_name_server ! {trace_message, {lock_rejected, node()}, Ns}.
+
+update_locker_known(S) ->
+ receive
+ {add_to_known, Nodes} ->
+ S1 = update_locker_known({add, Nodes}, S),
+ update_locker_known(S1);
+ {remove_from_known, Node} ->
+ S1 = update_locker_known({remove, Node}, S),
+ update_locker_known(S1)
+ after 0 ->
+ S
+ end.
+
+update_locker_known(Upd, S) ->
+ Known = case Upd of
+ {add, Nodes} -> Nodes ++ S#multi.known;
+ {remove, Node} -> lists:delete(Node, S#multi.known)
+ end,
+ TheBoss = the_boss([node() | Known]),
+ S#multi{known = Known, the_boss = TheBoss}.
+
+random_element(L) ->
+ {A,B,C} = now(),
+ E = (A+B+C) rem length(L),
+ lists:nth(E+1, L).
+
+exclude_known(Others, Known) ->
+ [N || N <- Others, not lists:member(N#him.node, Known)].
+
+lock_is_set(S, Him, MyTag, Known1, LockId) ->
+ Node = Him#him.node,
+ receive
+ {lock_set, P, true, _} when node(P) =:= Node ->
+ gen_server:cast(global_name_server,
+ {lock_is_set, Node, MyTag, LockId}),
+ ?trace({lock_sync_done, {p,P, node(P)}, {me,self()}}),
+
+ %% Wait for global to tell us to remove lock. Should the
+ %% other locker's node die, global_name_server will
+ %% receive nodedown, and then send {cancel, Node, Tag, Fun}.
+ receive
+ {cancel, Node, _, Fun} ->
+ ?trace({lock_set_loop, {known1,Known1}}),
+ call_fun(Fun),
+ delete_global_lock(LockId, Known1)
+ end,
+ S#multi{just_synced = true,
+ local = lists:delete(Him, S#multi.local),
+ remote = lists:delete(Him, S#multi.remote)};
+ {lock_set, P, false, _} when node(P) =:= Node ->
+ ?trace({not_both_set, {node,Node},{p, P},{known1,Known1}}),
+ _ = locker_trace(S, rejected, Known1),
+ delete_global_lock(LockId, Known1),
+ S;
+ {cancel, Node, _, Fun} ->
+ ?trace({the_locker, cancel2, {node,Node}}),
+ call_fun(Fun),
+ _ = locker_trace(S, rejected, Known1),
+ delete_global_lock(LockId, Known1),
+ remove_node(Node, S);
+ {'EXIT', _, _} ->
+ ?trace({the_locker, exit, {node,Node}}),
+ _ = locker_trace(S, rejected, Known1),
+ delete_global_lock(LockId, Known1),
+ S
+ %% There used to be an 'after' clause (OTP-4902), but it is
+ %% no longer needed:
+ %% OTP-5770. Version 5 of the protocol. Deadlock can no longer
+ %% occur due to the fact that if a partition is locked, one
+ %% node in the other partition is also locked with the same
+ %% lock-id, which makes it impossible for any node in the
+ %% other partition to lock its partition unless it negotiates
+ %% with the first partition.
+ end.
+
+%% The locker does the {new_nodes, ...} call before removing the lock.
+call_fun(no_fun) ->
+ ok;
+call_fun(Fun) ->
+ Fun().
+
+%% The lock on the boss is removed last. The purpose is to reduce the
+%% risk of failing to lock the known nodes after having locked the
+%% boss. (Assumes the boss occurs only once.)
+delete_global_lock(LockId, Nodes) ->
+ TheBoss = the_boss(Nodes),
+ del_lock(LockId, lists:delete(TheBoss, Nodes)),
+ del_lock(LockId, [TheBoss]).
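+%% Editor's illustration (not part of the original module, made-up node
+%% names): with Nodes = ['a@h', 'b@h', 'c@h'] the boss is 'c@h'
+%% (lists:max/1), so the lock is first deleted on ['a@h', 'b@h'] and only
+%% then on ['c@h'].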
+
+the_boss(Nodes) ->
+ lists:max(Nodes).
+
+find_node_tag(Node, S) ->
+ case find_node_tag2(Node, S#multi.local) of
+ false ->
+ find_node_tag2(Node, S#multi.remote);
+ Reply ->
+ Reply
+ end.
+
+find_node_tag2(_Node, []) ->
+ false;
+find_node_tag2(Node, [#him{node = Node, my_tag = MyTag, vsn = HisVsn} | _]) ->
+ {true, MyTag, HisVsn};
+find_node_tag2(Node, [_E | Rest]) ->
+ find_node_tag2(Node, Rest).
+
+remove_node(Node, S) ->
+ S#multi{local = remove_node2(Node, S#multi.local),
+ remote = remove_node2(Node, S#multi.remote)}.
+
+remove_node2(_Node, []) ->
+ [];
+remove_node2(Node, [#him{node = Node} | Rest]) ->
+ Rest;
+remove_node2(Node, [E | Rest]) ->
+ [E | remove_node2(Node, Rest)].
+
+add_node(Him, S) ->
+ case is_node_local(Him#him.node) of
+ true ->
+ S#multi{local = [Him | S#multi.local]};
+ false ->
+ S#multi{remote = [Him | S#multi.remote]}
+ end.
+
+is_node_local(Node) ->
+ {ok, Host} = inet:gethostname(),
+ case catch split_node(atom_to_list(Node), $@, []) of
+ [_, Host] ->
+ true;
+ _ ->
+ false
+ end.
+
+split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
+split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
+split_node([], _, Ack) -> [lists:reverse(Ack)].
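+%% Editor's illustration (not part of the original module):
+%%   split_node("foo@myhost", $@, []) -> ["foo", "myhost"]
+%% so is_node_local('foo@myhost') returns true when inet:gethostname()
+%% returns {ok, "myhost"}.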
+
+cancel_locker(Node, S, Tag) ->
+ cancel_locker(Node, S, Tag, no_fun).
+
+cancel_locker(Node, S, Tag, ToBeRunOnLockerF) ->
+ S#state.the_locker ! {cancel, Node, Tag, ToBeRunOnLockerF},
+ Resolvers = S#state.resolvers,
+ ?trace({cancel_locker, {node,Node},{tag,Tag},
+ {sync_tag_my, get({sync_tag_my, Node})},{resolvers,Resolvers}}),
+ case lists:keyfind(Node, 1, Resolvers) of
+ {_, Tag, Resolver} ->
+ ?trace({{resolver, Resolver}}),
+ exit(Resolver, kill),
+ S1 = trace_message(S, {kill_resolver, Node}, [Tag, Resolver]),
+ S1#state{resolvers = lists:keydelete(Node, 1, Resolvers)};
+ _ ->
+ S
+ end.
+
+reset_node_state(Node) ->
+ ?trace({{node,Node}, reset_node_state, get()}),
+ erase({wait_lock, Node}),
+ erase({save_ops, Node}),
+ erase({pre_connect, Node}),
+ erase({prot_vsn, Node}),
+ erase({sync_tag_my, Node}),
+ erase({sync_tag_his, Node}),
+ erase({lock_id, Node}).
+
+%% Some node sent us its names. When a name clash is found, the resolve
+%% function is called from the node that compares smaller (node() < Node) =>
+%% all resolve functions are called from the same partition.
+exchange_names([{Name, Pid, Method} | Tail], Node, Ops, Res) ->
+ case ets:lookup(global_names, Name) of
+ [{Name, Pid, _Method, _RPid2, _Ref2}] ->
+ exchange_names(Tail, Node, Ops, Res);
+ [{Name, Pid2, Method2, _RPid2, _Ref2}] when node() < Node ->
+ %% Name clash! Add the result of resolving to Res(olved).
+ %% We know that node(Pid) =/= node(), so we don't
+ %% need to link/unlink to Pid.
+ Node2 = node(Pid2), %% Node2 is connected to node().
+ case rpc:call(Node2, ?MODULE, resolve_it,
+ [Method2, Name, Pid, Pid2]) of
+ Pid ->
+ Op = {insert, {Name, Pid, Method}},
+ exchange_names(Tail, Node, [Op | Ops], Res);
+ Pid2 ->
+ Op = {insert, {Name, Pid2, Method2}},
+ exchange_names(Tail, Node, Ops, [Op | Res]);
+ none ->
+ Op = {delete, Name},
+ exchange_names(Tail, Node, [Op | Ops], [Op | Res]);
+ {badrpc, Badrpc} ->
+ error_logger:info_msg("global: badrpc ~w received when "
+ "conflicting name ~w was found\n",
+ [Badrpc, Name]),
+ Op = {insert, {Name, Pid, Method}},
+ exchange_names(Tail, Node, [Op | Ops], Res);
+ Else ->
+ error_logger:info_msg("global: Resolve method ~w for "
+ "conflicting name ~w returned ~w\n",
+ [Method, Name, Else]),
+ Op = {delete, Name},
+ exchange_names(Tail, Node, [Op | Ops], [Op | Res])
+ end;
+ [{Name, _Pid2, _Method, _RPid, _Ref}] ->
+ %% The other node will solve the conflict.
+ exchange_names(Tail, Node, Ops, Res);
+ _ ->
+ %% Entirely new name.
+ exchange_names(Tail, Node,
+ [{insert, {Name, Pid, Method}} | Ops], Res)
+ end;
+exchange_names([], _, Ops, Res) ->
+ ?trace({exchange_names_finish,{ops,Ops},{res,Res}}),
+ {Ops, Res}.
+
+resolve_it(Method, Name, Pid1, Pid2) ->
+ catch Method(Name, Pid1, Pid2).
+
+minmax(P1,P2) ->
+ if node(P1) < node(P2) -> {P1, P2}; true -> {P2, P1} end.
+
+-spec random_exit_name(term(), pid(), pid()) -> pid().
+random_exit_name(Name, Pid, Pid2) ->
+ {Min, Max} = minmax(Pid, Pid2),
+ error_logger:info_msg("global: Name conflict terminating ~w\n",
+ [{Name, Max}]),
+ exit(Max, kill),
+ Min.
+
+random_notify_name(Name, Pid, Pid2) ->
+ {Min, Max} = minmax(Pid, Pid2),
+ Max ! {global_name_conflict, Name},
+ Min.
+
+-spec notify_all_name(term(), pid(), pid()) -> 'none'.
+notify_all_name(Name, Pid, Pid2) ->
+ Pid ! {global_name_conflict, Name, Pid2},
+ Pid2 ! {global_name_conflict, Name, Pid},
+ none.
+
+dolink_ext(Pid, RegNode) when RegNode =:= node() ->
+ link(Pid);
+dolink_ext(_, _) ->
+ ok.
+
+dounlink_ext(Pid, RegNode) when RegNode =:= node() ->
+ unlink_pid(Pid);
+dounlink_ext(_Pid, _RegNode) ->
+ ok.
+
+unlink_pid(Pid) ->
+ case ets:member(global_pid_names, Pid) of
+ false ->
+ case ets:member(global_pid_ids, Pid) of
+ false ->
+ unlink(Pid);
+ true ->
+ ok
+ end;
+ true ->
+ ok
+ end.
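+%% Editor's note (not part of the original module): the pid is unlinked only
+%% when it no longer appears in global_pid_names or global_pid_ids, that is,
+%% when it holds no registered name and no lock; otherwise the link is kept,
+%% presumably because it is still needed for the remaining registrations or
+%% locks.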
+
+pid_is_locking(Pid, PidRefs) ->
+ lists:keyfind(Pid, 1, PidRefs) =/= false.
+
+delete_lock(Ref, S0) ->
+ Locks = pid_locks(Ref),
+ del_locks(Locks, Ref, S0#state.known),
+ F = fun({ResourceId, LockRequesterId, PidRefs}, S) ->
+ {Pid, _RPid, Ref} = lists:keyfind(Ref, 3, PidRefs),
+ remove_lock(ResourceId, LockRequesterId, Pid, PidRefs, true,S)
+ end,
+ lists:foldl(F, S0, Locks).
+
+pid_locks(Ref) ->
+ L = lists:flatmap(fun({_, ResourceId}) ->
+ ets:lookup(global_locks, ResourceId)
+ end, ets:lookup(global_pid_ids, Ref)),
+ [Lock || Lock = {_Id, _Req, PidRefs} <- L,
+ rpid_is_locking(Ref, PidRefs)].
+
+rpid_is_locking(Ref, PidRefs) ->
+ lists:keyfind(Ref, 3, PidRefs) =/= false.
+
+%% Send {async_del_lock, ...} to old nodes (pre R11B-3).
+del_locks([{ResourceId, _LockReqId, PidRefs} | Tail], Ref, KnownNodes) ->
+ {Pid, _RPid, Ref} = lists:keyfind(Ref, 3, PidRefs),
+ case node(Pid) =:= node() of
+ true ->
+ gen_server:abcast(KnownNodes, global_name_server,
+ {async_del_lock, ResourceId, Pid});
+ false ->
+ ok
+ end,
+ del_locks(Tail, Ref, KnownNodes);
+del_locks([], _Ref, _KnownNodes) ->
+ ok.
+
+handle_nodedown(Node, S) ->
+ %% DOWN signals from monitors have removed locks and registered names.
+ #state{known = Known, synced = Syncs} = S,
+ NewS = cancel_locker(Node, S, get({sync_tag_my, Node})),
+ NewS#state.the_locker ! {remove_from_known, Node},
+ reset_node_state(Node),
+ NewS#state{known = lists:delete(Node, Known),
+ synced = lists:delete(Node, Syncs)}.
+
+get_names() ->
+ ets:select(global_names,
+ ets:fun2ms(fun({Name, Pid, Method, _RPid, _Ref}) ->
+ {Name, Pid, Method}
+ end)).
+
+get_names_ext() ->
+ ets:tab2list(global_names_ext).
+
+get_known() ->
+ gen_server:call(global_name_server, get_known, infinity).
+
+random_sleep(Times) ->
+ case (Times rem 10) of
+ 0 -> erase(random_seed);
+ _ -> ok
+ end,
+ case get(random_seed) of
+ undefined ->
+ {A1, A2, A3} = now(),
+ random:seed(A1, A2, A3 + erlang:phash(node(), 100000));
+ _ -> ok
+ end,
+ %% First time 1/4 seconds, then doubling each time up to 8 seconds max.
+ Tmax = if Times > 5 -> 8000;
+ true -> ((1 bsl Times) * 1000) div 8
+ end,
+ T = random:uniform(Tmax),
+ ?trace({random_sleep, {me,self()}, {times,Times}, {t,T}, {tmax,Tmax}}),
+ receive after T -> ok end.
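+%% Editor's illustration (not part of the original module): for Times = 1..5
+%% the Tmax values are 250, 500, 1000, 2000 and 4000 ms; for Times > 5, Tmax
+%% is capped at 8000 ms, and the actual sleep T is uniform in [1, Tmax].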
+
+dec(infinity) -> infinity;
+dec(N) -> N - 1.
+
+send_again(Msg) ->
+ Me = self(),
+ spawn(fun() -> timer(Me, Msg) end).
+
+timer(Pid, Msg) ->
+ random_sleep(5),
+ Pid ! Msg.
+
+change_our_node_name(NewNode, S) ->
+ S1 = trace_message(S, {new_node_name, NewNode}, []),
+ S1#state{node_name = NewNode}.
+
+trace_message(#state{trace = no_trace}=S, _M, _X) ->
+ S;
+trace_message(S, M, X) ->
+ S#state{trace = [trace_message(M, X) | S#state.trace]}.
+
+trace_message(M, X) ->
+ {node(), now(), M, nodes(), X}.
+
+%%-----------------------------------------------------------------
+%% Each sync process corresponds to one call to sync. Each such
+%% process asks the global_name_server on all Nodes if it is in sync
+%% with Nodes. If not, that (other) node spawns a syncer process that
+%% waits for global to get in sync with all Nodes. When it is in
+%% sync, the syncer process tells the original sync process about it.
+%%-----------------------------------------------------------------
+start_sync(Nodes, From) ->
+ spawn_link(fun() -> sync_init(Nodes, From) end).
+
+sync_init(Nodes, From) ->
+ lists:foreach(fun(Node) -> monitor_node(Node, true) end, Nodes),
+ sync_loop(Nodes, From).
+
+sync_loop([], From) ->
+ gen_server:reply(From, ok);
+sync_loop(Nodes, From) ->
+ receive
+ {nodedown, Node} ->
+ monitor_node(Node, false),
+ sync_loop(lists:delete(Node, Nodes), From);
+ {synced, SNodes} ->
+ lists:foreach(fun(N) -> monitor_node(N, false) end, SNodes),
+ sync_loop(Nodes -- SNodes, From)
+ end.
+
+%%%=======================================================================
+%%% Check which nodes are allowed to be synced, based on the current global_groups definition
+%%%=======================================================================
+check_sync_nodes() ->
+ case get_own_nodes() of
+ {ok, all} ->
+ nodes();
+ {ok, NodesNG} ->
+ %% global_groups parameter is defined, we are not allowed to sync
+ %% with nodes not in our own global group.
+ intersection(nodes(), NodesNG);
+ {error, _} = Error ->
+ Error
+ end.
+
+check_sync_nodes(SyncNodes) ->
+ case get_own_nodes() of
+ {ok, all} ->
+ SyncNodes;
+ {ok, NodesNG} ->
+ %% global_groups parameter is defined, we are not allowed to sync
+ %% with nodes not in our own global group.
+ OwnNodeGroup = intersection(nodes(), NodesNG),
+ IllegalSyncNodes = (SyncNodes -- [node() | OwnNodeGroup]),
+ case IllegalSyncNodes of
+ [] -> SyncNodes;
+ _ -> {error, {"Trying to sync nodes not defined in "
+ "the own global group", IllegalSyncNodes}}
+ end;
+ {error, _} = Error ->
+ Error
+ end.
+
+get_own_nodes() ->
+ case global_group:get_own_nodes_with_errors() of
+ {error, Error} ->
+ {error, {"global_groups definition error", Error}};
+ OkTup ->
+ OkTup
+ end.
+
+%%-----------------------------------------------------------------
+%% The deleter process is a satellite process to global_name_server
+%% that does background batch deleting of names when a process
+%% that had globally registered names dies. It is started by and
+%% linked to global_name_server.
+%%-----------------------------------------------------------------
+
+start_the_deleter(Global) ->
+ spawn_link(fun() -> loop_the_deleter(Global) end).
+
+loop_the_deleter(Global) ->
+ Deletions = collect_deletions(Global, []),
+ ?trace({loop_the_deleter, self(), {deletions,Deletions},
+ {names,get_names()}}),
+ %% trans_all_known is called rather than trans/3 with nodes() as
+ %% third argument. The reason is that known gets updated by
+ %% new_nodes when the lock is still set. nodes() on the other hand
+ %% could be updated later (if in_sync is received after the lock
+ %% is gone). It is not likely that in_sync would be received after
+ %% the lock has been taken here, but using trans_all_known makes it
+ %% even less likely.
+ trans_all_known(
+ fun(Known) ->
+ lists:map(
+ fun({Name,Pid}) ->
+ gen_server:abcast(Known, global_name_server,
+ {async_del_name, Name, Pid})
+ end, Deletions)
+ end),
+ loop_the_deleter(Global).
+
+collect_deletions(Global, Deletions) ->
+ receive
+ {delete_name, Global, Name, Pid} ->
+ collect_deletions(Global, [{Name,Pid} | Deletions]);
+ Other ->
+ unexpected_message(Other, deleter),
+ collect_deletions(Global, Deletions)
+ after case Deletions of
+ [] -> infinity;
+ _ -> 0
+ end ->
+ lists:reverse(Deletions)
+ end.
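+%% Editor's note (not part of the original module): the 'after' timeout above
+%% is infinity while no deletion has been collected and 0 once at least one
+%% has, so the batch is flushed as soon as the mailbox has been drained.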
+
+%% The registrar is a helper process that registers and unregisters
+%% names. Since it never dies, it ensures that names are registered and
+%% unregistered on all known nodes. It is started by and linked to
+%% global_name_server.
+
+start_the_registrar() ->
+ spawn_link(fun() -> loop_the_registrar() end).
+
+loop_the_registrar() ->
+ receive
+ {trans_all_known, Fun, From} ->
+ ?trace({loop_the_registrar, self(), Fun, From}),
+ gen_server:reply(From, trans_all_known(Fun));
+ Other ->
+ unexpected_message(Other, register)
+ end,
+ loop_the_registrar().
+
+unexpected_message({'EXIT', _Pid, _Reason}, _What) ->
+ %% global_name_server died
+ ok;
+unexpected_message(Message, What) ->
+ error_logger:warning_msg("The global_name_server ~w process "
+ "received an unexpected message:\n~p\n",
+ [What, Message]).
+
+%%% Utilities
+
+%% When/if erlang:monitor() is changed to return without first trying to
+%% connect to the other node, this function can be removed.
+do_monitor(Pid) ->
+ case (node(Pid) =:= node()) orelse lists:member(node(Pid), nodes()) of
+ true ->
+ %% Assume the node is still up
+ {Pid, erlang:monitor(process, Pid)};
+ false ->
+ F = fun() ->
+ Ref = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Ref, process, Pid, _Info} ->
+ exit(normal)
+ end
+ end,
+ erlang:spawn_monitor(F)
+ end.
+
+intersection(_, []) ->
+ [];
+intersection(L1, L2) ->
+ L1 -- (L1 -- L2).
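+%% Editor's illustration (not part of the original module):
+%%   intersection([a,b,c], [b,c,d]) -> [b,c]
+%% The double list subtraction keeps the elements of L1 that also occur in
+%% L2, in their original order in L1.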
diff --git a/lib/kernel/src/global_group.erl b/lib/kernel/src/global_group.erl
new file mode 100644
index 0000000000..7e141ac5c7
--- /dev/null
+++ b/lib/kernel/src/global_group.erl
@@ -0,0 +1,1347 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(global_group).
+
+%% Groups nodes into global groups, each with its own global name space.
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start/0, start_link/0, stop/0, init/1]).
+-export([handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([global_groups/0]).
+-export([monitor_nodes/1]).
+-export([own_nodes/0]).
+-export([registered_names/1]).
+-export([send/2]).
+-export([send/3]).
+-export([whereis_name/1]).
+-export([whereis_name/2]).
+-export([global_groups_changed/1]).
+-export([global_groups_added/1]).
+-export([global_groups_removed/1]).
+-export([sync/0]).
+-export([ng_add_check/2, ng_add_check/3]).
+
+-export([info/0]).
+-export([registered_names_test/1]).
+-export([send_test/2]).
+-export([whereis_name_test/1]).
+-export([get_own_nodes/0, get_own_nodes_with_errors/0]).
+-export([publish_on_nodes/0]).
+
+-export([config_scan/1, config_scan/2]).
+
+%% Internal exports
+-export([sync_init/4]).
+
+
+-define(cc_vsn, 2).
+
+%%%====================================================================================
+
+-type publish_type() :: 'hidden' | 'normal'.
+-type sync_state() :: 'no_conf' | 'synced'.
+
+-type group_name() :: atom().
+-type group_tuple() :: {group_name(), [node()]}
+ | {group_name(), publish_type(), [node()]}.
+
+
+%%%====================================================================================
+%%% The state of the global_group process
+%%%
+%%% sync_state = no_conf (global_groups not defined, initial state) |
+%%% synced
+%%% group_name = Own global group name
+%%% nodes = Nodes in the own global group
+%%% no_contact = Nodes which we haven't had contact with yet
+%%% sync_error = Nodes that could not be synced because their global_groups definitions were not in agreement
+%%% other_grps = list of other global group names and nodes, [{otherName, [Node]}]
+%%% node_name = Own node
+%%% monitor = List of Pids requesting nodeup/nodedown
+%%%====================================================================================
+
+-record(state, {sync_state = no_conf :: sync_state(),
+ connect_all :: boolean(),
+ group_name = [] :: group_name() | [],
+ nodes = [] :: [node()],
+ no_contact = [] :: [node()],
+ sync_error = [],
+ other_grps = [],
+ node_name = node() :: node(),
+ monitor = [],
+ publish_type = normal :: publish_type(),
+ group_publish_type = normal :: publish_type()}).
+
+
+%%%====================================================================================
+%%% External exported
+%%%====================================================================================
+
+-spec global_groups() -> {group_name(), [group_name()]} | 'undefined'.
+global_groups() ->
+ request(global_groups).
+
+-spec monitor_nodes(boolean()) -> 'ok'.
+monitor_nodes(Flag) ->
+ case Flag of
+ true -> request({monitor_nodes, Flag});
+ false -> request({monitor_nodes, Flag});
+ _ -> {error, not_boolean}
+ end.
+
+-spec own_nodes() -> [node()].
+own_nodes() ->
+ request(own_nodes).
+
+-type name() :: atom().
+-type where() :: {'node', node()} | {'group', group_name()}.
+
+-spec registered_names(where()) -> [name()].
+registered_names(Arg) ->
+ request({registered_names, Arg}).
+
+-spec send(name(), term()) -> pid() | {'badarg', {name(), term()}}.
+send(Name, Msg) ->
+ request({send, Name, Msg}).
+
+-spec send(where(), name(), term()) -> pid() | {'badarg', {name(), term()}}.
+send(Group, Name, Msg) ->
+ request({send, Group, Name, Msg}).
+
+-spec whereis_name(name()) -> pid() | 'undefined'.
+whereis_name(Name) ->
+ request({whereis_name, Name}).
+
+-spec whereis_name(where(), name()) -> pid() | 'undefined'.
+whereis_name(Group, Name) ->
+ request({whereis_name, Group, Name}).
+
+global_groups_changed(NewPara) ->
+ request({global_groups_changed, NewPara}).
+
+global_groups_added(NewPara) ->
+ request({global_groups_added, NewPara}).
+
+global_groups_removed(NewPara) ->
+ request({global_groups_removed, NewPara}).
+
+-spec sync() -> 'ok'.
+sync() ->
+ request(sync).
+
+ng_add_check(Node, OthersNG) ->
+ ng_add_check(Node, normal, OthersNG).
+
+ng_add_check(Node, PubType, OthersNG) ->
+ request({ng_add_check, Node, PubType, OthersNG}).
+
+-type info_item() :: {'state', sync_state()}
+ | {'own_group_name', group_name()}
+ | {'own_group_nodes', [node()]}
+ | {'synched_nodes', [node()]}
+ | {'sync_error', [node()]}
+ | {'no_contact', [node()]}
+ | {'other_groups', [group_tuple()]}
+ | {'monitoring', [pid()]}.
+
+-spec info() -> [info_item()].
+info() ->
+ request(info, 3000).
+
+%% ==== ONLY for test suites ====
+registered_names_test(Arg) ->
+ request({registered_names_test, Arg}).
+send_test(Name, Msg) ->
+ request({send_test, Name, Msg}).
+whereis_name_test(Name) ->
+ request({whereis_name_test, Name}).
+%% ==== ONLY for test suites ====
+
+
+request(Req) ->
+ request(Req, infinity).
+
+request(Req, Time) ->
+ case whereis(global_group) of
+ P when is_pid(P) ->
+ gen_server:call(global_group, Req, Time);
+ _Other ->
+ {error, global_group_not_runnig}
+ end.
+
+%%%====================================================================================
+%%% gen_server start
+%%%
+%%% The first thing to happen is to read if the global_groups key is defined in the
+%%% .config file. If not defined, the whole system is started as one global_group,
+%%% and the services of global_group are superfluous.
+%%% Otherwise a sync process is started to check that all nodes in the own global
+%%% group have the same configuration. This is done by sending 'conf_check' to all
+%%% other nodes and awaiting a 'config_ok' or 'config_error' reply.
+%%% If the nodes do not agree on the configuration, the global_group process
+%%% removes those nodes from the #state.nodes list. This can be a normal case
+%%% during a release upgrade, when not all nodes have been upgraded yet.
+%%%
+%%% It is possible to manually force a sync of the global_group. This is done,
+%%% for instance, after a release upgrade, once all nodes in the group have
+%%% been upgraded. The nodes are not synced automatically because that would
+%%% cause the node to be disconnected from those not yet upgraded.
+%%%
+%%% The three process dictionary variables (registered_names, send, and whereis_name)
+%%% are used to store information needed if the search process crashes.
+%%% The search process is a helper process used to find registered names in the system.
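+%%%
+%%% A minimal, purely illustrative kernel configuration (editor's example with
+%%% made-up node names) defining two global groups, the second one using the
+%%% optional publication type field:
+%%%
+%%%   [{kernel,
+%%%     [{global_groups,
+%%%       [{gg1, ['n1@host1', 'n2@host2']},
+%%%        {gg2, hidden, ['n3@host3']}]}]}].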
+%%%====================================================================================
+start() -> gen_server:start({local, global_group}, global_group, [], []).
+start_link() -> gen_server:start_link({local, global_group}, global_group,[],[]).
+stop() -> gen_server:call(global_group, stop, infinity).
+
+init([]) ->
+ process_flag(priority, max),
+ ok = net_kernel:monitor_nodes(true),
+ put(registered_names, [undefined]),
+ put(send, [undefined]),
+ put(whereis_name, [undefined]),
+ process_flag(trap_exit, true),
+ Ca = case init:get_argument(connect_all) of
+ {ok, [["false"]]} ->
+ false;
+ _ ->
+ true
+ end,
+ PT = publish_arg(),
+ case application:get_env(kernel, global_groups) of
+ undefined ->
+ update_publish_nodes(PT),
+ {ok, #state{publish_type = PT,
+ connect_all = Ca}};
+ {ok, []} ->
+ update_publish_nodes(PT),
+ {ok, #state{publish_type = PT,
+ connect_all = Ca}};
+ {ok, NodeGrps} ->
+ {DefGroupName, PubTpGrp, DefNodes, DefOther} =
+ case catch config_scan(NodeGrps, publish_type) of
+ {error, _Error2} ->
+ update_publish_nodes(PT),
+ exit({error, {'invalid global_groups definition', NodeGrps}});
+ {DefGroupNameT, PubType, DefNodesT, DefOtherT} ->
+ update_publish_nodes(PT, {PubType, DefNodesT}),
+ %% First disconnect any nodes not belonging to our own group
+ disconnect_nodes(nodes(connected) -- DefNodesT),
+ lists:foreach(fun(Node) ->
+ erlang:monitor_node(Node, true)
+ end,
+ DefNodesT),
+ {DefGroupNameT, PubType, lists:delete(node(), DefNodesT), DefOtherT}
+ end,
+ {ok, #state{publish_type = PT, group_publish_type = PubTpGrp,
+ sync_state = synced, group_name = DefGroupName,
+ no_contact = lists:sort(DefNodes),
+ other_grps = DefOther}}
+ end.
+
+
+%%%====================================================================================
+%%% sync() -> ok
+%%%
+%%% An operator-ordered sync of the own global group. This must be done after
+%%% a release upgrade. It can also be ordered if something has made the nodes
+%%% disagree on the global_groups definition.
+%%%====================================================================================
+handle_call(sync, _From, S) ->
+% io:format("~p sync ~p~n",[node(), application:get_env(kernel, global_groups)]),
+ case application:get_env(kernel, global_groups) of
+ undefined ->
+ update_publish_nodes(S#state.publish_type),
+ {reply, ok, S};
+ {ok, []} ->
+ update_publish_nodes(S#state.publish_type),
+ {reply, ok, S};
+ {ok, NodeGrps} ->
+ {DefGroupName, PubTpGrp, DefNodes, DefOther} =
+ case catch config_scan(NodeGrps, publish_type) of
+ {error, _Error2} ->
+ exit({error, {'invalid global_groups definition', NodeGrps}});
+ {DefGroupNameT, PubType, DefNodesT, DefOtherT} ->
+ update_publish_nodes(S#state.publish_type, {PubType, DefNodesT}),
+ %% First inform global on all nodes not belonging to our own group
+ disconnect_nodes(nodes(connected) -- DefNodesT),
+ %% Sync with the nodes in the own group
+ kill_global_group_check(),
+ Pid = spawn_link(?MODULE, sync_init,
+ [sync, DefGroupNameT, PubType, DefNodesT]),
+ register(global_group_check, Pid),
+ {DefGroupNameT, PubType, lists:delete(node(), DefNodesT), DefOtherT}
+ end,
+ {reply, ok, S#state{sync_state = synced, group_name = DefGroupName,
+ no_contact = lists:sort(DefNodes),
+ other_grps = DefOther, group_publish_type = PubTpGrp}}
+ end;
+
+
+
+%%%====================================================================================
+%%% global_groups() -> {OwnGroupName, [OtherGroupName]} | undefined
+%%%
+%%% Get the names of the global groups
+%%%====================================================================================
+handle_call(global_groups, _From, S) ->
+ Result = case S#state.sync_state of
+ no_conf ->
+ undefined;
+ synced ->
+ Other = lists:foldl(fun({N,_L}, Acc) -> Acc ++ [N]
+ end,
+ [], S#state.other_grps),
+ {S#state.group_name, Other}
+ end,
+ {reply, Result, S};
+
+
+
+%%%====================================================================================
+%%% monitor_nodes(bool()) -> ok
+%%%
+%%% Monitor nodes in the own global group.
+%%% True => send nodeup/nodedown to the requesting Pid
+%%% False => stop sending nodeup/nodedown to the requesting Pid
+%%%====================================================================================
+handle_call({monitor_nodes, Flag}, {Pid, _}, StateIn) ->
+% io:format("***** handle_call ~p~n",[monitor_nodes]),
+ {Res, State} = monitor_nodes(Flag, Pid, StateIn),
+ {reply, Res, State};
+
+
+%%%====================================================================================
+%%% own_nodes() -> [Node]
+%%%
+%%% Get a list of nodes in the own global group
+%%%====================================================================================
+handle_call(own_nodes, _From, S) ->
+ Nodes = case S#state.sync_state of
+ no_conf ->
+ [node() | nodes()];
+ synced ->
+ get_own_nodes()
+% S#state.nodes
+ end,
+ {reply, Nodes, S};
+
+
+
+%%%====================================================================================
+%%% registered_names({node, Node}) -> [Name] | {error, ErrorMessage}
+%%% registered_names({group, GlobalGroupName}) -> [Name] | {error, ErrorMessage}
+%%%
+%%% Get the registered names from a specified Node, or GlobalGroupName.
+%%%====================================================================================
+handle_call({registered_names, {group, Group}}, _From, S) when Group =:= S#state.group_name ->
+ Res = global:registered_names(),
+ {reply, Res, S};
+handle_call({registered_names, {group, Group}}, From, S) ->
+ case lists:keysearch(Group, 1, S#state.other_grps) of
+ false ->
+ {reply, [], S};
+ {value, {Group, []}} ->
+ {reply, [], S};
+ {value, {Group, Nodes}} ->
+ Pid = global_search:start(names, {group, Nodes, From}),
+ Wait = get(registered_names),
+ put(registered_names, [{Pid, From} | Wait]),
+ {noreply, S}
+ end;
+handle_call({registered_names, {node, Node}}, _From, S) when Node =:= node() ->
+ Res = global:registered_names(),
+ {reply, Res, S};
+handle_call({registered_names, {node, Node}}, From, S) ->
+ Pid = global_search:start(names, {node, Node, From}),
+% io:format(">>>>> registered_names Pid ~p~n",[Pid]),
+ Wait = get(registered_names),
+ put(registered_names, [{Pid, From} | Wait]),
+ {noreply, S};
+
+
+
+%%%====================================================================================
+%%% send(Name, Msg) -> Pid | {badarg, {Name, Msg}}
+%%% send({node, Node}, Name, Msg) -> Pid | {badarg, {Name, Msg}}
+%%% send({group, GlobalGroupName}, Name, Msg) -> Pid | {badarg, {Name, Msg}}
+%%%
+%%% Send the Msg to the specified globally registered Name in own global group,
+%%% in specified Node, or GlobalGroupName.
+%%% But first the receiver has to be found; the flow continues in
+%%% handle_cast(send_res).
+%%%====================================================================================
+%% Search in the whole known world, but check own node first.
+handle_call({send, Name, Msg}, From, S) ->
+ case global:whereis_name(Name) of
+ undefined ->
+ Pid = global_search:start(send, {any, S#state.other_grps, Name, Msg, From}),
+ Wait = get(send),
+ put(send, [{Pid, From, Name, Msg} | Wait]),
+ {noreply, S};
+ Found ->
+ Found ! Msg,
+ {reply, Found, S}
+ end;
+%% Search in the specified global group, which happens to be the own group.
+handle_call({send, {group, Grp}, Name, Msg}, _From, S) when Grp =:= S#state.group_name ->
+ case global:whereis_name(Name) of
+ undefined ->
+ {reply, {badarg, {Name, Msg}}, S};
+ Pid ->
+ Pid ! Msg,
+ {reply, Pid, S}
+ end;
+%% Search in the specified global group.
+handle_call({send, {group, Group}, Name, Msg}, From, S) ->
+ case lists:keysearch(Group, 1, S#state.other_grps) of
+ false ->
+ {reply, {badarg, {Name, Msg}}, S};
+ {value, {Group, []}} ->
+ {reply, {badarg, {Name, Msg}}, S};
+ {value, {Group, Nodes}} ->
+ Pid = global_search:start(send, {group, Nodes, Name, Msg, From}),
+ Wait = get(send),
+ put(send, [{Pid, From, Name, Msg} | Wait]),
+ {noreply, S}
+ end;
+%% Search on the specified node.
+handle_call({send, {node, Node}, Name, Msg}, From, S) ->
+ Pid = global_search:start(send, {node, Node, Name, Msg, From}),
+ Wait = get(send),
+ put(send, [{Pid, From, Name, Msg} | Wait]),
+ {noreply, S};
+
+
+
+%%%====================================================================================
+%%% whereis_name(Name) -> Pid | undefined
+%%% whereis_name({node, Node}, Name) -> Pid | undefined
+%%% whereis_name({group, GlobalGroupName}, Name) -> Pid | undefined
+%%%
+%%% Get the Pid of a globally registered Name in own global group,
+%%% in specified Node, or GlobalGroupName.
+%%% But first the process has to be found; the flow continues in
+%%% handle_cast(find_name_res).
+%%%====================================================================================
+%% Search in the whole known world, but check own node first.
+handle_call({whereis_name, Name}, From, S) ->
+ case global:whereis_name(Name) of
+ undefined ->
+ Pid = global_search:start(whereis, {any, S#state.other_grps, Name, From}),
+ Wait = get(whereis_name),
+ put(whereis_name, [{Pid, From} | Wait]),
+ {noreply, S};
+ Found ->
+ {reply, Found, S}
+ end;
+%% Search in the specified global group, which happens to be the own group.
+handle_call({whereis_name, {group, Group}, Name}, _From, S)
+ when Group =:= S#state.group_name ->
+ Res = global:whereis_name(Name),
+ {reply, Res, S};
+%% Search in the specified global group.
+handle_call({whereis_name, {group, Group}, Name}, From, S) ->
+ case lists:keysearch(Group, 1, S#state.other_grps) of
+ false ->
+ {reply, undefined, S};
+ {value, {Group, []}} ->
+ {reply, undefined, S};
+ {value, {Group, Nodes}} ->
+ Pid = global_search:start(whereis, {group, Nodes, Name, From}),
+ Wait = get(whereis_name),
+ put(whereis_name, [{Pid, From} | Wait]),
+ {noreply, S}
+ end;
+%% Search on the specified node.
+handle_call({whereis_name, {node, Node}, Name}, From, S) ->
+ Pid = global_search:start(whereis, {node, Node, Name, From}),
+ Wait = get(whereis_name),
+ put(whereis_name, [{Pid, From} | Wait]),
+ {noreply, S};
+
+
+%%%====================================================================================
+%%% global_groups parameter changed
+%%% The node is not resynced automatically because that would cause this node
+%%% to be disconnected from those nodes not yet upgraded.
+%%%====================================================================================
+handle_call({global_groups_changed, NewPara}, _From, S) ->
+ {NewGroupName, PubTpGrp, NewNodes, NewOther} =
+ case catch config_scan(NewPara, publish_type) of
+ {error, _Error2} ->
+ exit({error, {'invalid global_groups definition', NewPara}});
+ {DefGroupName, PubType, DefNodes, DefOther} ->
+ update_publish_nodes(S#state.publish_type, {PubType, DefNodes}),
+ {DefGroupName, PubType, DefNodes, DefOther}
+ end,
+
+ %% #state.nodes is the common denominator of previous and new definition
+ NN = NewNodes -- (NewNodes -- S#state.nodes),
+ %% rest of the nodes in the new definition are marked as not yet contacted
+ NNC = (NewNodes -- S#state.nodes) -- S#state.sync_error,
+ %% remove sync_error nodes not belonging to the new group
+ NSE = NewNodes -- (NewNodes -- S#state.sync_error),
+
+ %% Disconnect from nodes which are not in our old global group. This is
+ %% done because if we already are aware of new nodes (to our global
+ %% group) global is not going to be synced to these nodes. We disconnect
+ %% instead of connect because upgrades can be done node by node and we
+ %% cannot really know which nodes these new nodes are synced to. The
+ %% operator can always manually force a sync of the nodes after all
+ %% nodes have been upgraded.
+ %% We must disconnect also if some nodes to which we have a connection
+ %% will not be in any global group at all.
+ force_nodedown(nodes(connected) -- NewNodes),
+
+ NewS = S#state{group_name = NewGroupName,
+ nodes = lists:sort(NN),
+ no_contact = lists:sort(lists:delete(node(), NNC)),
+ sync_error = lists:sort(NSE),
+ other_grps = NewOther,
+ group_publish_type = PubTpGrp},
+ {reply, ok, NewS};
+
+
+%%%====================================================================================
+%%% global_groups parameter added
+%%% The node is not resynced automatically because that would cause this node
+%%% to be disconnected from those nodes not yet upgraded.
+%%%====================================================================================
+handle_call({global_groups_added, NewPara}, _From, S) ->
+% io:format("### global_groups_changed, NewPara ~p ~n",[NewPara]),
+ {NewGroupName, PubTpGrp, NewNodes, NewOther} =
+ case catch config_scan(NewPara, publish_type) of
+ {error, _Error2} ->
+ exit({error, {'invalid global_groups definition', NewPara}});
+ {DefGroupName, PubType, DefNodes, DefOther} ->
+ update_publish_nodes(S#state.publish_type, {PubType, DefNodes}),
+ {DefGroupName, PubType, DefNodes, DefOther}
+ end,
+
+ %% disconnect from those nodes which are not going to be in our global group
+ force_nodedown(nodes(connected) -- NewNodes),
+
+ %% Check which nodes are already updated
+ OwnNG = get_own_nodes(),
+ NGACArgs = case S#state.group_publish_type of
+ normal ->
+ [node(), OwnNG];
+ _ ->
+ [node(), S#state.group_publish_type, OwnNG]
+ end,
+ {NN, NNC, NSE} =
+ lists:foldl(fun(Node, {NN_acc, NNC_acc, NSE_acc}) ->
+ case rpc:call(Node, global_group, ng_add_check, NGACArgs) of
+ {badrpc, _} ->
+ {NN_acc, [Node | NNC_acc], NSE_acc};
+ agreed ->
+ {[Node | NN_acc], NNC_acc, NSE_acc};
+ not_agreed ->
+ {NN_acc, NNC_acc, [Node | NSE_acc]}
+ end
+ end,
+ {[], [], []}, lists:delete(node(), NewNodes)),
+ NewS = S#state{sync_state = synced, group_name = NewGroupName, nodes = lists:sort(NN),
+ sync_error = lists:sort(NSE), no_contact = lists:sort(NNC),
+ other_grps = NewOther, group_publish_type = PubTpGrp},
+ {reply, ok, NewS};
+
+
+%%%====================================================================================
+%%% global_groups parameter removed
+%%%====================================================================================
+handle_call({global_groups_removed, _NewPara}, _From, S) ->
+% io:format("### global_groups_removed, NewPara ~p ~n",[_NewPara]),
+ update_publish_nodes(S#state.publish_type),
+ NewS = S#state{sync_state = no_conf, group_name = [], nodes = [],
+ sync_error = [], no_contact = [],
+ other_grps = []},
+ {reply, ok, NewS};
+
+
+%%%====================================================================================
+%%% global_groups parameter added to some other node which thinks that we
+%%% belong to the same global group.
+%%% It may happen that our node has not yet been updated with the new global_groups parameter.
+%%%====================================================================================
+handle_call({ng_add_check, Node, PubType, OthersNG}, _From, S) ->
+ %% Check which nodes are already updated
+ OwnNG = get_own_nodes(),
+ case S#state.group_publish_type =:= PubType of
+ true ->
+ case OwnNG of
+ OthersNG ->
+ NN = [Node | S#state.nodes],
+ NSE = lists:delete(Node, S#state.sync_error),
+ NNC = lists:delete(Node, S#state.no_contact),
+ NewS = S#state{nodes = lists:sort(NN),
+ sync_error = NSE,
+ no_contact = NNC},
+ {reply, agreed, NewS};
+ _ ->
+ {reply, not_agreed, S}
+ end;
+ _ ->
+ {reply, not_agreed, S}
+ end;
+
+
+
+%%%====================================================================================
+%%% Miscellaneous help functions to read some variables
+%%%====================================================================================
+handle_call(info, _From, S) ->
+ Reply = [{state, S#state.sync_state},
+ {own_group_name, S#state.group_name},
+ {own_group_nodes, get_own_nodes()},
+% {"nodes()", lists:sort(nodes())},
+ {synced_nodes, S#state.nodes},
+ {sync_error, S#state.sync_error},
+ {no_contact, S#state.no_contact},
+ {other_groups, S#state.other_grps},
+ {monitoring, S#state.monitor}],
+
+ {reply, Reply, S};
+
+handle_call(get, _From, S) ->
+ {reply, get(), S};
+
+
+%%%====================================================================================
+%%% Only for test suites. These test what happens when the search process exits.
+%%%====================================================================================
+handle_call({registered_names_test, {node, 'test3844zty'}}, From, S) ->
+ Pid = global_search:start(names_test, {node, 'test3844zty'}),
+ Wait = get(registered_names),
+ put(registered_names, [{Pid, From} | Wait]),
+ {noreply, S};
+handle_call({registered_names_test, {node, _Node}}, _From, S) ->
+ {reply, {error, illegal_function_call}, S};
+handle_call({send_test, Name, 'test3844zty'}, From, S) ->
+ Pid = global_search:start(send_test, 'test3844zty'),
+ Wait = get(send),
+ put(send, [{Pid, From, Name, 'test3844zty'} | Wait]),
+ {noreply, S};
+handle_call({send_test, _Name, _Msg }, _From, S) ->
+ {reply, {error, illegal_function_call}, S};
+handle_call({whereis_name_test, 'test3844zty'}, From, S) ->
+ Pid = global_search:start(whereis_test, 'test3844zty'),
+ Wait = get(whereis_name),
+ put(whereis_name, [{Pid, From} | Wait]),
+ {noreply, S};
+handle_call({whereis_name_test, _Name}, _From, S) ->
+ {reply, {error, illegal_function_call}, S};
+
+handle_call(Call, _From, S) ->
+% io:format("***** handle_call ~p~n",[Call]),
+ {reply, {illegal_message, Call}, S}.
+
+
+
+
+
+%%%====================================================================================
+%%% registered_names({node, Node}) -> [Name] | {error, ErrorMessage}
+%%% registered_names({group, GlobalGroupName}) -> [Name] | {error, ErrorMessage}
+%%%
+%%% A request from a search process to get the names registered on this node.
+%%%====================================================================================
+handle_cast({registered_names, User}, S) ->
+% io:format(">>>>> registered_names User ~p~n",[User]),
+ Res = global:registered_names(),
+ User ! {registered_names_res, Res},
+ {noreply, S};
+
+handle_cast({registered_names_res, Result, Pid, From}, S) ->
+% io:format(">>>>> registered_names_res Result ~p~n",[Result]),
+ unlink(Pid),
+ exit(Pid, normal),
+ Wait = get(registered_names),
+ NewWait = lists:delete({Pid, From},Wait),
+ put(registered_names, NewWait),
+ gen_server:reply(From, Result),
+ {noreply, S};
+
+
+
+%%%====================================================================================
+%%% send(Name, Msg) -> Pid | {error, ErrorMessage}
+%%% send({node, Node}, Name, Msg) -> Pid | {error, ErrorMessage}
+%%% send({group, GlobalGroupName}, Name, Msg) -> Pid | {error, ErrorMessage}
+%%%
+%%% The registered Name is found; send the message to it, kill the search process,
+%%% and return to the requesting process.
+%%%====================================================================================
+handle_cast({send_res, Result, Name, Msg, Pid, From}, S) ->
+% io:format("~p>>>>> send_res Result ~p~n",[node(), Result]),
+ case Result of
+ {badarg,{Name, Msg}} ->
+ continue;
+ ToPid ->
+ ToPid ! Msg
+ end,
+ unlink(Pid),
+ exit(Pid, normal),
+ Wait = get(send),
+ NewWait = lists:delete({Pid, From, Name, Msg},Wait),
+ put(send, NewWait),
+ gen_server:reply(From, Result),
+ {noreply, S};
+
+
+
+%%%====================================================================================
+%%% A request from a search process to check if this Name is registered at this node.
+%%%====================================================================================
+handle_cast({find_name, User, Name}, S) ->
+ Res = global:whereis_name(Name),
+% io:format(">>>>> find_name Name ~p Res ~p~n",[Name, Res]),
+ User ! {find_name_res, Res},
+ {noreply, S};
+
+%%%====================================================================================
+%%% whereis_name(Name) -> Pid | undefined
+%%% whereis_name({node, Node}, Name) -> Pid | undefined
+%%% whereis_name({group, GlobalGroupName}, Name) -> Pid | undefined
+%%%
+%%% The registered Name is found; kill the search process
+%%% and return to the requesting process.
+%%%====================================================================================
+handle_cast({find_name_res, Result, Pid, From}, S) ->
+% io:format(">>>>> find_name_res Result ~p~n",[Result]),
+% io:format(">>>>> find_name_res get() ~p~n",[get()]),
+ unlink(Pid),
+ exit(Pid, normal),
+ Wait = get(whereis_name),
+ NewWait = lists:delete({Pid, From},Wait),
+ put(whereis_name, NewWait),
+ gen_server:reply(From, Result),
+ {noreply, S};
+
+
+%%%====================================================================================
+%%% The node is synced successfully
+%%%====================================================================================
+handle_cast({synced, NoContact}, S) ->
+% io:format("~p>>>>> synced ~p ~n",[node(), NoContact]),
+ kill_global_group_check(),
+ Nodes = get_own_nodes() -- [node() | NoContact],
+ {noreply, S#state{nodes = lists:sort(Nodes),
+ sync_error = [],
+ no_contact = NoContact}};
+
+
+%%%====================================================================================
+%%% The node could not sync with some other nodes.
+%%%====================================================================================
+handle_cast({sync_error, NoContact, ErrorNodes}, S) ->
+% io:format("~p>>>>> sync_error ~p ~p ~n",[node(), NoContact, ErrorNodes]),
+ Txt = io_lib:format("Global group: Could not synchronize with these nodes ~p~n"
+ "because global_groups were not in agreement. ~n", [ErrorNodes]),
+ error_logger:error_report(Txt),
+ kill_global_group_check(),
+ Nodes = (get_own_nodes() -- [node() | NoContact]) -- ErrorNodes,
+ {noreply, S#state{nodes = lists:sort(Nodes),
+ sync_error = ErrorNodes,
+ no_contact = NoContact}};
+
+
+%%%====================================================================================
+%%% Another node is checking this node's group configuration
+%%%====================================================================================
+handle_cast({conf_check, Vsn, Node, From, sync, CCName, CCNodes}, S) ->
+ handle_cast({conf_check, Vsn, Node, From, sync, CCName, normal, CCNodes}, S);
+
+handle_cast({conf_check, Vsn, Node, From, sync, CCName, PubType, CCNodes}, S) ->
+ CurNodes = S#state.nodes,
+% io:format(">>>>> conf_check,sync Node ~p~n",[Node]),
+ %% Another node is syncing, for instance after an upgrade of the
+ %% global_groups parameter.
+ NS =
+ case application:get_env(kernel, global_groups) of
+ undefined ->
+ %% We didn't have any node_group definition
+ update_publish_nodes(S#state.publish_type),
+ disconnect_nodes([Node]),
+ {global_group_check, Node} ! {config_error, Vsn, From, node()},
+ S;
+ {ok, []} ->
+ %% Our node_group definition was empty
+ update_publish_nodes(S#state.publish_type),
+ disconnect_nodes([Node]),
+ {global_group_check, Node} ! {config_error, Vsn, From, node()},
+ S;
+ %%---------------------------------
+ %% global_groups defined
+ %%---------------------------------
+ {ok, NodeGrps} ->
+ case catch config_scan(NodeGrps, publish_type) of
+ {error, _Error2} ->
+ %% Our node_group definition was erroneous
+ disconnect_nodes([Node]),
+ {global_group_check, Node} ! {config_error, Vsn, From, node()},
+ S#state{nodes = lists:delete(Node, CurNodes)};
+
+ {CCName, PubType, CCNodes, _OtherDef} ->
+ %% OK, add the node to the #state.nodes if it isn't there
+ update_publish_nodes(S#state.publish_type, {PubType, CCNodes}),
+ global_name_server ! {nodeup, Node},
+ {global_group_check, Node} ! {config_ok, Vsn, From, node()},
+ case lists:member(Node, CurNodes) of
+ false ->
+ NewNodes = lists:sort([Node | CurNodes]),
+ NSE = lists:delete(Node, S#state.sync_error),
+ NNC = lists:delete(Node, S#state.no_contact),
+ S#state{nodes = NewNodes,
+ sync_error = NSE,
+ no_contact = NNC};
+ true ->
+ S
+ end;
+ _ ->
+ %% node_group definitions were not in agreement
+ disconnect_nodes([Node]),
+ {global_group_check, Node} ! {config_error, Vsn, From, node()},
+ NN = lists:delete(Node, S#state.nodes),
+ NSE = lists:delete(Node, S#state.sync_error),
+ NNC = lists:delete(Node, S#state.no_contact),
+ S#state{nodes = NN,
+ sync_error = NSE,
+ no_contact = NNC}
+ end
+ end,
+ {noreply, NS};
+
+
+handle_cast(_Cast, S) ->
+% io:format("***** handle_cast ~p~n",[_Cast]),
+ {noreply, S}.
+
+
+
+%%%====================================================================================
+%%% A node came up. If there is no global group configuration, inform global;
+%%% if there is a global group configuration, inform global only if the node
+%%% is in the own global group.
+%%%====================================================================================
+handle_info({nodeup, Node}, S) when S#state.sync_state =:= no_conf ->
+% io:format("~p>>>>> nodeup, Node ~p ~n",[node(), Node]),
+ send_monitor(S#state.monitor, {nodeup, Node}, S#state.sync_state),
+ global_name_server ! {nodeup, Node},
+ {noreply, S};
+handle_info({nodeup, Node}, S) ->
+% io:format("~p>>>>> nodeup, Node ~p ~n",[node(), Node]),
+ OthersNG = case S#state.sync_state of
+ synced ->
+ X = (catch rpc:call(Node, global_group, get_own_nodes, [])),
+ case X of
+ X when is_list(X) ->
+ lists:sort(X);
+ _ ->
+ []
+ end;
+ no_conf ->
+ []
+ end,
+
+ NNC = lists:delete(Node, S#state.no_contact),
+ NSE = lists:delete(Node, S#state.sync_error),
+ OwnNG = get_own_nodes(),
+ case OwnNG of
+ OthersNG ->
+ send_monitor(S#state.monitor, {nodeup, Node}, S#state.sync_state),
+ global_name_server ! {nodeup, Node},
+ case lists:member(Node, S#state.nodes) of
+ false ->
+ NN = lists:sort([Node | S#state.nodes]),
+ {noreply, S#state{nodes = NN,
+ no_contact = NNC,
+ sync_error = NSE}};
+ true ->
+ {noreply, S#state{no_contact = NNC,
+ sync_error = NSE}}
+ end;
+ _ ->
+ case {lists:member(Node, get_own_nodes()),
+ lists:member(Node, S#state.sync_error)} of
+ {true, false} ->
+ NSE2 = lists:sort([Node | S#state.sync_error]),
+ {noreply, S#state{no_contact = NNC,
+ sync_error = NSE2}};
+ _ ->
+ {noreply, S}
+ end
+ end;
+
+%%%====================================================================================
+%%% A node has crashed.
+%%% nodedown must always be sent to global; this is a safety measure,
+%%% because during a release upgrade the global_groups parameter is upgraded
+%%% before the node is synced. This means that nodedown may arrive from a
+%%% node which we are not aware of.
+%%%====================================================================================
+handle_info({nodedown, Node}, S) when S#state.sync_state =:= no_conf ->
+% io:format("~p>>>>> nodedown, no_conf Node ~p~n",[node(), Node]),
+ send_monitor(S#state.monitor, {nodedown, Node}, S#state.sync_state),
+ global_name_server ! {nodedown, Node},
+ {noreply, S};
+handle_info({nodedown, Node}, S) ->
+% io:format("~p>>>>> nodedown, Node ~p ~n",[node(), Node]),
+ send_monitor(S#state.monitor, {nodedown, Node}, S#state.sync_state),
+ global_name_server ! {nodedown, Node},
+ NN = lists:delete(Node, S#state.nodes),
+ NSE = lists:delete(Node, S#state.sync_error),
+ NNC = case {lists:member(Node, get_own_nodes()),
+ lists:member(Node, S#state.no_contact)} of
+ {true, false} ->
+ [Node | S#state.no_contact];
+ _ ->
+ S#state.no_contact
+ end,
+ {noreply, S#state{nodes = NN, no_contact = NNC, sync_error = NSE}};
+
+
+%%%====================================================================================
+%%% A node has changed its global_groups definition, and is telling us that we are not
+%%% included in its group any more. This can happen at a release upgrade.
+%%%====================================================================================
+handle_info({disconnect_node, Node}, S) ->
+% io:format("~p>>>>> disconnect_node Node ~p CN ~p~n",[node(), Node, S#state.nodes]),
+ case {S#state.sync_state, lists:member(Node, S#state.nodes)} of
+ {synced, true} ->
+ send_monitor(S#state.monitor, {nodedown, Node}, S#state.sync_state);
+ _ ->
+ cont
+ end,
+ global_name_server ! {nodedown, Node}, %% nodedown is used to inform global of the
+ %% disconnected node
+ NN = lists:delete(Node, S#state.nodes),
+ NNC = lists:delete(Node, S#state.no_contact),
+ NSE = lists:delete(Node, S#state.sync_error),
+ {noreply, S#state{nodes = NN, no_contact = NNC, sync_error = NSE}};
+
+
+
+
+handle_info({'EXIT', ExitPid, Reason}, S) ->
+ check_exit(ExitPid, Reason),
+ {noreply, S};
+
+
+handle_info(_Info, S) ->
+% io:format("***** handle_info = ~p~n",[_Info]),
+ {noreply, S}.
+
+
+
+terminate(_Reason, _S) ->
+ ok.
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+
+
+
+%%%====================================================================================
+%%% Check the global group configuration.
+%%%====================================================================================
+
+config_scan(NodeGrps) ->
+ config_scan(NodeGrps, original).
+
+config_scan(NodeGrps, original) ->
+ case config_scan(NodeGrps, publish_type) of
+ {DefGroupName, _, DefNodes, DefOther} ->
+ {DefGroupName, DefNodes, DefOther};
+ Error ->
+ Error
+ end;
+config_scan(NodeGrps, publish_type) ->
+ config_scan(node(), normal, NodeGrps, no_name, [], []).
+
+config_scan(_MyNode, PubType, [], Own_name, OwnNodes, OtherNodeGrps) ->
+ {Own_name, PubType, lists:sort(OwnNodes), lists:reverse(OtherNodeGrps)};
+config_scan(MyNode, PubType, [GrpTuple|NodeGrps], Own_name, OwnNodes, OtherNodeGrps) ->
+ {Name, PubTypeGroup, Nodes} = grp_tuple(GrpTuple),
+ case lists:member(MyNode, Nodes) of
+ true ->
+ case Own_name of
+ no_name ->
+ config_scan(MyNode, PubTypeGroup, NodeGrps, Name, Nodes, OtherNodeGrps);
+ _ ->
+ {error, {'node defined twice', {Own_name, Name}}}
+ end;
+ false ->
+ config_scan(MyNode,PubType,NodeGrps,Own_name,OwnNodes,
+ [{Name, Nodes}|OtherNodeGrps])
+ end.
+
+grp_tuple({Name, Nodes}) ->
+ {Name, normal, Nodes};
+grp_tuple({Name, hidden, Nodes}) ->
+ {Name, hidden, Nodes};
+grp_tuple({Name, normal, Nodes}) ->
+ {Name, normal, Nodes}.
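+%% Editor's illustration (not part of the original module, made-up node
+%% names): on node 'n1@host1', with
+%%   NodeGrps = [{gg1, ['n1@host1','n2@host2']}, {gg2, hidden, ['n3@host3']}],
+%% config_scan(NodeGrps, publish_type) returns
+%%   {gg1, normal, ['n1@host1','n2@host2'], [{gg2, ['n3@host3']}]}.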
+
+
+%%%====================================================================================
+%%% The special process which checks that all nodes in the own global group
+%%% agree on the configuration.
+%%%====================================================================================
+sync_init(Type, Cname, PubType, Nodes) ->
+ {Up, Down} = sync_check_node(lists:delete(node(), Nodes), [], []),
+ sync_check_init(Type, Up, Cname, Nodes, Down, PubType).
+
+sync_check_node([], Up, Down) ->
+ {Up, Down};
+sync_check_node([Node|Nodes], Up, Down) ->
+ case net_adm:ping(Node) of
+ pang ->
+ sync_check_node(Nodes, Up, [Node|Down]);
+ pong ->
+ sync_check_node(Nodes, [Node|Up], Down)
+ end.
+
+
+
+%%%-------------------------------------------------------------
+%%% Check that all nodes are in agreement on the global
+%%% group configuration.
+%%%-------------------------------------------------------------
+sync_check_init(Type, Up, Cname, Nodes, Down, PubType) ->
+ sync_check_init(Type, Up, Cname, Nodes, 3, [], Down, PubType).
+
+sync_check_init(_Type, NoContact, _Cname, _Nodes, 0, ErrorNodes, Down, _PubType) ->
+ case ErrorNodes of
+ [] ->
+ gen_server:cast(global_group, {synced, lists:sort(NoContact ++ Down)});
+ _ ->
+ gen_server:cast(global_group, {sync_error, lists:sort(NoContact ++ Down),
+ ErrorNodes})
+ end,
+ receive
+ kill ->
+ exit(normal)
+ after 5000 ->
+ exit(normal)
+ end;
+
+sync_check_init(Type, Up, Cname, Nodes, N, ErrorNodes, Down, PubType) ->
+ ConfCheckMsg = case PubType of
+ normal ->
+ {conf_check, ?cc_vsn, node(), self(), Type, Cname, Nodes};
+ _ ->
+ {conf_check, ?cc_vsn, node(), self(), Type, Cname, PubType, Nodes}
+ end,
+ lists:foreach(fun(Node) ->
+ gen_server:cast({global_group, Node}, ConfCheckMsg)
+ end, Up),
+ case sync_check(Up) of
+ {ok, synced} ->
+ sync_check_init(Type, [], Cname, Nodes, 0, ErrorNodes, Down, PubType);
+ {error, NewErrorNodes} ->
+ sync_check_init(Type, [], Cname, Nodes, 0, ErrorNodes ++ NewErrorNodes, Down, PubType);
+ {more, Rem, NewErrorNodes} ->
+ %% Try again to reach the global_group,
+ %% obviously the node is up but not the global_group process.
+ sync_check_init(Type, Rem, Cname, Nodes, N-1, ErrorNodes ++ NewErrorNodes, Down, PubType)
+ end.
+
+sync_check(Up) ->
+ sync_check(Up, Up, []).
+
+sync_check([], _Up, []) ->
+ {ok, synced};
+sync_check([], _Up, ErrorNodes) ->
+ {error, ErrorNodes};
+sync_check(Rem, Up, ErrorNodes) ->
+ receive
+ {config_ok, ?cc_vsn, Pid, Node} when Pid =:= self() ->
+ global_name_server ! {nodeup, Node},
+ sync_check(Rem -- [Node], Up, ErrorNodes);
+ {config_error, ?cc_vsn, Pid, Node} when Pid =:= self() ->
+ sync_check(Rem -- [Node], Up, [Node | ErrorNodes]);
+ {no_global_group_configuration, ?cc_vsn, Pid, Node} when Pid =:= self() ->
+ sync_check(Rem -- [Node], Up, [Node | ErrorNodes]);
+ %% Ignore, illegal vsn or illegal Pid
+ _ ->
+ sync_check(Rem, Up, ErrorNodes)
+ after 2000 ->
+ %% Try again, the previous conf_check message
+ %% apparently disappeared in the magic black hole.
+ {more, Rem, ErrorNodes}
+ end.
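+%% Editor's summary (not part of the original module): the check process
+%% casts a {conf_check, ...} message to the 'global_group' server on every
+%% node that answered the initial ping; each peer replies to the registered
+%% 'global_group_check' process with {config_ok, Vsn, Pid, Node} or
+%% {config_error, Vsn, Pid, Node}, and nodes that do not answer within
+%% 2 seconds are retried a few times before the result is reported back
+%% with {synced, ...} or {sync_error, ...}.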
+
+
+%%%====================================================================================
+%%% A process wants to toggle monitoring nodeup/nodedown from nodes.
+%%%====================================================================================
+monitor_nodes(true, Pid, State) ->
+ link(Pid),
+ Monitor = State#state.monitor,
+ {ok, State#state{monitor = [Pid|Monitor]}};
+monitor_nodes(false, Pid, State) ->
+ Monitor = State#state.monitor,
+ State1 = State#state{monitor = delete_all(Pid,Monitor)},
+ do_unlink(Pid, State1),
+ {ok, State1};
+monitor_nodes(_, _, State) ->
+ {error, State}.
+
+delete_all(From, [From |Tail]) -> delete_all(From, Tail);
+delete_all(From, [H|Tail]) -> [H|delete_all(From, Tail)];
+delete_all(_, []) -> [].
+
+%% do unlink if we have no more references to Pid.
+do_unlink(Pid, State) ->
+ case lists:member(Pid, State#state.monitor) of
+ true ->
+ false;
+ _ ->
+% io:format("unlink(Pid) ~p~n",[Pid]),
+ unlink(Pid)
+ end.
+
+
+
+%%%====================================================================================
+%%% Send nodeup/nodedown messages to monitoring Pids in the own global group.
+%%%====================================================================================
+send_monitor([P|T], M, no_conf) -> safesend_nc(P, M), send_monitor(T, M, no_conf);
+send_monitor([P|T], M, SyncState) -> safesend(P, M), send_monitor(T, M, SyncState);
+send_monitor([], _, _) -> ok.
+
+safesend(Name, {Msg, Node}) when is_atom(Name) ->
+ case lists:member(Node, get_own_nodes()) of
+ true ->
+ case whereis(Name) of
+ undefined ->
+ {Msg, Node};
+ P when is_pid(P) ->
+ P ! {Msg, Node}
+ end;
+ false ->
+ not_own_group
+ end;
+safesend(Pid, {Msg, Node}) ->
+ case lists:member(Node, get_own_nodes()) of
+ true ->
+ Pid ! {Msg, Node};
+ false ->
+ not_own_group
+ end.
+
+safesend_nc(Name, {Msg, Node}) when is_atom(Name) ->
+ case whereis(Name) of
+ undefined ->
+ {Msg, Node};
+ P when is_pid(P) ->
+ P ! {Msg, Node}
+ end;
+safesend_nc(Pid, {Msg, Node}) ->
+ Pid ! {Msg, Node}.
+
+
+
+
+
+
+%%%====================================================================================
+%%% Check which user is associated with the crashed process.
+%%%====================================================================================
+check_exit(ExitPid, Reason) ->
+% io:format("===EXIT=== ~p ~p ~n~p ~n~p ~n~p ~n~n",[ExitPid, Reason, get(registered_names), get(send), get(whereis_name)]),
+ check_exit_reg(get(registered_names), ExitPid, Reason),
+ check_exit_send(get(send), ExitPid, Reason),
+ check_exit_where(get(whereis_name), ExitPid, Reason).
+
+
+check_exit_reg(undefined, _ExitPid, _Reason) ->
+ ok;
+check_exit_reg(Reg, ExitPid, Reason) ->
+ case lists:keysearch(ExitPid, 1, lists:delete(undefined, Reg)) of
+ {value, {ExitPid, From}} ->
+ NewReg = lists:delete({ExitPid, From}, Reg),
+ put(registered_names, NewReg),
+ gen_server:reply(From, {error, Reason});
+ false ->
+ not_found_ignored
+ end.
+
+
+check_exit_send(undefined, _ExitPid, _Reason) ->
+ ok;
+check_exit_send(Send, ExitPid, _Reason) ->
+ case lists:keysearch(ExitPid, 1, lists:delete(undefined, Send)) of
+ {value, {ExitPid, From, Name, Msg}} ->
+ NewSend = lists:delete({ExitPid, From, Name, Msg}, Send),
+ put(send, NewSend),
+ gen_server:reply(From, {badarg, {Name, Msg}});
+ false ->
+ not_found_ignored
+ end.
+
+
+check_exit_where(undefined, _ExitPid, _Reason) ->
+ ok;
+check_exit_where(Where, ExitPid, Reason) ->
+ case lists:keysearch(ExitPid, 1, lists:delete(undefined, Where)) of
+ {value, {ExitPid, From}} ->
+ NewWhere = lists:delete({ExitPid, From}, Where),
+ put(whereis_name, NewWhere),
+ gen_server:reply(From, {error, Reason});
+ false ->
+ not_found_ignored
+ end.
+
+
+
+%%%====================================================================================
+%%% Kill any possible global_group_check processes
+%%%====================================================================================
+kill_global_group_check() ->
+ case whereis(global_group_check) of
+ undefined ->
+ ok;
+ Pid ->
+ unlink(Pid),
+ global_group_check ! kill,
+ unregister(global_group_check)
+ end.
+
+
+%%%====================================================================================
+%%% Disconnect nodes not belonging to own global_groups
+%%%====================================================================================
+disconnect_nodes(DisconnectNodes) ->
+ lists:foreach(fun(Node) ->
+ {global_group, Node} ! {disconnect_node, node()},
+ global:node_disconnected(Node)
+ end,
+ DisconnectNodes).
+
+
+%%%====================================================================================
+%%% Force a nodedown on nodes not belonging to the own global_groups
+%%%====================================================================================
+force_nodedown(DisconnectNodes) ->
+ lists:foreach(fun(Node) ->
+ erlang:disconnect_node(Node),
+ global:node_disconnected(Node)
+ end,
+ DisconnectNodes).
+
+
+%%%====================================================================================
+%%% Get the current global_groups definition
+%%%====================================================================================
+get_own_nodes_with_errors() ->
+ case application:get_env(kernel, global_groups) of
+ undefined ->
+ {ok, all};
+ {ok, []} ->
+ {ok, all};
+ {ok, NodeGrps} ->
+ case catch config_scan(NodeGrps, publish_type) of
+ {error, Error} ->
+ {error, Error};
+ {_, _, NodesDef, _} ->
+ {ok, lists:sort(NodesDef)}
+ end
+ end.
+
+get_own_nodes() ->
+ case get_own_nodes_with_errors() of
+ {ok, all} ->
+ [];
+ {error, _} ->
+ [];
+ {ok, Nodes} ->
+ Nodes
+ end.
+
+%%%====================================================================================
+%%% -hidden command line argument
+%%%====================================================================================
+publish_arg() ->
+ case init:get_argument(hidden) of
+ {ok,[[]]} ->
+ hidden;
+ {ok,[["true"]]} ->
+ hidden;
+ _ ->
+ normal
+ end.
+
+
+%%%====================================================================================
+%%% Own group publication type and nodes
+%%%====================================================================================
+own_group() ->
+ case application:get_env(kernel, global_groups) of
+ undefined ->
+ no_group;
+ {ok, []} ->
+ no_group;
+ {ok, NodeGrps} ->
+ case catch config_scan(NodeGrps, publish_type) of
+ {error, _} ->
+ no_group;
+ {_, PubTpGrp, NodesDef, _} ->
+ {PubTpGrp, NodesDef}
+ end
+ end.
+
+
+%%%====================================================================================
+%%% Helper function which computes the publication list
+%%%====================================================================================
+publish_on_nodes(normal, no_group) ->
+ all;
+publish_on_nodes(hidden, no_group) ->
+ [];
+publish_on_nodes(normal, {normal, _}) ->
+ all;
+publish_on_nodes(hidden, {_, Nodes}) ->
+ Nodes;
+publish_on_nodes(_, {hidden, Nodes}) ->
+ Nodes.
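+
+%% A quick summary (sketch) of how the clauses above evaluate:
+%%
+%%   publish_on_nodes(normal, no_group)         -> all
+%%   publish_on_nodes(hidden, no_group)         -> []
+%%   publish_on_nodes(normal, {normal, Nodes})  -> all
+%%   publish_on_nodes(hidden, {normal, Nodes})  -> Nodes
+%%   publish_on_nodes(hidden, {hidden, Nodes})  -> Nodes
+%%   publish_on_nodes(normal, {hidden, Nodes})  -> Nodes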
+
+%%%====================================================================================
+%%% Update net_kernel's publication list
+%%%====================================================================================
+update_publish_nodes(PubArg) ->
+ update_publish_nodes(PubArg, no_group).
+update_publish_nodes(PubArg, MyGroup) ->
+ net_kernel:update_publish_nodes(publish_on_nodes(PubArg, MyGroup)).
+
+
+%%%====================================================================================
+%%% Fetch publication list
+%%%====================================================================================
+publish_on_nodes() ->
+ publish_on_nodes(publish_arg(), own_group()).
diff --git a/lib/kernel/src/global_search.erl b/lib/kernel/src/global_search.erl
new file mode 100644
index 0000000000..b723e18a1b
--- /dev/null
+++ b/lib/kernel/src/global_search.erl
@@ -0,0 +1,279 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(global_search).
+
+%% Search for globally registered names in the global groups.
+%% This is a helper module for global_group.erl.
+
+
+%% External exports
+-export([start/2]).
+-export([init_send/1]).
+-export([init_whereis/1]).
+-export([init_names/1]).
+
+
+%% ONLY for test purpose
+-export([send_test/1]).
+-export([whereis_test/1]).
+-export([names_test/1]).
+
+
+
+
+%%%====================================================================================
+%%% The search is done in a process separate from the global_group process
+%%%====================================================================================
+start(Flag, Arg) ->
+ case Flag of
+ send ->
+ spawn_link(?MODULE, init_send, [Arg]);
+ whereis ->
+ spawn_link(?MODULE, init_whereis, [Arg]);
+ names ->
+ spawn_link(?MODULE, init_names, [Arg]);
+ %% ONLY for test suites, tests what happens when this process exits.
+ send_test ->
+ spawn_link(?MODULE, send_test, [Arg]);
+ whereis_test ->
+ spawn_link(?MODULE, whereis_test, [Arg]);
+ names_test ->
+ spawn_link(?MODULE, names_test, [Arg])
+ end.
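+
+%% Illustrative sketch of how global_group is expected to call this module
+%% (argument shapes follow the init_* clauses below; GrpNodesList is a list
+%% of {GroupName, Nodes} pairs and From is a gen_server 'From' tag):
+%%
+%%   Pid = global_search:start(whereis, {any, GrpNodesList, Name, From}),
+%%   ... global_group collects the {find_name_res, ...} cast ...
+%%   Pid ! kill.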
+
+
+%%%====================================================================================
+%%%====================================================================================
+%%%====================================================================================
+%%% Search for a registered global Name anywhere (any), in a specified group,
+%%% or on a specified node.
+%%% Return the result to the global_group process on the own node and wait for
+%%% this process to be killed.
+%%%====================================================================================
+%%%====================================================================================
+%%%====================================================================================
+
+init_send({any, NodesList, Name, Msg, From}) ->
+ case whereis_any_loop(NodesList, Name) of
+ undefined ->
+ Res = {badarg,{Name, Msg}},
+ gen_server:cast(global_group, {send_res, Res, Name, Msg, self(), From});
+ Pid ->
+ gen_server:cast(global_group, {send_res, Pid, Name, Msg, self(), From})
+ end,
+ end_loop();
+init_send({group, Nodes, Name, Msg, From}) ->
+ case whereis_group_loop(Nodes, Name) of
+ group_down ->
+ Res = {badarg,{Name, Msg}},
+ gen_server:cast(global_group, {send_res, Res, Name, Msg, self(), From});
+ undefined ->
+ Res = {badarg,{Name, Msg}},
+ gen_server:cast(global_group, {send_res, Res, Name, Msg, self(), From});
+ Pid ->
+ gen_server:cast(global_group, {send_res, Pid, Name, Msg, self(), From})
+ end,
+ end_loop();
+init_send({node, Node, Name, Msg, From}) ->
+ case whereis_check_node(Node, Name) of
+ node_down ->
+ Res = {badarg,{Name, Msg}},
+ gen_server:cast(global_group, {send_res, Res, Name, Msg, self(), From});
+ undefined ->
+ Res = {badarg,{Name, Msg}},
+ gen_server:cast(global_group, {send_res, Res, Name, Msg, self(), From});
+ Pid ->
+ gen_server:cast(global_group, {send_res, Pid, Name, Msg, self(), From})
+ end,
+ end_loop().
+
+
+%%%====================================================================================
+%%%====================================================================================
+%%%====================================================================================
+%%% Search for a registered global Name anywhere (any), in a specified group,
+%%% or on a specified node.
+%%% Return the result to the global_group process on the own node and wait for
+%%% this process to be killed.
+%%%====================================================================================
+%%%====================================================================================
+%%%====================================================================================
+
+init_whereis({any, NodesList, Name, From}) ->
+ R = whereis_any_loop(NodesList, Name),
+ gen_server:cast(global_group, {find_name_res, R, self(), From}),
+ end_loop();
+init_whereis({group, Nodes, Name, From}) ->
+ case whereis_group_loop(Nodes, Name) of
+ group_down ->
+ gen_server:cast(global_group, {find_name_res, undefined, self(), From});
+ R ->
+ gen_server:cast(global_group, {find_name_res, R, self(), From})
+ end,
+ end_loop();
+init_whereis({node, Node, Name, From}) ->
+ case whereis_check_node(Node, Name) of
+ node_down ->
+ gen_server:cast(global_group, {find_name_res, undefined, self(), From});
+ R ->
+ gen_server:cast(global_group, {find_name_res, R, self(), From})
+ end,
+ end_loop().
+
+
+%%%====================================================================================
+%%%====================================================================================
+%%%====================================================================================
+%%% Get the registered names in a specified group or on a specified node.
+%%% Return the result to the global_group process on the own node and wait for
+%%% this process to be killed.
+%%%====================================================================================
+%%%====================================================================================
+%%%====================================================================================
+init_names({group, Nodes, From}) ->
+ case names_group_loop(Nodes) of
+ group_down ->
+ gen_server:cast(global_group, {registered_names_res, [], self(), From});
+ R ->
+ gen_server:cast(global_group, {registered_names_res, R, self(), From})
+ end,
+ end_loop();
+init_names({node, Node, From}) ->
+ case names_check_node(Node) of
+ node_down ->
+ gen_server:cast(global_group, {registered_names_res, [], self(), From});
+ R ->
+ gen_server:cast(global_group, {registered_names_res, R, self(), From})
+ end,
+ end_loop().
+
+%%%====================================================================================
+%%% Wait for the kill message.
+%%%====================================================================================
+
+-spec end_loop() -> no_return().
+
+end_loop() ->
+ receive
+ kill ->
+ exit(normal)
+ end.
+
+%%%====================================================================================
+%%% Search for the globally registered name in the whole known world.
+%%%====================================================================================
+whereis_any_loop([], _Name) ->
+ undefined;
+whereis_any_loop([{_Group_name, Nodes}|T], Name) ->
+ case whereis_group_loop(Nodes, Name) of
+ group_down ->
+ whereis_any_loop(T, Name);
+ undefined ->
+ whereis_any_loop(T, Name);
+ R ->
+ R
+ end.
+
+%%%====================================================================================
+%%% Search for the globally registered name in a specified global group.
+%%%====================================================================================
+whereis_group_loop([], _Name) ->
+ group_down;
+whereis_group_loop([Node|T], Name) ->
+ case whereis_check_node(Node, Name) of
+ node_down ->
+ whereis_group_loop(T, Name);
+ R ->
+ R
+ end.
+%%%====================================================================================
+%%% Search for the globally registered name on a specified node.
+%%%====================================================================================
+whereis_check_node(Node, Name) ->
+ case net_adm:ping(Node) of
+ pang ->
+ node_down;
+ pong ->
+ monitor_node(Node, true),
+ gen_server:cast({global_group, Node},{find_name, self(), Name}),
+ receive
+ {nodedown, Node} ->
+ node_down;
+ {find_name_res, Result} ->
+ monitor_node(Node, false),
+ Result
+ end
+ end.
+
+
+
+
+%%%====================================================================================
+%%% Search for all globally registered names in a specified global group.
+%%%====================================================================================
+names_group_loop([]) ->
+ group_down;
+names_group_loop([Node|T]) ->
+ case names_check_node(Node) of
+ node_down ->
+ names_group_loop(T);
+ R ->
+ R
+ end.
+%%%====================================================================================
+%%% Search for all globally registered names on a specified node.
+%%%====================================================================================
+names_check_node(Node) ->
+ case net_adm:ping(Node) of
+ pang ->
+ node_down;
+ pong ->
+ monitor_node(Node, true),
+ gen_server:cast({global_group, Node},{registered_names, self()}),
+ receive
+ {nodedown, Node} ->
+ node_down;
+ {registered_names_res, Result} ->
+ monitor_node(Node, false),
+ Result
+ end
+ end.
+
+
+
+
+
+
+%%%====================================================================================
+%%% Test what happens when this process exits.
+%%%====================================================================================
+send_test(_Args) ->
+ timer:sleep(5000),
+ exit(testing_exit).
+
+whereis_test(_Args) ->
+ timer:sleep(5000),
+ exit(testing_exit).
+
+names_test(_Args) ->
+ timer:sleep(5000),
+ exit(testing_exit).
+
+
+
diff --git a/lib/kernel/src/group.erl b/lib/kernel/src/group.erl
new file mode 100644
index 0000000000..a45ba34eae
--- /dev/null
+++ b/lib/kernel/src/group.erl
@@ -0,0 +1,689 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(group).
+
+%% A group leader process for user io.
+
+-export([start/2, start/3, server/3]).
+-export([interfaces/1]).
+
+start(Drv, Shell) ->
+ start(Drv, Shell, []).
+
+start(Drv, Shell, Options) ->
+ spawn_link(group, server, [Drv, Shell, Options]).
+
+server(Drv, Shell, Options) ->
+ process_flag(trap_exit, true),
+ edlin:init(),
+ put(line_buffer, proplists:get_value(line_buffer, Options, [])),
+ put(read_mode, list),
+ put(user_drv, Drv),
+ put(expand_fun,
+ proplists:get_value(expand_fun, Options,
+ fun(B) -> edlin_expand:expand(B) end)),
+ put(echo, proplists:get_value(echo, Options, true)),
+
+ start_shell(Shell),
+ server_loop(Drv, get(shell), []).
+
+%% Return the pid of user_drv and the shell process.
+%% Note: We can't ask the group process for this info since it
+%% may be busy waiting for data from the driver.
+interfaces(Group) ->
+ case process_info(Group, dictionary) of
+ {dictionary,Dict} ->
+ get_pids(Dict, [], false);
+ _ ->
+ []
+ end.
+
+get_pids([Drv = {user_drv,_} | Rest], Found, _) ->
+ get_pids(Rest, [Drv | Found], true);
+get_pids([Sh = {shell,_} | Rest], Found, Active) ->
+ get_pids(Rest, [Sh | Found], Active);
+get_pids([_ | Rest], Found, Active) ->
+ get_pids(Rest, Found, Active);
+get_pids([], Found, true) ->
+ Found;
+get_pids([], _Found, false) ->
+ [].
+
+%% start_shell(Shell)
+%% Spawn a shell with its group_leader set to ourselves from the beginning.
+%% If Shell is a pid, just set its group_leader.
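+%%
+%% Illustrative sketch (the stdlib shell functions named here are assumptions,
+%% not taken from this file) of the Shell shapes accepted by the clauses below:
+%%
+%%   group:start(Drv, {shell, start, []})          % {Mod,Func,Args}
+%%   group:start(Drv, fun() -> shell:start() end)  % fun of arity 0
+%%   group:start(Drv, ExistingShellPid)            % already-running pid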
+
+start_shell({Mod,Func,Args}) ->
+ start_shell1(Mod, Func, Args);
+start_shell({Node,Mod,Func,Args}) ->
+ start_shell1(net, call, [Node,Mod,Func,Args]);
+start_shell(Shell) when is_atom(Shell) ->
+ start_shell1(Shell, start, []);
+start_shell(Shell) when is_function(Shell) ->
+ start_shell1(Shell);
+start_shell(Shell) when is_pid(Shell) ->
+ group_leader(self(), Shell), % we are the shells group leader
+ link(Shell), % we're linked to it.
+ put(shell, Shell);
+start_shell(_Shell) ->
+ ok.
+
+start_shell1(M, F, Args) ->
+ G = group_leader(),
+ group_leader(self(), self()),
+ case catch apply(M, F, Args) of
+ Shell when is_pid(Shell) ->
+ group_leader(G, self()),
+ link(Shell), % we're linked to it.
+ put(shell, Shell);
+ Error -> % start failure
+ exit(Error) % let the group process crash
+ end.
+
+start_shell1(Fun) ->
+ G = group_leader(),
+ group_leader(self(), self()),
+ case catch Fun() of
+ Shell when is_pid(Shell) ->
+ group_leader(G, self()),
+ link(Shell), % we're linked to it.
+ put(shell, Shell);
+ Error -> % start failure
+ exit(Error) % let the group process crash
+ end.
+
+server_loop(Drv, Shell, Buf0) ->
+ receive
+ {io_request,From,ReplyAs,Req} when is_pid(From) ->
+ Buf = io_request(Req, From, ReplyAs, Drv, Buf0),
+ server_loop(Drv, Shell, Buf);
+ {driver_id,ReplyTo} ->
+ ReplyTo ! {self(),driver_id,Drv},
+ server_loop(Drv, Shell, Buf0);
+ {Drv, echo, Bool} ->
+ put(echo, Bool),
+ server_loop(Drv, Shell, Buf0);
+ {'EXIT',Drv,interrupt} ->
+ %% Send interrupt to the shell.
+ exit_shell(interrupt),
+ server_loop(Drv, Shell, Buf0);
+ {'EXIT',Drv,R} ->
+ exit(R);
+ {'EXIT',Shell,R} ->
+ exit(R);
+ %% We want to throw away any term that we don't handle (standard
+ %% practice in receive loops), but not any {Drv,_} tuples which are
+ %% handled in io_request/5.
+ NotDrvTuple when (not is_tuple(NotDrvTuple)) orelse
+ (tuple_size(NotDrvTuple) =/= 2) orelse
+ (element(1, NotDrvTuple) =/= Drv) ->
+ %% Ignore this unknown message.
+ server_loop(Drv, Shell, Buf0)
+ end.
+
+exit_shell(Reason) ->
+ case get(shell) of
+ undefined -> true;
+ Pid -> exit(Pid, Reason)
+ end.
+
+get_tty_geometry(Drv) ->
+ Drv ! {self(),tty_geometry},
+ receive
+ {Drv,tty_geometry,Geometry} ->
+ Geometry
+ after 2000 ->
+ timeout
+ end.
+get_unicode_state(Drv) ->
+ Drv ! {self(),get_unicode_state},
+ receive
+ {Drv,get_unicode_state,UniState} ->
+ UniState;
+ {Drv,get_unicode_state,error} ->
+ {error, internal}
+ after 2000 ->
+ {error,timeout}
+ end.
+set_unicode_state(Drv,Bool) ->
+ Drv ! {self(),set_unicode_state,Bool},
+ receive
+ {Drv,set_unicode_state,_OldUniState} ->
+ ok
+ after 2000 ->
+ timeout
+ end.
+
+
+io_request(Req, From, ReplyAs, Drv, Buf0) ->
+ case io_request(Req, Drv, Buf0) of
+ {ok,Reply,Buf} ->
+ io_reply(From, ReplyAs, Reply),
+ Buf;
+ {error,Reply,Buf} ->
+ io_reply(From, ReplyAs, Reply),
+ Buf;
+ {exit,R} ->
+ %% 'kill' instead of R, since the shell is not always in
+ %% a state where it is ready to handle a termination
+ %% message.
+ exit_shell(kill),
+ exit(R)
+ end.
+
+
+%% put_chars with unicode is the normal message; characters are always in
+%% standard unicode format.
+%% You might be tempted to send binaries unchecked, but the driver
+%% expects unicode, so that is what we should send...
+%% io_request({put_chars,unicode,Binary}, Drv, Buf) when is_binary(Binary) ->
+%% send_drv(Drv, {put_chars,Binary}),
+%% {ok,ok,Buf};
+io_request({put_chars,unicode,Chars}, Drv, Buf) ->
+ case catch unicode:characters_to_binary(Chars,utf8) of
+ Binary when is_binary(Binary) ->
+ send_drv(Drv, {put_chars, unicode, Binary}),
+ {ok,ok,Buf};
+ _ ->
+ {error,{error,{put_chars, unicode,Chars}},Buf}
+ end;
+io_request({put_chars,unicode,M,F,As}, Drv, Buf) ->
+ case catch apply(M, F, As) of
+ Binary when is_binary(Binary) ->
+ send_drv(Drv, {put_chars, unicode,Binary}),
+ {ok,ok,Buf};
+ Chars ->
+ case catch unicode:characters_to_binary(Chars,utf8) of
+ B when is_binary(B) ->
+ send_drv(Drv, {put_chars, unicode,B}),
+ {ok,ok,Buf};
+ _ ->
+ {error,{error,F},Buf}
+ end
+ end;
+io_request({put_chars,latin1,Binary}, Drv, Buf) when is_binary(Binary) ->
+ send_drv(Drv, {put_chars, unicode,unicode:characters_to_binary(Binary,latin1)}),
+ {ok,ok,Buf};
+io_request({put_chars,latin1,Chars}, Drv, Buf) ->
+ case catch unicode:characters_to_binary(Chars,latin1) of
+ Binary when is_binary(Binary) ->
+ send_drv(Drv, {put_chars, unicode,Binary}),
+ {ok,ok,Buf};
+ _ ->
+ {error,{error,{put_chars,Chars}},Buf}
+ end;
+io_request({put_chars,latin1,M,F,As}, Drv, Buf) ->
+ case catch apply(M, F, As) of
+ Binary when is_binary(Binary) ->
+ send_drv(Drv, {put_chars, unicode,unicode:characters_to_binary(Binary,latin1)}),
+ {ok,ok,Buf};
+ Chars ->
+ case catch unicode:characters_to_binary(Chars,latin1) of
+ B when is_binary(B) ->
+ send_drv(Drv, {put_chars, unicode,B}),
+ {ok,ok,Buf};
+ _ ->
+ {error,{error,F},Buf}
+ end
+ end;
+
+io_request({get_chars,Encoding,Prompt,N}, Drv, Buf) ->
+ get_chars(Prompt, io_lib, collect_chars, N, Drv, Buf, Encoding);
+io_request({get_line,Encoding,Prompt}, Drv, Buf) ->
+ get_chars(Prompt, io_lib, collect_line, [], Drv, Buf, Encoding);
+io_request({get_until,Encoding, Prompt,M,F,As}, Drv, Buf) ->
+ get_chars(Prompt, io_lib, get_until, {M,F,As}, Drv, Buf, Encoding);
+io_request({get_password,_Encoding},Drv,Buf) ->
+ get_password_chars(Drv, Buf);
+io_request({setopts,Opts}, Drv, Buf) when is_list(Opts) ->
+ setopts(Opts, Drv, Buf);
+io_request(getopts, Drv, Buf) ->
+ getopts(Drv, Buf);
+io_request({requests,Reqs}, Drv, Buf) ->
+ io_requests(Reqs, {ok,ok,Buf}, Drv);
+
+%% New in R12
+io_request({get_geometry,columns},Drv,Buf) ->
+ case get_tty_geometry(Drv) of
+ {W,_H} ->
+ {ok,W,Buf};
+ _ ->
+ {error,{error,enotsup},Buf}
+ end;
+io_request({get_geometry,rows},Drv,Buf) ->
+ case get_tty_geometry(Drv) of
+ {_W,H} ->
+ {ok,H,Buf};
+ _ ->
+ {error,{error,enotsup},Buf}
+ end;
+
+%% BC with pre-R13
+io_request({put_chars,Chars}, Drv, Buf) ->
+ io_request({put_chars,latin1,Chars}, Drv, Buf);
+io_request({put_chars,M,F,As}, Drv, Buf) ->
+ io_request({put_chars,latin1,M,F,As}, Drv, Buf);
+io_request({get_chars,Prompt,N}, Drv, Buf) ->
+ io_request({get_chars,latin1,Prompt,N}, Drv, Buf);
+io_request({get_line,Prompt}, Drv, Buf) ->
+ io_request({get_line,latin1,Prompt}, Drv, Buf);
+io_request({get_until, Prompt,M,F,As}, Drv, Buf) ->
+ io_request({get_until,latin1, Prompt,M,F,As}, Drv, Buf);
+io_request(get_password,Drv,Buf) ->
+ io_request({get_password,latin1},Drv,Buf);
+
+
+
+io_request(_, _Drv, Buf) ->
+ {error,{error,request},Buf}.
+
+%% Status = io_requests(RequestList, PrevStat, Drv)
+%% Process a list of output requests as long as the previous status is 'ok'.
+
+io_requests([R|Rs], {ok,ok,Buf}, Drv) ->
+ io_requests(Rs, io_request(R, Drv, Buf), Drv);
+io_requests([_|_], Error, _Drv) ->
+ Error;
+io_requests([], Stat, _) ->
+ Stat.
+
+%% io_reply(From, ReplyAs, Reply)
+%% The function for sending i/o command acknowledgement.
+%% The ACK contains the return value.
+
+io_reply(From, ReplyAs, Reply) ->
+ From ! {io_reply,ReplyAs,Reply}.
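+
+%% Example (sketch) of the round trip this implements; GroupPid stands for a
+%% pid returned by start/2,3 and Ref is any unique reply tag:
+%%
+%%   Ref = make_ref(),
+%%   GroupPid ! {io_request, self(), Ref, {put_chars, unicode, <<"hi">>}},
+%%   receive {io_reply, Ref, Reply} -> Reply end.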
+
+%% send_drv(Drv, Message)
+%% send_drv_reqs(Drv, Requests)
+
+send_drv(Drv, Msg) ->
+ Drv ! {self(),Msg}.
+
+send_drv_reqs(_Drv, []) -> [];
+send_drv_reqs(Drv, Rs) ->
+ send_drv(Drv, {requests,Rs}).
+
+expand_encoding([]) ->
+ [];
+expand_encoding([latin1 | T]) ->
+ [{encoding,latin1} | expand_encoding(T)];
+expand_encoding([unicode | T]) ->
+ [{encoding,unicode} | expand_encoding(T)];
+expand_encoding([H|T]) ->
+ [H|expand_encoding(T)].
+%% setopts
+setopts(Opts0,Drv,Buf) ->
+ Opts = proplists:unfold(
+ proplists:substitute_negations(
+ [{list,binary}],
+ expand_encoding(Opts0))),
+ case check_valid_opts(Opts) of
+ true ->
+ do_setopts(Opts,Drv,Buf);
+ false ->
+ {error,{error,enotsup},Buf}
+ end.
+check_valid_opts([]) ->
+ true;
+check_valid_opts([{binary,_}|T]) ->
+ check_valid_opts(T);
+check_valid_opts([{encoding,Valid}|T]) when Valid =:= unicode; Valid =:= utf8; Valid =:= latin1 ->
+ check_valid_opts(T);
+check_valid_opts([{echo,_}|T]) ->
+ check_valid_opts(T);
+check_valid_opts([{expand_fun,_}|T]) ->
+ check_valid_opts(T);
+check_valid_opts(_) ->
+ false.
+
+do_setopts(Opts, Drv, Buf) ->
+ put(expand_fun, proplists:get_value(expand_fun, Opts, get(expand_fun))),
+ put(echo, proplists:get_value(echo, Opts, get(echo))),
+ case proplists:get_value(encoding,Opts) of
+ Valid when Valid =:= unicode; Valid =:= utf8 ->
+ set_unicode_state(Drv,true);
+ latin1 ->
+ set_unicode_state(Drv,false);
+ _ ->
+ ok
+ end,
+ case proplists:get_value(binary, Opts, case get(read_mode) of
+ binary -> true;
+ _ -> false
+ end) of
+ true ->
+ put(read_mode, binary),
+ {ok,ok,Buf};
+ false ->
+ put(read_mode, list),
+ {ok,ok,Buf};
+ _ ->
+ {ok,ok,Buf}
+ end.
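+
+%% Example (sketch) of a typical client call and how it lands here after
+%% expand_encoding/1, substitute_negations/2 and unfold/1:
+%%
+%%   io:setopts([binary, unicode])
+%%     => Opts = [{binary,true},{encoding,unicode}]
+%%     => read_mode becomes 'binary' and the driver is switched to
+%%        unicode via set_unicode_state(Drv, true).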
+
+getopts(Drv,Buf) ->
+ Exp = {expand_fun, case get(expand_fun) of
+ Func when is_function(Func) ->
+ Func;
+ _ ->
+ false
+ end},
+ Echo = {echo, case get(echo) of
+ Bool when Bool =:= true; Bool =:= false ->
+ Bool;
+ _ ->
+ false
+ end},
+ Bin = {binary, case get(read_mode) of
+ binary ->
+ true;
+ _ ->
+ false
+ end},
+ Uni = {encoding, case get_unicode_state(Drv) of
+ true -> unicode;
+ _ -> latin1
+ end},
+ {ok,[Exp,Echo,Bin,Uni],Buf}.
+
+
+%% get_chars(Prompt, Module, Function, XtraArgument, Drv, Buffer, Encoding)
+%% Gets characters from the input Drv until the applied function
+%% returns {stop,Result,Rest}. Does not block output requests while
+%% waiting for input.
+%% Returns:
+%% {Result,NewSaveBuffer}
+%% {error,What,NewSaveBuffer}
+
+get_password_chars(Drv,Buf) ->
+ case get_password_line(Buf, Drv) of
+ {done, Line, Buf1} ->
+ {ok, Line, Buf1};
+ interrupted ->
+ {error, {error, interrupted}, []};
+ terminated ->
+ {exit, terminated}
+ end.
+
+get_chars(Prompt, M, F, Xa, Drv, Buf, Encoding) ->
+ Pbs = prompt_bytes(Prompt),
+ get_chars_loop(Pbs, M, F, Xa, Drv, Buf, start, Encoding).
+
+get_chars_loop(Pbs, M, F, Xa, Drv, Buf0, State, Encoding) ->
+ Result = case get(echo) of
+ true ->
+ get_line(Buf0, Pbs, Drv, Encoding);
+ false ->
+ % get_line_echo_off only deals with lists
+ % and does not need encoding...
+ get_line_echo_off(Buf0, Pbs, Drv)
+ end,
+ case Result of
+ {done,Line,Buf1} ->
+ get_chars_apply(Pbs, M, F, Xa, Drv, Buf1, State, Line, Encoding);
+ interrupted ->
+ {error,{error,interrupted},[]};
+ terminated ->
+ {exit,terminated}
+ end.
+
+get_chars_apply(Pbs, M, F, Xa, Drv, Buf, State0, Line, Encoding) ->
+ id(M,F),
+ case catch M:F(State0, cast(Line,get(read_mode), Encoding), Encoding, Xa) of
+ {stop,Result,Rest} ->
+ {ok,Result,append(Rest, Buf, Encoding)};
+ {'EXIT',_} ->
+ {error,{error,err_func(M, F, Xa)},[]};
+ State1 ->
+ get_chars_loop(Pbs, M, F, Xa, Drv, Buf, State1, Encoding)
+ end.
+
+id(M,F) ->
+ {M,F}.
+%% Convert error code to make it look as before
+err_func(io_lib, get_until, {_,F,_}) ->
+ F;
+err_func(_, F, _) ->
+ F.
+
+%% get_line(Chars, PromptBytes, Drv)
+%% Get a line, with line editing when possible. Handle other io requests
+%% while getting line.
+%% Returns:
+%% {done,LineChars,RestChars}
+%% interrupted
+
+get_line(Chars, Pbs, Drv, Encoding) ->
+ {more_chars,Cont,Rs} = edlin:start(Pbs),
+ send_drv_reqs(Drv, Rs),
+ get_line1(edlin:edit_line(Chars, Cont), Drv, new_stack(get(line_buffer)),
+ Encoding).
+
+get_line1({done,Line,Rest,Rs}, Drv, _Ls, _Encoding) ->
+ send_drv_reqs(Drv, Rs),
+ put(line_buffer, [Line|lists:delete(Line, get(line_buffer))]),
+ {done,Line,Rest};
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
+ when ((Mode =:= none) and (Char =:= $\^P))
+ or ((Mode =:= meta_left_sq_bracket) and (Char =:= $A)) ->
+ send_drv_reqs(Drv, Rs),
+ case up_stack(Ls0) of
+ {none,_Ls} ->
+ send_drv(Drv, beep),
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
+ {Lcs,Ls} ->
+ send_drv_reqs(Drv, edlin:erase_line(Cont)),
+ {more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)),
+ send_drv_reqs(Drv, Nrs),
+ get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1),
+ Ncont),
+ Drv,
+ Ls, Encoding)
+ end;
+get_line1({undefined,{_A,Mode,Char},_Cs,Cont,Rs}, Drv, Ls0, Encoding)
+ when ((Mode =:= none) and (Char =:= $\^N))
+ or ((Mode =:= meta_left_sq_bracket) and (Char =:= $B)) ->
+ send_drv_reqs(Drv, Rs),
+ case down_stack(Ls0) of
+ {none,Ls} ->
+ send_drv_reqs(Drv, edlin:erase_line(Cont)),
+ get_line1(edlin:start(edlin:prompt(Cont)), Drv, Ls, Encoding);
+ {Lcs,Ls} ->
+ send_drv_reqs(Drv, edlin:erase_line(Cont)),
+ {more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)),
+ send_drv_reqs(Drv, Nrs),
+ get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1),
+ Ncont),
+ Drv,
+ Ls, Encoding)
+ end;
+get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) ->
+ send_drv_reqs(Drv, Rs),
+ ExpandFun = get(expand_fun),
+ {Found, Add, Matches} = ExpandFun(Before),
+ case Found of
+ no -> send_drv(Drv, beep);
+ yes -> ok
+ end,
+ Cs1 = append(Add, Cs0, Encoding), %%XXX:PaN should this always be unicode?
+ Cs = case Matches of
+ [] -> Cs1;
+ _ -> MatchStr = edlin_expand:format_matches(Matches),
+ send_drv(Drv, {put_chars, unicode, unicode:characters_to_binary(MatchStr,unicode)}),
+ [$\^L | Cs1]
+ end,
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
+get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Ls, Encoding) ->
+ send_drv_reqs(Drv, Rs),
+ send_drv(Drv, beep),
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Ls, Encoding);
+get_line1({What,Cont0,Rs}, Drv, Ls, Encoding) ->
+ send_drv_reqs(Drv, Rs),
+ receive
+ {Drv,{data,Cs}} ->
+ get_line1(edlin:edit_line(Cs, Cont0), Drv, Ls, Encoding);
+ {Drv,eof} ->
+ get_line1(edlin:edit_line(eof, Cont0), Drv, Ls, Encoding);
+ {io_request,From,ReplyAs,Req} when is_pid(From) ->
+ {more_chars,Cont,_More} = edlin:edit_line([], Cont0),
+ send_drv_reqs(Drv, edlin:erase_line(Cont)),
+ io_request(Req, From, ReplyAs, Drv, []), %WRONG!!!
+ send_drv_reqs(Drv, edlin:redraw_line(Cont)),
+ get_line1({more_chars,Cont,[]}, Drv, Ls, Encoding);
+ {'EXIT',Drv,interrupt} ->
+ interrupted;
+ {'EXIT',Drv,_} ->
+ terminated
+ after
+ get_line_timeout(What)->
+ get_line1(edlin:edit_line([], Cont0), Drv, Ls, Encoding)
+ end.
+
+
+get_line_echo_off(Chars, Pbs, Drv) ->
+ send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]),
+ get_line_echo_off1(edit_line(Chars,[]), Drv).
+
+get_line_echo_off1({Chars,[]}, Drv) ->
+ receive
+ {Drv,{data,Cs}} ->
+ get_line_echo_off1(edit_line(Cs, Chars), Drv);
+ {Drv,eof} ->
+ get_line_echo_off1(edit_line(eof, Chars), Drv);
+ {io_request,From,ReplyAs,Req} when is_pid(From) ->
+ io_request(Req, From, ReplyAs, Drv, []),
+ get_line_echo_off1({Chars,[]}, Drv);
+ {'EXIT',Drv,interrupt} ->
+ interrupted;
+ {'EXIT',Drv,_} ->
+ terminated
+ end;
+get_line_echo_off1({Chars,Rest}, _Drv) ->
+ {done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}.
+
+%% We support line editing for the ICANON mode except the following
+%% line editing characters, which already have another meaning in
+%% echo-on mode (See Advanced Programming in the Unix Environment, 2nd ed,
+%% Stevens, page 638):
+%% - ^u in posix/icanon mode: erase-line, prefix-arg in edlin
+%% - ^t in posix/icanon mode: status, transpose-char in edlin
+%% - ^d in posix/icanon mode: eof, delete-forward in edlin
+%% - ^r in posix/icanon mode: reprint (silly in echo-off mode :-))
+%% - ^w in posix/icanon mode: word-erase (produces a beep in edlin)
+edit_line(eof, Chars) ->
+ {Chars,done};
+edit_line([],Chars) ->
+ {Chars,[]};
+edit_line([$\r,$\n|Cs],Chars) ->
+ {[$\n | Chars], remainder_after_nl(Cs)};
+edit_line([NL|Cs],Chars) when NL =:= $\r; NL =:= $\n ->
+ {[$\n | Chars], remainder_after_nl(Cs)};
+edit_line([Erase|Cs],[]) when Erase =:= $\177; Erase =:= $\^H ->
+ edit_line(Cs,[]);
+edit_line([Erase|Cs],[_|Chars]) when Erase =:= $\177; Erase =:= $\^H ->
+ edit_line(Cs,Chars);
+edit_line([Char|Cs],Chars) ->
+ edit_line(Cs,[Char|Chars]).
+
+remainder_after_nl("") -> done;
+remainder_after_nl(Cs) -> Cs.
+
+
+
+get_line_timeout(blink) -> 1000;
+get_line_timeout(more_chars) -> infinity.
+
+new_stack(Ls) -> {stack,Ls,{},[]}.
+
+up_stack({stack,[L|U],{},D}) ->
+ {L,{stack,U,L,D}};
+up_stack({stack,[],{},D}) ->
+ {none,{stack,[],{},D}};
+up_stack({stack,U,C,D}) ->
+ up_stack({stack,U,{},[C|D]}).
+
+down_stack({stack,U,{},[L|D]}) ->
+ {L,{stack,U,L,D}};
+down_stack({stack,U,{},[]}) ->
+ {none,{stack,U,{},[]}};
+down_stack({stack,U,C,D}) ->
+ down_stack({stack,[C|U],{},D}).
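+
+%% Example (sketch) of how the {stack,Up,Current,Down} history behaves:
+%%
+%%   S0 = new_stack(["older\n", "oldest\n"]),
+%%   {"older\n",  S1} = up_stack(S0),
+%%   {"oldest\n", S2} = up_stack(S1),
+%%   {"older\n",  _S3} = down_stack(S2).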
+
+%% This is get_line without line editing (except for backspace) and
+%% without echo.
+get_password_line(Chars, Drv) ->
+ get_password1(edit_password(Chars,[]),Drv).
+
+get_password1({Chars,[]}, Drv) ->
+ receive
+ {Drv,{data,Cs}} ->
+ get_password1(edit_password(Cs,Chars),Drv);
+ {io_request,From,ReplyAs,Req} when is_pid(From) ->
+ %send_drv_reqs(Drv, [{delete_chars, -length(Pbs)}]),
+ io_request(Req, From, ReplyAs, Drv, []), %WRONG!!!
+ %% I guess the reason the above line is wrong is that Buf is
+ %% set to []. But do we expect anything but plain output?
+
+ get_password1({Chars, []}, Drv);
+ {'EXIT',Drv,interrupt} ->
+ interrupted;
+ {'EXIT',Drv,_} ->
+ terminated
+ end;
+get_password1({Chars,Rest},Drv) ->
+ send_drv_reqs(Drv,[{put_chars, unicode, "\n"}]),
+ {done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}.
+
+edit_password([],Chars) ->
+ {Chars,[]};
+edit_password([$\r],Chars) ->
+ {Chars,done};
+edit_password([$\r|Cs],Chars) ->
+ {Chars,Cs};
+edit_password([$\177|Cs],[]) -> %% Being able to erase characters is
+ edit_password(Cs,[]); %% the least we should offer, but
+edit_password([$\177|Cs],[_|Chars]) ->%% is backspace enough?
+ edit_password(Cs,Chars);
+edit_password([Char|Cs],Chars) ->
+ edit_password(Cs,[Char|Chars]).
+
+%% prompt_bytes(Prompt)
+%% Return a flat list of bytes for the Prompt.
+prompt_bytes(Prompt) ->
+ lists:flatten(io_lib:format_prompt(Prompt)).
+
+cast(L, binary,latin1) when is_list(L) ->
+ list_to_binary(L);
+cast(L, list, latin1) when is_list(L) ->
+ binary_to_list(list_to_binary(L)); %% Exception if not bytes
+cast(L, binary,unicode) when is_list(L) ->
+ unicode:characters_to_binary(L,utf8);
+cast(Other, _, _) ->
+ Other.
+
+append(B, L, latin1) when is_binary(B) ->
+ binary_to_list(B)++L;
+append(B, L, unicode) when is_binary(B) ->
+ unicode:characters_to_list(B,utf8)++L;
+append(L1, L2, _) when is_list(L1) ->
+ L1++L2;
+append(_Eof, L, _) ->
+ L.
diff --git a/lib/kernel/src/heart.erl b/lib/kernel/src/heart.erl
new file mode 100644
index 0000000000..bad0950fca
--- /dev/null
+++ b/lib/kernel/src/heart.erl
@@ -0,0 +1,271 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(heart).
+
+%%%--------------------------------------------------------------------
+%%% This is a rewrite of pre_heart from BS.3.
+%%%
+%%% The purpose of this process-module is to act as a supervisor
+%%% of the entire Erlang system. This 'heart' beats often enough to
+%%% keep the external port program from rebooting the entire
+%%% system. If, however, the Erlang emulator hangs, a reboot is
+%%% performed.
+%%%
+%%% It recognizes the '-heart' command line flag.
+%%%--------------------------------------------------------------------
+-export([start/0, init/2, set_cmd/1, clear_cmd/0, get_cmd/0, cycle/0]).
+
+-define(START_ACK, 1).
+-define(HEART_BEAT, 2).
+-define(SHUT_DOWN, 3).
+-define(SET_CMD, 4).
+-define(CLEAR_CMD, 5).
+-define(GET_CMD, 6).
+-define(HEART_CMD, 7).
+
+-define(TIMEOUT, 5000).
+-define(CYCLE_TIMEOUT, 10000).
+
+%%---------------------------------------------------------------------
+
+-spec start() -> 'ignore' | {'error', term()} | {'ok', pid()}.
+
+start() ->
+ case whereis(heart) of
+ undefined ->
+ %% As heart survives an init:restart/0, the Parent
+ %% of heart must be init.
+ %% The init process is responsible for creating a link
+ %% to heart.
+ Pid = spawn(?MODULE, init, [self(), whereis(init)]),
+ wait_for_init_ack(Pid);
+ Pid ->
+ {ok, Pid}
+ end.
+
+wait_for_init_ack(From) ->
+ receive
+ {ok, From} ->
+ {ok, From};
+ {no_heart, From} ->
+ ignore;
+ {Error, From} ->
+ {error, Error}
+ end.
+
+-spec init(pid(), pid()) -> {'no_heart', pid()} | {'start_error', pid()}.
+
+init(Starter, Parent) ->
+ process_flag(trap_exit, true),
+ process_flag(priority, max),
+ register(heart, self()),
+ case catch start_portprogram() of
+ {ok, Port} ->
+ Starter ! {ok, self()},
+ loop(Parent, Port, "");
+ no_heart ->
+ Starter ! {no_heart, self()};
+ error ->
+ Starter ! {start_error, self()}
+ end.
+
+-spec set_cmd(string()) -> 'ok' | {'error', {'bad_cmd', string()}}.
+
+set_cmd(Cmd) ->
+ heart ! {self(), set_cmd, Cmd},
+ wait().
+
+-spec get_cmd() -> 'ok'.
+
+get_cmd() ->
+ heart ! {self(), get_cmd},
+ wait().
+
+-spec clear_cmd() -> {'ok', string()}.
+
+clear_cmd() ->
+ heart ! {self(), clear_cmd},
+ wait().
+
+
+%%% Should be used solely by the release handler!!!!!!!
+-spec cycle() -> 'ok' | {'error', term()}.
+
+cycle() ->
+ heart ! {self(), cycle},
+ wait().
+
+wait() ->
+ receive
+ {heart, Res} ->
+ Res
+ end.
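+
+%% Usage sketch (assumes the emulator was started with -heart; the command
+%% string is just an example path):
+%%
+%%   heart:set_cmd("/usr/local/bin/restart_node"),
+%%   heart:get_cmd(),    % current command, as returned by get_heart_cmd/1
+%%   heart:clear_cmd().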
+
+start_portprogram() ->
+ check_start_heart(),
+ HeartCmd = "heart -pid " ++ os:getpid() ++ " " ++
+ get_heart_timeouts(),
+ try open_port({spawn, HeartCmd}, [{packet, 2}]) of
+ Port when is_port(Port) ->
+ case wait_ack(Port) of
+ ok ->
+ {ok, Port};
+ {error, Reason} ->
+ report_problem({{port_problem, Reason},
+ {heart, start_portprogram, []}}),
+ error
+ end
+ catch
+ _:Reason ->
+ report_problem({{open_port, Reason},
+ {heart, start_portprogram, []}}),
+ error
+ end.
+
+get_heart_timeouts() ->
+ HeartOpts = case os:getenv("HEART_BEAT_TIMEOUT") of
+ false -> "";
+ H when is_list(H) ->
+ "-ht " ++ H
+ end,
+ HeartOpts ++ case os:getenv("HEART_BEAT_BOOT_DELAY") of
+ false -> "";
+ W when is_list(W) ->
+ " -wt " ++ W
+ end.
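+
+%% Example (sketch): with HEART_BEAT_TIMEOUT=30 and HEART_BEAT_BOOT_DELAY
+%% unset, the port command built in start_portprogram/0 becomes something
+%% like
+%%
+%%   "heart -pid 12345 -ht 30"
+%%
+%% where 12345 stands in for the value of os:getpid().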
+
+check_start_heart() ->
+ case init:get_argument(heart) of
+ {ok, [[]]} ->
+ ok;
+ error ->
+ throw(no_heart);
+ {ok, [[X|_]|_]} ->
+ report_problem({{bad_heart_flag, list_to_atom(X)},
+ {heart, check_start_heart, []}}),
+ throw(error)
+ end.
+
+wait_ack(Port) ->
+ receive
+ {Port, {data, [?START_ACK]}} ->
+ ok;
+ {'EXIT', Port, badsig} -> % Since this is not synchronous, skip it!
+ wait_ack(Port);
+ {'EXIT', Port, Reason} -> % The port really terminated.
+ {error, Reason}
+ end.
+
+loop(Parent, Port, Cmd) ->
+ send_heart_beat(Port),
+ receive
+ {From, set_cmd, NewCmd} when is_list(NewCmd), length(NewCmd) < 2047 ->
+ send_heart_cmd(Port, NewCmd),
+ wait_ack(Port),
+ From ! {heart, ok},
+ loop(Parent, Port, NewCmd);
+ {From, set_cmd, NewCmd} ->
+ From ! {heart, {error, {bad_cmd, NewCmd}}},
+ loop(Parent, Port, Cmd);
+ {From, clear_cmd} ->
+ From ! {heart, ok},
+ send_heart_cmd(Port, ""),
+ wait_ack(Port),
+ loop(Parent, Port, "");
+ {From, get_cmd} ->
+ From ! {heart, get_heart_cmd(Port)},
+ loop(Parent, Port, Cmd);
+ {From, cycle} ->
+ %% Calls back to loop
+ do_cycle_port_program(From, Parent, Port, Cmd);
+ {'EXIT', Parent, shutdown} ->
+ no_reboot_shutdown(Port);
+ {'EXIT', Parent, Reason} ->
+ exit(Port, Reason),
+ exit(Reason);
+ {'EXIT', Port, badsig} -> % we can ignore badsig-messages!
+ loop(Parent, Port, Cmd);
+ {'EXIT', Port, _Reason} ->
+ exit({port_terminated, {heart, loop, [Parent, Port, Cmd]}});
+ _ ->
+ loop(Parent, Port, Cmd)
+ after
+ ?TIMEOUT ->
+ loop(Parent, Port, Cmd)
+ end.
+
+-spec no_reboot_shutdown(port()) -> no_return().
+
+no_reboot_shutdown(Port) ->
+ send_shutdown(Port),
+ receive
+ {'EXIT', Port, Reason} when Reason =/= badsig ->
+ exit(normal)
+ end.
+
+do_cycle_port_program(Caller, Parent, Port, Cmd) ->
+ case catch start_portprogram() of
+ {ok, NewPort} ->
+ send_shutdown(Port),
+ receive
+ {'EXIT', Port, _Reason} ->
+ send_heart_cmd(NewPort, Cmd),
+ Caller ! {heart, ok},
+ loop(Parent, NewPort, Cmd)
+ after
+ ?CYCLE_TIMEOUT ->
+ %% Huh! Two heart port programs running...
+ %% well, the old one has to be sick not to respond
+ %% so we'll settle for the new one...
+ send_heart_cmd(NewPort, Cmd),
+ Caller ! {heart, {error, stop_error}},
+ loop(Parent, NewPort, Cmd)
+ end;
+ no_heart ->
+ Caller ! {heart, {error, no_heart}},
+ loop(Parent, Port, Cmd);
+ error ->
+ Caller ! {heart, {error, start_error}},
+ loop(Parent, Port, Cmd)
+ end.
+
+
+%% "Beates" the heart once.
+send_heart_beat(Port) -> Port ! {self(), {command, [?HEART_BEAT]}}.
+
+%% Set a new HEART_COMMAND.
+send_heart_cmd(Port, []) ->
+ Port ! {self(), {command, [?CLEAR_CMD]}};
+send_heart_cmd(Port, Cmd) ->
+ Port ! {self(), {command, [?SET_CMD|Cmd]}}.
+
+get_heart_cmd(Port) ->
+ Port ! {self(), {command, [?GET_CMD]}},
+ receive
+ {Port, {data, [?HEART_CMD | Cmd]}} ->
+ {ok, Cmd}
+ end.
+
+%% Sends shutdown command to the port.
+send_shutdown(Port) -> Port ! {self(), {command, [?SHUT_DOWN]}}.
+
+%% We must report using erlang:display/1 since we don't know whether
+%% there is an error_logger available or not.
+report_problem(Error) ->
+ erlang:display(Error).
diff --git a/lib/kernel/src/hipe_ext_format.hrl b/lib/kernel/src/hipe_ext_format.hrl
new file mode 100644
index 0000000000..102cb49a2b
--- /dev/null
+++ b/lib/kernel/src/hipe_ext_format.hrl
@@ -0,0 +1,41 @@
+%% hipe_ext_format.hrl
+%% Definitions for unified external object format
+%% Currently: sparc, x86, amd64
+%% Authors: Erik Johansson, Ulf Magnusson
+
+-define(LOAD_ATOM,0).
+-define(LOAD_ADDRESS,1).
+-define(CALL_REMOTE,2).
+-define(CALL_LOCAL,3).
+-define(SDESC,4).
+-define(X86ABSPCREL,5).
+
+-define(TERM,0).
+-define(BLOCK,1).
+-define(SORTEDBLOCK,2).
+
+-define(CONST_TYPE2EXT(T),
+ case T of
+ term -> ?TERM;
+ sorted_block -> ?SORTEDBLOCK;
+ block -> ?BLOCK
+ end).
+
+-define(EXT2CONST_TYPE(E),
+ case E of
+ ?TERM -> term;
+ ?SORTEDBLOCK -> sorted_block;
+ ?BLOCK -> block
+ end).
+
+-define(EXT2PATCH_TYPE(E),
+ case E of
+ ?LOAD_ATOM -> load_atom;
+ ?LOAD_ADDRESS -> load_address;
+ ?SDESC -> sdesc;
+ ?X86ABSPCREL -> x86_abs_pcrel;
+ ?CALL_REMOTE -> call_remote;
+ ?CALL_LOCAL -> call_local
+ end).
+
+-define(STACK_DESC(ExnRA, FSize, Arity, Live), {ExnRA, FSize, Arity, Live}).
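+
+%% Example (sketch) of the round trips these macros provide:
+%%
+%%   ?CONST_TYPE2EXT(sorted_block) =:= ?SORTEDBLOCK
+%%   ?EXT2CONST_TYPE(?SORTEDBLOCK) =:= sorted_block
+%%   ?EXT2PATCH_TYPE(?CALL_REMOTE) =:= call_remote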
diff --git a/lib/kernel/src/hipe_unified_loader.erl b/lib/kernel/src/hipe_unified_loader.erl
new file mode 100644
index 0000000000..7e26d57ced
--- /dev/null
+++ b/lib/kernel/src/hipe_unified_loader.erl
@@ -0,0 +1,894 @@
+%% -*- erlang-indent-level: 2 -*-
+%% =======================================================================
+%% Filename : hipe_unified_loader.erl
+%% Module : hipe_unified_loader
+%% Purpose : To load code into memory and link it to the system.
+%% Notes : See hipe_ext_format.hrl for description of the external
+%% format.
+%% =======================================================================
+%% TODO:
+%% Problems with the order in which things are done.
+%% export_funs should atomically patch references to make fe and
+%% make beam stubs. !!
+%%
+%% Each function should have two proper databases.
+%% Describe the patch algorithm:
+%% For each function MFA that is (re)compiled to Address:
+%% 1. For the old MFA
+%% a. RefsTo = MFA->refers_to
+%% b. for each {F,Adr} in RefsTo: remove Adr from F->is_referred
+%% c. RefsFrom = MFA->is_referred
+%% d. For each {Adr,Type} in RefsFrom:
+%% update instr at Adr to refer to Address instead.
+%% 2. For the new MFA
+%% a. MFA->is_referred=RefsFrom
+%% 3. For each function F referenced in the code at Offset:
+%% add {Address+Offset,Type} to F->is_referred
+%% add {F,Address+Offset} to MFA->refers_to
+%% 4. Make Address the entrypoint for MFA
+%%
+%% Add exporting of exported constants.
+%% Add freeing of old code.
+%% Inline hipe_sparc_ext_format somehow.
+%% =======================================================================
+
+-module(hipe_unified_loader).
+
+-export([chunk_name/1,
+ %% Only the code and code_server modules may call the entries below!
+ load_hipe_modules/0,
+ load_native_code/2,
+ post_beam_load/1,
+ load_module/3,
+ load/2]).
+
+%%-define(DEBUG,true).
+-define(DO_ASSERT,true).
+-define(HIPE_LOGGING,true).
+
+-include("../../hipe/main/hipe.hrl").
+-include("hipe_ext_format.hrl").
+
+%% Currently, there is no need to expose these to the outside world.
+-define(HS8P_TAG,"HS8P").
+-define(HPPC_TAG,"HPPC").
+-define(HP64_TAG,"HP64").
+-define(HARM_TAG,"HARM").
+-define(HX86_TAG,"HX86").
+-define(HA64_TAG,"HA64").
+
+%%========================================================================
+
+-spec chunk_name(hipe_architecture()) -> string().
+%% @doc
+%% Returns the native code chunk name of the Architecture.
+%% (On which presumably we are running.)
+
+chunk_name(Architecture) ->
+ case Architecture of
+ amd64 -> ?HA64_TAG; %% HiPE, x86_64, (implicit: 64-bit, Unix)
+ arm -> ?HARM_TAG; %% HiPE, arm, v5 (implicit: 32-bit, Linux)
+ powerpc -> ?HPPC_TAG; %% HiPE, PowerPC (implicit: 32-bit, Linux)
+ ppc64 -> ?HP64_TAG; %% HiPE, ppc64 (implicit: 64-bit, Linux)
+ ultrasparc -> ?HS8P_TAG; %% HiPE, SPARC, V8+ (implicit: 32-bit)
+ x86 -> ?HX86_TAG %% HiPE, x86, (implicit: Unix)
+ %% Future: HSV9 %% HiPE, SPARC, V9 (implicit: 64-bit)
+ %% HW32 %% HiPE, x86, Win32
+ end.
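+
+%% For example (sketch), on an x86 runtime
+%%
+%%   chunk_name(erlang:system_info(hipe_architecture))
+%%
+%% evaluates to "HX86", the chunk tag that load_native_code/2 below looks
+%% up in the BEAM binary via code:get_chunk/2.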
+
+%%========================================================================
+
+-spec load_hipe_modules() -> 'ok'.
+%% @doc
+%% Ensures HiPE's loader modules are loaded.
+%% Called from code.erl at start-up.
+
+load_hipe_modules() ->
+ ok.
+
+%%========================================================================
+
+-spec load_native_code(Mod, binary()) -> 'no_native' | {'module', Mod}
+ when is_subtype(Mod, atom()).
+%% @doc
+%% Loads the native code of a module Mod.
+%% Returns {module,Mod} on success (for compatibility with
+%% code:load_file/1) and the atom `no_native' on failure.
+
+load_native_code(Mod, Bin) when is_atom(Mod), is_binary(Bin) ->
+ Architecture = erlang:system_info(hipe_architecture),
+ try chunk_name(Architecture) of
+ ChunkTag ->
+ %% patch_to_emu(Mod),
+ case code:get_chunk(Bin, ChunkTag) of
+ undefined -> no_native;
+ NativeCode when is_binary(NativeCode) ->
+ OldReferencesToPatch = patch_to_emu_step1(Mod),
+ case load_module(Mod, NativeCode, Bin, OldReferencesToPatch) of
+ bad_crc -> no_native;
+ Result -> Result
+ end
+ end
+ catch
+ _:_ ->
+ %% Unknown HiPE architecture. Can't happen (in principle).
+ no_native
+ end.
+
+%%========================================================================
+
+-spec post_beam_load(atom()) -> 'ok'.
+
+post_beam_load(Mod) when is_atom(Mod) ->
+ Architecture = erlang:system_info(hipe_architecture),
+ try chunk_name(Architecture) of _ChunkTag -> patch_to_emu(Mod)
+ catch _:_ -> ok
+ end.
+
+%%========================================================================
+
+version_check(Version, Mod) when is_atom(Mod) ->
+ Ver = ?VERSION_STRING(),
+ case Version < Ver of
+ true ->
+ ?msg("WARNING: Module ~w was compiled with HiPE version ~s\n",
+ [Mod, Version]);
+ _ -> ok
+ end.
+
+%%========================================================================
+
+-spec load_module(Mod, binary(), _) -> 'bad_crc' | {'module',Mod}
+ when is_subtype(Mod,atom()).
+load_module(Mod, Bin, Beam) ->
+ load_module(Mod, Bin, Beam, []).
+
+load_module(Mod, Bin, Beam, OldReferencesToPatch) ->
+ ?debug_msg("************ Loading Module ~w ************\n",[Mod]),
+ %% Loading a whole module, let the BEAM loader patch closures.
+ put(hipe_patch_closures, false),
+ load_common(Mod, Bin, Beam, OldReferencesToPatch).
+
+%%========================================================================
+
+-spec load(Mod, binary()) -> 'bad_crc' | {'module',Mod}
+ when is_subtype(Mod,atom()).
+load(Mod, Bin) ->
+ ?debug_msg("********* Loading funs in module ~w *********\n",[Mod]),
+ %% Loading just some functions in a module; patch closures separately.
+ put(hipe_patch_closures, true),
+ load_common(Mod, Bin, [], []).
+
+%%------------------------------------------------------------------------
+
+load_common(Mod, Bin, Beam, OldReferencesToPatch) ->
+ %% Unpack the binary.
+ [{Version, CheckSum},
+ ConstAlign, ConstSize, ConstMap, LabelMap, ExportMap,
+ CodeSize, CodeBinary, Refs,
+ 0,[] % ColdSize, CRrefs
+ ] = binary_to_term(Bin),
+ %% Check that we are loading up-to-date code.
+ version_check(Version, Mod),
+ case hipe_bifs:check_crc(CheckSum) of
+ false ->
+ ?msg("Warning: not loading native code for module ~w: "
+ "it was compiled for an incompatible runtime system; "
+ "please regenerate native code for this runtime system\n", [Mod]),
+ bad_crc;
+ true ->
+ %% Create data segment
+ {ConstAddr,ConstMap2} = create_data_segment(ConstAlign, ConstSize, ConstMap),
+ %% Find callees for which we may need trampolines.
+ CalleeMFAs = find_callee_mfas(Refs),
+ %% Write the code to memory.
+ {CodeAddress,Trampolines} = enter_code(CodeSize, CodeBinary, CalleeMFAs, Mod, Beam),
+ %% Construct CalleeMFA-to-trampoline mapping.
+ TrampolineMap = mk_trampoline_map(CalleeMFAs, Trampolines),
+ %% Patch references to code labels in data seg.
+ patch_consts(LabelMap, ConstAddr, CodeAddress),
+ %% Find out which functions are being loaded (and where).
+ %% Note: Addresses are sorted descending.
+ {MFAs,Addresses} = exports(ExportMap, CodeAddress),
+ %% Remove references to old versions of the module.
+ ReferencesToPatch = get_refs_from(MFAs, []),
+ remove_refs_from(MFAs),
+ %% Patch all dynamic references in the code.
+ %% Function calls, Atoms, Constants, System calls
+ patch(Refs, CodeAddress, ConstMap2, Addresses, TrampolineMap),
+ %% Tell the system where the loaded funs are.
+ %% (patches the BEAM code to redirect to native.)
+ case Beam of
+ [] ->
+ export_funs(Addresses);
+ BeamBinary when is_binary(BeamBinary) ->
+ %% Find all closures in the code.
+ ClosurePatches = find_closure_patches(Refs),
+ AddressesOfClosuresToPatch =
+ calculate_addresses(ClosurePatches, CodeAddress, Addresses),
+ export_funs(Addresses),
+ export_funs(Mod, BeamBinary, Addresses, AddressesOfClosuresToPatch)
+ end,
+ %% Redirect references to the old module to the new module's BEAM stub.
+ patch_to_emu_step2(OldReferencesToPatch),
+ %% Patch referring functions to call the new function
+ %% The call to export_funs/1 above updated the native addresses
+ %% for the targets, so passing 'Addresses' is not needed.
+ redirect(ReferencesToPatch),
+ ?debug_msg("****************Loader Finished****************\n", []),
+ {module,Mod} % for compatibility with code:load_file/1
+ end.
+
+%%----------------------------------------------------------------
+%% Scan the list of patches and build a set (returned as a tuple)
+%% of the callees for which we may need trampolines.
+%%
+find_callee_mfas(Patches) when is_list(Patches) ->
+ case erlang:system_info(hipe_architecture) of
+ amd64 -> [];
+ arm -> find_callee_mfas(Patches, gb_sets:empty(), false);
+ powerpc -> find_callee_mfas(Patches, gb_sets:empty(), true);
+ %% ppc64 -> find_callee_mfas(Patches, gb_sets:empty(), true);
+ ultrasparc -> [];
+ x86 -> []
+ end.
+
+find_callee_mfas([{Type,Data}|Patches], MFAs, SkipErtsSyms) ->
+ NewMFAs =
+ case ?EXT2PATCH_TYPE(Type) of
+ call_local -> add_callee_mfas(Data, MFAs, SkipErtsSyms);
+ call_remote -> add_callee_mfas(Data, MFAs, SkipErtsSyms);
+ %% load_address(function) deliberately ignored
+ _ -> MFAs
+ end,
+ find_callee_mfas(Patches, NewMFAs, SkipErtsSyms);
+find_callee_mfas([], MFAs, _SkipErtsSyms) ->
+ list_to_tuple(gb_sets:to_list(MFAs)).
+
+add_callee_mfas([{DestMFA,_Offsets}|Refs], MFAs, SkipErtsSyms) ->
+ NewMFAs =
+ case SkipErtsSyms of
+ true ->
+ %% On PowerPC we put the runtime system below the
+ %% 32M boundary, which allows BIFs and primops to
+ %% be called with ba/bla instructions. Hence we do
+ %% not need trampolines for BIFs or primops.
+ case bif_address(DestMFA) of
+ false -> gb_sets:add_element(DestMFA, MFAs);
+ BifAddress when is_integer(BifAddress) -> MFAs
+ end;
+ false ->
+ %% On ARM we also need trampolines for BIFs and primops.
+ gb_sets:add_element(DestMFA, MFAs)
+ end,
+ add_callee_mfas(Refs, NewMFAs, SkipErtsSyms);
+add_callee_mfas([], MFAs, _SkipErtsSyms) -> MFAs.
+
+%%----------------------------------------------------------------
+%%
+mk_trampoline_map([], []) -> []; % archs not using trampolines
+mk_trampoline_map(CalleeMFAs, Trampolines) ->
+ SizeofLong =
+ case erlang:system_info(hipe_architecture) of
+ amd64 -> 8;
+ _ -> 4
+ end,
+ mk_trampoline_map(tuple_size(CalleeMFAs), CalleeMFAs,
+ Trampolines, SizeofLong, gb_trees:empty()).
+
+mk_trampoline_map(I, CalleeMFAs, Trampolines, SizeofLong, Map) when I >= 1 ->
+ MFA = element(I, CalleeMFAs),
+ %% Trampoline = element(I, Trampolines),
+ Skip = (I-1)*SizeofLong,
+ <<_:Skip/binary-unit:8,
+ Trampoline:SizeofLong/integer-unsigned-native-unit:8,
+ _/binary>> = Trampolines,
+ NewMap = gb_trees:insert(MFA, Trampoline, Map),
+ mk_trampoline_map(I-1, CalleeMFAs, Trampolines, SizeofLong, NewMap);
+mk_trampoline_map(0, _, _, _, Map) -> Map.
+
+%%----------------------------------------------------------------
+%%
+trampoline_map_get(_, []) -> []; % archs not using trampolines
+trampoline_map_get(MFA, Map) -> gb_trees:get(MFA, Map).
+
+trampoline_map_lookup(_, []) -> []; % archs not using trampolines
+trampoline_map_lookup(Primop, Map) ->
+ case gb_trees:lookup(Primop, Map) of
+ {value,X} -> X;
+ _ -> []
+ end.
+
+%%------------------------------------------------------------------------
+
+-record(fundef, {address :: integer(),
+ mfa :: mfa(),
+ is_closure :: boolean(),
+ is_exported :: boolean()}).
+
+exports(ExportMap, BaseAddress) ->
+ exports(ExportMap, BaseAddress, [], []).
+
+exports([Offset,M,F,A,IsClosure,IsExported|Rest], BaseAddress, MFAs, Addresses) ->
+ MFA = {M,F,A},
+ Address = BaseAddress + Offset,
+ FunDef = #fundef{address=Address, mfa=MFA, is_closure=IsClosure,
+ is_exported=IsExported},
+ exports(Rest, BaseAddress, [MFA|MFAs], [FunDef|Addresses]);
+exports([], _, MFAs, Addresses) ->
+ {MFAs, Addresses}.
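+
+%% Example (sketch): an ExportMap is a flat list of sextuples, e.g.
+%%
+%%   [0,  foo, start, 0, false, true,
+%%    64, foo, loop,  1, false, false]
+%%
+%% ('foo' is a hypothetical module). With base address B this yields
+%% MFAs = [{foo,loop,1},{foo,start,0}] and #fundef{} records at B+0 and
+%% B+64, in descending address order as noted in load_common/4.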
+
+mod({M,_F,_A}) -> M.
+
+%%------------------------------------------------------------------------
+
+calculate_addresses(PatchOffsets, Base, Addresses) ->
+ RemoteOrLocal = local, % closure code refs are local
+ [{Data,
+ offsets_to_addresses(Offsets, Base),
+ get_native_address(DestMFA, Addresses, RemoteOrLocal)} ||
+ {{DestMFA,_,_}=Data,Offsets} <- PatchOffsets].
+
+offsets_to_addresses(Os, Base) ->
+ [{O+Base,load_fe} || O <- Os].
+
+%%------------------------------------------------------------------------
+
+find_closure_patches([{Type,Refs} | Rest]) ->
+ case ?EXT2PATCH_TYPE(Type) of
+ load_address ->
+ find_closure_refs(Refs,Rest);
+ _ ->
+ find_closure_patches(Rest)
+ end;
+find_closure_patches([]) -> [].
+
+find_closure_refs([{Dest,Offsets} | Rest], Refs) ->
+ case Dest of
+ {closure,Data} ->
+ [{Data,Offsets}|find_closure_refs(Rest,Refs)];
+ _ ->
+ find_closure_refs(Rest,Refs)
+ end;
+find_closure_refs([], Refs) ->
+ find_closure_patches(Refs).
+
+%%------------------------------------------------------------------------
+
+export_funs([FunDef | Addresses]) ->
+ #fundef{address=Address, mfa=MFA, is_closure=IsClosure,
+ is_exported=IsExported} = FunDef,
+ ?IF_DEBUG({M,F,A} = MFA, no_debug),
+ ?IF_DEBUG(
+ case IsClosure of
+ false ->
+ ?debug_msg("LINKING: ~w:~w/~w to (0x~.16b)\n",
+ [M,F,A, Address]);
+ true ->
+ ?debug_msg("LINKING: ~w:~w/~w to closure (0x~.16b)\n",
+ [M,F,A, Address])
+ end, no_debug),
+ hipe_bifs:set_funinfo_native_address(MFA, Address, IsExported),
+ hipe_bifs:set_native_address(MFA, Address, IsClosure),
+ export_funs(Addresses);
+export_funs([]) ->
+ true.
+
+export_funs(Mod, Beam, Addresses, ClosuresToPatch) ->
+ Fs = [{F,A,Address} || #fundef{address=Address, mfa={_M,F,A}} <- Addresses],
+ code:make_stub_module(Mod, Beam, {Fs,ClosuresToPatch}).
+
+%%========================================================================
+%% Patching
+%% @spec patch(refs(), BaseAddress::integer(), ConstAndZone::term(),
+%% Addresses::term(), TrampolineMap::term()) -> term()
+%% @type refs()=[{RefType::integer(), Reflist::reflist()} | refs()]
+%%
+%% @type reflist()= [{Data::term(), Offsets::offsets()}|reflist()]
+%% @type offsets()= [Offset::integer() | offsets()]
+%% @doc
+%% The patch list is a list of patch lists, one per patch type.
+%% For each type the list of references is sorted so that several
+%% references to the same data come after each other
+%% (we use this to look up the address of a referred function only once).
+%%
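+%%
+%% Purely illustrative shape (hypothetical tags and offsets), assuming the
+%% types above:
+%%   [{Type1, [{{foo,bar,2}, [16#40,16#7c]},
+%%             {{foo,baz,0}, [16#a4]}]},
+%%    {Type2, [{some_atom, [16#b0]}]}]
+%% where Type1/Type2 stand for the integer tags decoded by ?EXT2PATCH_TYPE
+%% (e.g. call_remote or load_atom) and each offset is relative to the start
+%% of the loaded code.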
+
+patch([{Type,SortedRefs}|Rest], CodeAddress, ConstMap2, Addresses, TrampolineMap) ->
+ ?debug_msg("Patching ~w at [~w+offset] with ~w\n",
+ [Type,CodeAddress,SortedRefs]),
+ case ?EXT2PATCH_TYPE(Type) of
+ call_local ->
+ patch_call(SortedRefs, CodeAddress, Addresses, 'local', TrampolineMap);
+ call_remote ->
+ patch_call(SortedRefs, CodeAddress, Addresses, 'remote', TrampolineMap);
+ Other ->
+ patch_all(Other, SortedRefs, CodeAddress, {ConstMap2,CodeAddress}, Addresses)
+ end,
+ patch(Rest, CodeAddress, ConstMap2, Addresses, TrampolineMap);
+patch([], _, _, _, _) -> true.
+
+%%----------------------------------------------------------------
+%% Handle a 'call_local' or 'call_remote' patch.
+%%
+patch_call([{DestMFA,Offsets}|SortedRefs], BaseAddress, Addresses, RemoteOrLocal, TrampolineMap) ->
+ case bif_address(DestMFA) of
+ false ->
+ %% Previous code used mfa_to_address(DestMFA, Addresses)
+ %% here for local calls. That is wrong because even local
+ %% destinations may not be present in Addresses: they may
+ %% not have been compiled yet, or they may be BEAM-only
+ %% functions (e.g. module_info).
+ DestAddress = get_native_address(DestMFA, Addresses, RemoteOrLocal),
+ Trampoline = trampoline_map_get(DestMFA, TrampolineMap),
+ patch_mfa_call_list(Offsets, BaseAddress, DestMFA, DestAddress, Addresses, RemoteOrLocal, Trampoline);
+ BifAddress when is_integer(BifAddress) ->
+ Trampoline = trampoline_map_lookup(DestMFA, TrampolineMap),
+ patch_bif_call_list(Offsets, BaseAddress, BifAddress, Trampoline)
+ end,
+ patch_call(SortedRefs, BaseAddress, Addresses, RemoteOrLocal, TrampolineMap);
+patch_call([], _, _, _, _) ->
+ true.
+
+patch_bif_call_list([Offset|Offsets], BaseAddress, BifAddress, Trampoline) ->
+ CallAddress = BaseAddress+Offset,
+ ?ASSERT(assert_local_patch(CallAddress)),
+ patch_call_insn(CallAddress, BifAddress, Trampoline),
+ patch_bif_call_list(Offsets, BaseAddress, BifAddress, Trampoline);
+patch_bif_call_list([], _, _, _) -> [].
+
+patch_mfa_call_list([Offset|Offsets], BaseAddress, DestMFA, DestAddress, Addresses, RemoteOrLocal, Trampoline) ->
+ CallAddress = BaseAddress+Offset,
+ add_ref(DestMFA, CallAddress, Addresses, 'call', Trampoline, RemoteOrLocal),
+ ?ASSERT(assert_local_patch(CallAddress)),
+ patch_call_insn(CallAddress, DestAddress, Trampoline),
+ patch_mfa_call_list(Offsets, BaseAddress, DestMFA, DestAddress, Addresses, RemoteOrLocal, Trampoline);
+patch_mfa_call_list([], _, _, _, _, _, _) -> [].
+
+patch_call_insn(CallAddress, DestAddress, Trampoline) ->
+ %% This assertion is false when we're called from redirect/2.
+ %% ?ASSERT(assert_local_patch(CallAddress)),
+ hipe_bifs:patch_call(CallAddress, DestAddress, Trampoline).
+
+%% ____________________________________________________________________
+%%
+
+patch_all(Type, [{Dest,Offsets}|Rest], BaseAddress, ConstAndZone, Addresses)->
+ patch_all_offsets(Type, Dest, Offsets, BaseAddress, ConstAndZone, Addresses),
+ patch_all(Type, Rest, BaseAddress, ConstAndZone, Addresses);
+patch_all(_, [], _, _, _) -> true.
+
+patch_all_offsets(Type, Data, [Offset|Offsets], BaseAddress,
+ ConstAndZone, Addresses) ->
+ ?debug_msg("Patching ~w at [~w+~w] with ~w\n",
+ [Type,BaseAddress,Offset, Data]),
+ Address = BaseAddress + Offset,
+ patch_offset(Type, Data, Address, ConstAndZone, Addresses),
+ ?debug_msg("Patching done\n",[]),
+ patch_all_offsets(Type, Data, Offsets, BaseAddress, ConstAndZone, Addresses);
+patch_all_offsets(_, _, [], _, _, _) -> true.
+
+%%----------------------------------------------------------------
+%% Handle any patch type except 'call_local' or 'call_remote'.
+%%
+patch_offset(Type, Data, Address, ConstAndZone, Addresses) ->
+ case Type of
+ load_address ->
+ patch_load_address(Data, Address, ConstAndZone, Addresses);
+ load_atom ->
+ Atom = Data,
+ patch_atom(Address, Atom);
+ sdesc ->
+ patch_sdesc(Data, Address, ConstAndZone);
+ x86_abs_pcrel ->
+ patch_instr(Address, Data, x86_abs_pcrel)
+ %% _ ->
+ %% ?error_msg("Unknown ref ~w ~w ~w\n", [Type, Address, Data]),
+ %% exit({unknown_reference, Type, Address, Data})
+ end.
+
+patch_atom(Address, Atom) ->
+ ?ASSERT(assert_local_patch(Address)),
+ patch_instr(Address, hipe_bifs:atom_to_word(Atom), atom).
+
+patch_sdesc(?STACK_DESC(SymExnRA, FSize, Arity, Live),
+ Address, {_ConstMap2,CodeAddress}) ->
+ ExnRA =
+ case SymExnRA of
+ [] -> 0; % No catch
+ LabelOffset -> CodeAddress + LabelOffset
+ end,
+ ?ASSERT(assert_local_patch(Address)),
+ hipe_bifs:enter_sdesc({Address, ExnRA, FSize, Arity, Live}).
+
+%%----------------------------------------------------------------
+%% Handle a 'load_address'-type patch.
+%%
+patch_load_address(Data, Address, ConstAndZone, Addresses) ->
+ case Data of
+ {local_function,DestMFA} ->
+ patch_load_mfa(Address, DestMFA, Addresses, 'local');
+ {remote_function,DestMFA} ->
+ patch_load_mfa(Address, DestMFA, Addresses, 'remote');
+ {constant,Name} ->
+ {ConstMap2,_CodeAddress} = ConstAndZone,
+ ConstAddress = find_const(Name, ConstMap2),
+ patch_instr(Address, ConstAddress, constant);
+ {closure,{DestMFA,Uniq,Index}} ->
+ patch_closure(DestMFA, Uniq, Index, Address, Addresses);
+ {c_const,CConst} ->
+ patch_instr(Address, bif_address(CConst), c_const)
+ end.
+
+patch_closure(DestMFA, Uniq, Index, Address, Addresses) ->
+ case get(hipe_patch_closures) of
+ false ->
+ []; % This is taken care of when registering the module.
+ true -> % Not loading a whole module; patch these closures now.
+ RemoteOrLocal = local, % closure code refs are local
+ DestAddress = get_native_address(DestMFA, Addresses, RemoteOrLocal),
+ BEAMAddress = hipe_bifs:fun_to_address(DestMFA),
+ FE = hipe_bifs:make_fe(DestAddress, mod(DestMFA),
+ {Uniq, Index, BEAMAddress}),
+ ?debug_msg("Patch FE(~w) to 0x~.16b->0x~.16b (emu:0x~.16b)\n",
+ [DestMFA, FE, DestAddress, BEAMAddress]),
+ ?ASSERT(assert_local_patch(Address)),
+ patch_instr(Address, FE, closure)
+ end.
+
+%%----------------------------------------------------------------
+%% Patch an instruction loading the address of an MFA.
+%% RemoteOrLocal ::= 'remote' | 'local'
+%%
+patch_load_mfa(CodeAddress, DestMFA, Addresses, RemoteOrLocal) ->
+ DestAddress =
+ case bif_address(DestMFA) of
+ false ->
+ NativeAddress = get_native_address(DestMFA, Addresses, RemoteOrLocal),
+ add_ref(DestMFA, CodeAddress, Addresses, 'load_mfa', [], RemoteOrLocal),
+ NativeAddress;
+ BifAddress when is_integer(BifAddress) ->
+ BifAddress
+ end,
+ ?ASSERT(assert_local_patch(CodeAddress)),
+ patch_instr(CodeAddress, DestAddress, 'load_mfa').
+
+%%----------------------------------------------------------------
+%% Patch references to code labels in the data segment.
+%%
+patch_consts(Labels, DataAddress, CodeAddress) ->
+ lists:foreach(fun (L) ->
+ patch_label_or_labels(L, DataAddress, CodeAddress)
+ end, Labels).
+
+patch_label_or_labels({Pos,Offset}, DataAddress, CodeAddress) ->
+ ?ASSERT(assert_local_patch(CodeAddress+Offset)),
+ write_word(DataAddress+Pos, CodeAddress+Offset);
+patch_label_or_labels({sorted,Base,UnorderedList}, DataAddress, CodeAddress) ->
+ sort_and_write(UnorderedList, Base, DataAddress, CodeAddress).
+
+sort_and_write(UnorderedList, Base, DataAddress, CodeAddress) ->
+ WriteAndInc =
+ fun ({_, Offset}, DataPos) ->
+ ?ASSERT(assert_local_patch(CodeAddress+Offset)),
+ write_word(DataPos, CodeAddress+Offset)
+ end,
+ lists:foldl(WriteAndInc, DataAddress+Base, sort_on_representation(UnorderedList)).
+
+sort_on_representation(List) ->
+ lists:sort([{hipe_bifs:term_to_word(Term), Offset} ||
+ {Term, Offset} <- List]).
+
+%%--------------------------------------------------------------------
+%% Update an instruction to refer to a value of a given type.
+%%
+%% Type ::= 'call' | 'load_mfa' | 'x86_abs_pcrel' | 'atom'
+%% | 'constant' | 'c_const' | 'closure'
+%%
+%% Note: the values of this Type are hard-coded in file erl_bif_types.erl
+%%
+patch_instr(Address, Value, Type) ->
+ hipe_bifs:patch_insn(Address, Value, Type).
+
+%%--------------------------------------------------------------------
+%% Write a data word of the machine's natural word size.
+%% Returns the address of the next word.
+%%
+%% XXX: It appears this is used for inserting both code addresses
+%% and other data. In HiPE, code addresses are still 32-bit on
+%% 64-bit machines.
+write_word(DataAddress, DataWord) ->
+ case erlang:system_info(hipe_architecture) of
+ amd64 ->
+ hipe_bifs:write_u64(DataAddress, DataWord),
+ DataAddress+8;
+ %% ppc64 ->
+ %% hipe_bifs:write_u64(DataAddress, DataWord),
+ %% DataAddress+8;
+ _ ->
+ hipe_bifs:write_u32(DataAddress, DataWord),
+ DataAddress+4
+ end.
+
+%%--------------------------------------------------------------------
+
+bif_address({M,F,A}) ->
+ hipe_bifs:bif_address(M,F,A);
+bif_address(Name) when is_atom(Name) ->
+ hipe_bifs:primop_address(Name).
+
+%%--------------------------------------------------------------------
+%% create_data_segment/3 takes an object file ConstMap, as produced by
+%% hipe_pack_constants:slim_constmap/1, loads the constants into
+%% memory, and produces a ConstMap2 mapping each constant's ConstNo to
+%% its runtime address, tagged if the constant is a term.
+%%
+create_data_segment(DataAlign, DataSize, DataList) ->
+ %%io:format("create_data_segment: \nDataAlign: ~p\nDataSize: ~p\nDataList: ~p\n",[DataAlign,DataSize,DataList]),
+ DataAddress = hipe_bifs:alloc_data(DataAlign, DataSize),
+ enter_data(DataList, [], DataAddress, DataSize).
+
+enter_data(List, ConstMap2, DataAddress, DataSize) ->
+ case List of
+ [ConstNo,Offset,Type,Data|Rest] when is_integer(Offset) ->
+ %%?msg("Const ~w\n",[[ConstNo,Offset,Type,Data]]),
+ ?ASSERT((Offset >= 0) and (Offset =< DataSize)),
+ Res = enter_datum(Type, Data, DataAddress+Offset),
+ enter_data(Rest, [{ConstNo,Res}|ConstMap2], DataAddress, DataSize);
+ [] ->
+ {DataAddress, ConstMap2}
+ end.
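+
+%% Illustrative shape only (hypothetical constants): a DataList of
+%%   [0,0,TermType,{foo,"bar"}, 1,16,SortedBlockType,[3,1,2]]
+%% (two constants, flattened into ConstNo,Offset,Type,Data groups) produces
+%% a ConstMap2 of the form [{1,Addr1},{0,TaggedTerm0}], where TermType and
+%% SortedBlockType stand for the integer tags decoded by ?EXT2CONST_TYPE.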
+
+enter_datum(Type, Data, Address) ->
+ case ?EXT2CONST_TYPE(Type) of
+ term ->
+ %% Address is unused for terms
+ hipe_bifs:term_to_word(hipe_bifs:merge_term(Data));
+ sorted_block ->
+ L = lists:sort([hipe_bifs:term_to_word(Term) || Term <- Data]),
+ write_words(L, Address),
+ Address;
+ block ->
+ case Data of
+ {Lbls, []} ->
+ write_bytes(Lbls, Address);
+ {Lbls, SortOrder} ->
+ SortedLbls = [Lbl || {_,Lbl} <- lists:sort(group(Lbls, SortOrder))],
+ write_words(SortedLbls, Address);
+ Lbls ->
+ write_bytes(Lbls, Address)
+ end,
+ Address
+ end.
+
+group([], []) ->
+ [];
+group([B1,B2,B3,B4|Ls], [O|Os]) ->
+ [{hipe_bifs:term_to_word(O),bytes_to_32(B4,B3,B2,B1)}|group(Ls,Os)].
+
+bytes_to_32(B4,B3,B2,B1) ->
+ (B4 bsl 24) bor (B3 bsl 16) bor (B2 bsl 8) bor B1.
+
+write_words([W|Rest], Addr) ->
+ write_words(Rest, write_word(Addr, W));
+write_words([], Addr) when is_integer(Addr) -> true.
+
+write_bytes([B|Rest], Addr) ->
+ hipe_bifs:write_u8(Addr, B),
+ write_bytes(Rest, Addr+1);
+write_bytes([], Addr) when is_integer(Addr) -> true.
+
+%%% lists:keysearch/3 conses a useless wrapper around the found tuple :-(
+%%% otherwise it would have been a good replacement for this loop
+find_const(ConstNo, [{ConstNo,Addr}|_ConstMap2]) ->
+ Addr;
+find_const(ConstNo, [_|ConstMap2]) ->
+ find_const(ConstNo, ConstMap2);
+find_const(ConstNo, []) ->
+ ?error_msg("Constant not found ~w\n",[ConstNo]),
+ exit({constant_not_found,ConstNo}).
+
+
+%%----------------------------------------------------------------
+%% Record that the code at address 'Address' has a reference
+%% of type 'RefType' ('call' or 'load_mfa') to 'CalleeMFA'.
+%% 'Addresses' must be an address-descending list from exports/2.
+%%
+%% If 'RefType' is 'call', then 'Trampoline' may be the address
+%% of a stub branching to 'CalleeMFA', where the stub is reachable
+%% from 'Address' via a normal call or tailcall instruction.
+%%
+%% RemoteOrLocal ::= 'remote' | 'local'.
+%%
+
+%%
+%% -record(ref, {caller_mfa, address, ref_type, trampoline, remote_or_local}).
+%%
+
+add_ref(CalleeMFA, Address, Addresses, RefType, Trampoline, RemoteOrLocal) ->
+ CallerMFA = address_to_mfa(Address, Addresses),
+ %% just a sanity assertion below
+ true = case RemoteOrLocal of
+ local ->
+ {M1,_,_} = CalleeMFA,
+ {M2,_,_} = CallerMFA,
+ M1 =:= M2;
+ remote ->
+ true
+ end,
+ %% io:format("Adding ref ~w\n",[{CallerMFA, CalleeMFA, Address, RefType}]),
+ hipe_bifs:add_ref(CalleeMFA, {CallerMFA,Address,RefType,Trampoline,RemoteOrLocal}).
+
+address_to_mfa(Address, [#fundef{address=Adr, mfa=MFA}|_Rest]) when Address >= Adr -> MFA;
+address_to_mfa(Address, [_ | Rest]) -> address_to_mfa(Address, Rest);
+address_to_mfa(Address, []) ->
+ ?error_msg("Local adddress not found ~w\n",[Address]),
+ exit({?MODULE, local_address_not_found}).
+
+%%----------------------------------------------------------------
+%% Change callers of the given module to instead trap to BEAM.
+%% load_native_code/2 calls this just before loading native code.
+%%
+patch_to_emu(Mod) ->
+ patch_to_emu_step2(patch_to_emu_step1(Mod)).
+
+%% Step 1 must occur before the loading of native code updates the
+%% reference information or creates a new BEAM stub module.
+patch_to_emu_step1(Mod) ->
+ case is_loaded(Mod) of
+ true ->
+ %% Get exported functions
+ MFAs = [{Mod,Fun,Arity} || {Fun,Arity} <- Mod:module_info(exports)],
+ %% get_refs_from/2 only finds references from compiled static
+ %% call sites to the module, but some native address entries
+ %% were added as the result of dynamic apply calls. We must
+ %% purge them too, but we have no explicit record of them.
+ %% Therefore invalidate all native addresses for the module.
+ %% emu_make_stubs/1 will repair the ones for compiled static calls.
+ hipe_bifs:invalidate_funinfo_native_addresses(MFAs),
+ %% Find all call sites that call these MFAs. As a side-effect,
+ %% create native stubs for any MFAs that are referred.
+ ReferencesToPatch = get_refs_from(MFAs, []),
+ remove_refs_from(MFAs),
+ ReferencesToPatch;
+ false ->
+ %% The first time we load the module, no redirection needs to be done.
+ []
+ end.
+
+%% Step 2 must occur after the new BEAM stub module is created.
+patch_to_emu_step2(ReferencesToPatch) ->
+ emu_make_stubs(ReferencesToPatch),
+ redirect(ReferencesToPatch).
+
+-spec is_loaded(Module::atom()) -> boolean().
+%% @doc Checks whether a module is loaded or not.
+is_loaded(M) when is_atom(M) ->
+ try hipe_bifs:fun_to_address({M,module_info,0}) of
+ I when is_integer(I) -> true
+ catch _:_ -> false
+ end.
+
+-ifdef(notdef).
+emu_make_stubs([{MFA,_Refs}|Rest]) ->
+ make_stub(MFA),
+ emu_make_stubs(Rest);
+emu_make_stubs([]) ->
+ [].
+
+make_stub({_,_,A} = MFA) ->
+ EmuAddress = hipe_bifs:get_emu_address(MFA),
+ StubAddress = hipe_bifs:make_native_stub(EmuAddress, A),
+ hipe_bifs:set_funinfo_native_address(MFA, StubAddress).
+-else.
+emu_make_stubs(_) -> [].
+-endif.
+
+%%--------------------------------------------------------------------
+%% Given a list of MFAs, tag them with their referred_from references.
+%% The resulting MFA list is later passed to redirect/1, once
+%% the MFAs have been bound to (possibly new) native-code addresses.
+%%
+get_refs_from(MFAs, []) ->
+ mark_referred_from(MFAs),
+ MFAs.
+
+mark_referred_from([MFA|MFAs]) ->
+ hipe_bifs:mark_referred_from(MFA),
+ mark_referred_from(MFAs);
+mark_referred_from([]) ->
+ [].
+
+%%--------------------------------------------------------------------
+%% Given a list of MFAs with referred_from references, update their
+%% callers to refer to their new native-code addresses.
+%%
+%% The MFA list must come from get_refs_from/2.
+%%
+redirect([MFA|Rest]) ->
+ hipe_bifs:redirect_referred_from(MFA),
+ redirect(Rest);
+redirect([]) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Given a list of MFAs, remove all referred_from references having
+%% any of them as CallerMFA.
+%%
+%% This is the only place using refers_to. Whenever a reference is
+%% added from CallerMFA to CalleeMFA, CallerMFA is added to CalleeMFA's
+%% referred_from list, and CalleeMFA is added to CallerMFA's refers_to
+%% list. The refers_to list is used here to find the CalleeMFAs whose
+%% referred_from lists should be updated.
+%%
+remove_refs_from([CallerMFA|CallerMFAs]) ->
+ hipe_bifs:remove_refs_from(CallerMFA),
+ remove_refs_from(CallerMFAs);
+remove_refs_from([]) ->
+ [].
+
+%%--------------------------------------------------------------------
+
+%% To find the native code of an MFA we need to look in three places:
+%% 1. If it is part of the code being loaded right now, look in the
+%% Addresses data structure.
+%% 2. Then look in native_addresses from the module info.
+%% 3. Then (the function might have been compiled on its own) look in
+%% hipe_funinfo.
+%% If all else fails, create a native stub for the MFA.
+get_native_address(MFA, Addresses, RemoteOrLocal) ->
+ case mfa_to_address(MFA, Addresses, RemoteOrLocal) of
+ Adr when is_integer(Adr) -> Adr;
+ false ->
+ IsRemote =
+ case RemoteOrLocal of
+ remote -> true;
+ local -> false
+ end,
+ hipe_bifs:find_na_or_make_stub(MFA, IsRemote)
+ end.
+
+mfa_to_address(MFA, [#fundef{address=Adr, mfa=MFA,
+ is_exported=IsExported}|_Rest], RemoteOrLocal) ->
+ case RemoteOrLocal of
+ local ->
+ Adr;
+ remote ->
+ case IsExported of
+ true ->
+ Adr;
+ false ->
+ false
+ end
+ end;
+mfa_to_address(MFA, [_|Rest], RemoteOrLocal) ->
+ mfa_to_address(MFA, Rest, RemoteOrLocal);
+mfa_to_address(_, [], _) -> false.
+
+%% ____________________________________________________________________
+%%
+
+-ifdef(DO_ASSERT).
+
+-define(init_assert_patch(Base, Size), put(hipe_assert_code_area,{Base,Base+Size})).
+
+assert_local_patch(Address) when is_integer(Address) ->
+ {First,Last} = get(hipe_assert_code_area),
+ Address >= First andalso Address < (Last).
+
+-else.
+
+-define(init_assert_patch(Base, Size), ok).
+
+-endif.
+
+%% ____________________________________________________________________
+%%
+
+%% Beam: nil() | binary() (used as a flag)
+
+enter_code(CodeSize, CodeBinary, CalleeMFAs, Mod, Beam) ->
+ true = byte_size(CodeBinary) =:= CodeSize,
+ hipe_bifs:update_code_size(Mod, Beam, CodeSize),
+ {CodeAddress,Trampolines} = hipe_bifs:enter_code(CodeBinary, CalleeMFAs),
+ ?init_assert_patch(CodeAddress, byte_size(CodeBinary)),
+ {CodeAddress,Trampolines}.
+
diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl
new file mode 100644
index 0000000000..b86aa1839e
--- /dev/null
+++ b/lib/kernel/src/inet.erl
@@ -0,0 +1,1342 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet).
+
+-include("inet.hrl").
+-include("inet_int.hrl").
+-include("inet_sctp.hrl").
+
+%% socket
+-export([peername/1, sockname/1, port/1, send/2,
+ setopts/2, getopts/2,
+ getif/1, getif/0, getiflist/0, getiflist/1,
+ ifget/3, ifget/2, ifset/3, ifset/2,
+ getstat/1, getstat/2,
+ ip/1, stats/0, options/0,
+ pushf/3, popf/1, close/1, gethostname/0, gethostname/1]).
+
+-export([connect_options/2, listen_options/2, udp_options/2, sctp_options/2]).
+
+-export([i/0, i/1, i/2]).
+
+-export([getll/1, getfd/1, open/7, fdopen/5]).
+
+-export([tcp_controlling_process/2, udp_controlling_process/2,
+ tcp_close/1, udp_close/1]).
+%% used by socks5
+-export([setsockname/2, setpeername/2]).
+
+%% resolve
+-export([gethostbyname/1, gethostbyname/2, gethostbyname/3,
+ gethostbyname_tm/3]).
+-export([gethostbyaddr/1, gethostbyaddr/2,
+ gethostbyaddr_tm/2]).
+
+-export([getservbyname/2, getservbyport/2]).
+-export([getaddrs/2, getaddrs/3, getaddrs_tm/3,
+ getaddr/2, getaddr/3, getaddr_tm/3]).
+-export([translate_ip/2]).
+
+-export([get_rc/0]).
+
+%% format error
+-export([format_error/1]).
+
+%% timer interface
+-export([start_timer/1, timeout/1, timeout/2, stop_timer/1]).
+
+%% imports
+-import(lists, [append/1, duplicate/2, filter/2, foldl/3]).
+
+%% Record Signature
+-define(RS(Record),
+ {Record, record_info(size, Record)}).
+%% Record Signature Check (guard)
+-define(RSC(Record, RS),
+ element(1, Record) =:= element(1, RS),
+ tuple_size(Record) =:= element(2, RS)).
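+
+%% Illustrative use of the two macros above (hypothetical helper, not part
+%% of this module): ?RS captures a record's name and size, and ?RSC expands
+%% to two guard tests matching a term against that signature, e.g.
+%%
+%%   is_rtoinfo(X) when ?RSC(X, ?RS(sctp_rtoinfo)) -> true;
+%%   is_rtoinfo(_) -> false.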
+
+%%% ---------------------------------
+%%% Contract type definitions
+
+-type socket() :: port().
+-type posix() :: atom().
+
+-type socket_setopt() ::
+ {'raw', non_neg_integer(), non_neg_integer(), binary()} |
+ %% TCP/UDP options
+ {'reuseaddr', boolean()} |
+ {'keepalive', boolean()} |
+ {'dontroute', boolean()} |
+ {'linger', {boolean(), non_neg_integer()}} |
+ {'broadcast', boolean()} |
+ {'sndbuf', non_neg_integer()} |
+ {'recbuf', non_neg_integer()} |
+ {'priority', non_neg_integer()} |
+ {'tos', non_neg_integer()} |
+ {'nodelay', boolean()} |
+ {'multicast_ttl', non_neg_integer()} |
+ {'multicast_loop', boolean()} |
+ {'multicast_if', ip_address()} |
+ {'add_membership', {ip_address(), ip_address()}} |
+ {'drop_membership', {ip_address(), ip_address()}} |
+ {'header', non_neg_integer()} |
+ {'buffer', non_neg_integer()} |
+ {'active', boolean() | 'once'} |
+ {'packet',
+ 0 | 1 | 2 | 4 | 'raw' | 'sunrm' | 'asn1' |
+ 'cdr' | 'fcgi' | 'line' | 'tpkt' | 'http' | 'httph' | 'http_bin' | 'httph_bin' } |
+ {'mode', list() | binary()} |
+ {'port', 'port', 'term'} |
+ {'exit_on_close', boolean()} |
+ {'low_watermark', non_neg_integer()} |
+ {'high_watermark', non_neg_integer()} |
+ {'bit8', 'clear' | 'set' | 'on' | 'off'} |
+ {'send_timeout', non_neg_integer() | 'infinity'} |
+ {'send_timeout_close', boolean()} |
+ {'delay_send', boolean()} |
+ {'packet_size', non_neg_integer()} |
+ {'read_packets', non_neg_integer()} |
+ %% SCTP options
+ {'sctp_rtoinfo', #sctp_rtoinfo{}} |
+ {'sctp_associnfo', #sctp_assocparams{}} |
+ {'sctp_initmsg', #sctp_initmsg{}} |
+ {'sctp_nodelay', boolean()} |
+ {'sctp_autoclose', non_neg_integer()} |
+ {'sctp_disable_fragments', boolean()} |
+ {'sctp_i_want_mapped_v4_addr', boolean()} |
+ {'sctp_maxseg', non_neg_integer()} |
+ {'sctp_primary_addr', #sctp_prim{}} |
+ {'sctp_set_peer_primary_addr', #sctp_setpeerprim{}} |
+ {'sctp_adaptation_layer', #sctp_setadaptation{}} |
+ {'sctp_peer_addr_params', #sctp_paddrparams{}} |
+ {'sctp_default_send_param', #sctp_sndrcvinfo{}} |
+ {'sctp_events', #sctp_event_subscribe{}} |
+ {'sctp_delayed_ack_time', #sctp_assoc_value{}}.
+
+-type socket_getopt() ::
+ {'raw',
+ non_neg_integer(), non_neg_integer(), binary()|non_neg_integer()} |
+ %% TCP/UDP options
+ 'reuseaddr' | 'keepalive' | 'dontroute' | 'linger' |
+ 'broadcast' | 'sndbuf' | 'recbuf' | 'priority' | 'tos' | 'nodelay' |
+ 'multicast_ttl' | 'multicast_loop' | 'multicast_if' |
+ 'add_membership' | 'drop_membership' |
+ 'header' | 'buffer' | 'active' | 'packet' | 'mode' | 'port' |
+ 'exit_on_close' | 'low_watermark' | 'high_watermark' | 'bit8' |
+ 'send_timeout' | 'send_timeout_close' |
+ 'delay_send' | 'packet_size' | 'read_packets' |
+ %% SCTP options
+ {'sctp_status', #sctp_status{}} |
+ 'sctp_get_peer_addr_info' |
+ {'sctp_get_peer_addr_info', #sctp_status{}} |
+ 'sctp_rtoinfo' |
+ {'sctp_rtoinfo', #sctp_rtoinfo{}} |
+ 'sctp_associnfo' |
+ {'sctp_associnfo', #sctp_assocparams{}} |
+ 'sctp_initmsg' |
+ {'sctp_initmsg', #sctp_initmsg{}} |
+ 'sctp_nodelay' | 'sctp_autoclose' | 'sctp_disable_fragments' |
+ 'sctp_i_want_mapped_v4_addr' | 'sctp_maxseg' |
+ {'sctp_primary_addr', #sctp_prim{}} |
+ {'sctp_set_peer_primary_addr', #sctp_setpeerprim{}} |
+ 'sctp_adaptation_layer' |
+ {'sctp_adaptation_layer', #sctp_setadaptation{}} |
+ {'sctp_peer_addr_params', #sctp_paddrparams{}} |
+ 'sctp_default_send_param' |
+ {'sctp_default_send_param', #sctp_sndrcvinfo{}} |
+ 'sctp_events' |
+ {'sctp_events', #sctp_event_subscribe{}} |
+ 'sctp_delayed_ack_time' |
+ {'sctp_delayed_ack_time', #sctp_assoc_value{}}.
+
+-type ether_address() :: [0..255].
+
+-type if_setopt() ::
+ {'addr', ip_address()} |
+ {'broadaddr', ip_address()} |
+ {'dstaddr', ip_address()} |
+ {'mtu', non_neg_integer()} |
+ {'netmask', ip_address()} |
+ {'flags', ['up' | 'down' | 'broadcast' | 'no_broadcast' |
+ 'pointtopoint' | 'no_pointtopoint' |
+ 'running' | 'multicast']} |
+ {'hwaddr', ether_address()}.
+
+-type if_getopt() ::
+ 'addr' | 'broadaddr' | 'dstaddr' |
+ 'mtu' | 'netmask' | 'flags' |'hwaddr'.
+
+-type family_option() :: 'inet' | 'inet6'.
+-type protocol_option() :: 'tcp' | 'udp' | 'sctp'.
+-type stat_option() ::
+ 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
+ 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'.
+
+%%% ---------------------------------
+
+-spec get_rc() -> [{any(),any()}].
+
+get_rc() ->
+ inet_db:get_rc().
+
+-spec close(Socket :: socket()) -> 'ok'.
+
+close(Socket) ->
+ prim_inet:close(Socket),
+ receive
+ {Closed, Socket} when Closed =:= tcp_closed; Closed =:= udp_closed ->
+ ok
+ after 0 ->
+ ok
+ end.
+
+-spec peername(Socket :: socket()) ->
+ {'ok', {ip_address(), non_neg_integer()}} | {'error', posix()}.
+
+peername(Socket) ->
+ prim_inet:peername(Socket).
+
+-spec setpeername(Socket :: socket(), Address :: {ip_address(), ip_port()}) ->
+ 'ok' | {'error', any()}.
+
+setpeername(Socket, {IP,Port}) ->
+ prim_inet:setpeername(Socket, {IP,Port});
+setpeername(Socket, undefined) ->
+ prim_inet:setpeername(Socket, undefined).
+
+
+-spec sockname(Socket :: socket()) ->
+ {'ok', {ip_address(), non_neg_integer()}} | {'error', posix()}.
+
+sockname(Socket) ->
+ prim_inet:sockname(Socket).
+
+-spec setsockname(Socket :: socket(), Address :: {ip_address(), ip_port()}) ->
+ 'ok' | {'error', any()}.
+
+setsockname(Socket, {IP,Port}) ->
+ prim_inet:setsockname(Socket, {IP,Port});
+setsockname(Socket, undefined) ->
+ prim_inet:setsockname(Socket, undefined).
+
+-spec port(Socket :: socket()) -> {'ok', ip_port()} | {'error', any()}.
+
+port(Socket) ->
+ case prim_inet:sockname(Socket) of
+ {ok, {_,Port}} -> {ok, Port};
+ Error -> Error
+ end.
+
+-spec send(Socket :: socket(), Packet :: iolist()) -> % iolist()?
+ 'ok' | {'error', posix()}.
+
+send(Socket, Packet) ->
+ prim_inet:send(Socket, Packet).
+
+-spec setopts(Socket :: socket(), Opts :: [socket_setopt()]) ->
+ 'ok' | {'error', posix()}.
+
+setopts(Socket, Opts) ->
+ prim_inet:setopts(Socket, Opts).
+
+-spec getopts(Socket :: socket(), Opts :: [socket_getopt()]) ->
+ {'ok', [socket_setopt()]} | {'error', posix()}.
+
+getopts(Socket, Opts) ->
+ prim_inet:getopts(Socket, Opts).
+
+-spec getiflist(Socket :: socket()) ->
+ {'ok', [string()]} | {'error', posix()}.
+
+getiflist(Socket) ->
+ prim_inet:getiflist(Socket).
+
+-spec getiflist() -> {'ok', [string()]} | {'error', posix()}.
+
+getiflist() ->
+ withsocket(fun(S) -> prim_inet:getiflist(S) end).
+
+-spec ifget(Socket :: socket(),
+ Name :: string() | atom(),
+ Opts :: [if_getopt()]) ->
+ {'ok', [if_setopt()]} | {'error', posix()}.
+
+ifget(Socket, Name, Opts) ->
+ prim_inet:ifget(Socket, Name, Opts).
+
+-spec ifget(Name :: string() | atom(), Opts :: [if_getopt()]) ->
+ {'ok', [if_setopt()]} | {'error', posix()}.
+
+ifget(Name, Opts) ->
+ withsocket(fun(S) -> prim_inet:ifget(S, Name, Opts) end).
+
+-spec ifset(Socket :: socket(),
+ Name :: string() | atom(),
+ Opts :: [if_setopt()]) ->
+ 'ok' | {'error', posix()}.
+
+ifset(Socket, Name, Opts) ->
+ prim_inet:ifset(Socket, Name, Opts).
+
+-spec ifset(Name :: string() | atom(), Opts :: [if_setopt()]) ->
+ 'ok' | {'error', posix()}.
+
+ifset(Name, Opts) ->
+ withsocket(fun(S) -> prim_inet:ifset(S, Name, Opts) end).
+
+-spec getif() ->
+ {'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
+ {'error', posix()}.
+
+getif() ->
+ withsocket(fun(S) -> getif(S) end).
+
+%% backwards compatible getif
+-spec getif(Socket :: socket()) ->
+ {'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
+ {'error', posix()}.
+
+getif(Socket) ->
+ case prim_inet:getiflist(Socket) of
+ {ok, IfList} ->
+ {ok, lists:foldl(
+ fun(Name,Acc) ->
+ case prim_inet:ifget(Socket,Name,
+ [addr,broadaddr,netmask]) of
+ {ok,[{addr,A},{broadaddr,B},{netmask,M}]} ->
+ [{A,B,M}|Acc];
+ %% Some interfaces do not have a broadcast address
+ {ok,[{addr,A},{netmask,M}]} ->
+ [{A,undefined,M}|Acc];
+ _ ->
+ Acc
+ end
+ end, [], IfList)};
+ Error -> Error
+ end.
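+
+%% Illustrative result only (hypothetical addresses): a host with one
+%% configured interface plus loopback might give
+%%   inet:getif() ->
+%%     {ok,[{{192,168,1,10},{192,168,1,255},{255,255,255,0}},
+%%          {{127,0,0,1},undefined,{255,0,0,0}}]}
+%% i.e. one {Addr, Broadcast | undefined, Netmask} tuple per interface that
+%% reports an address.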
+
+withsocket(Fun) ->
+ case inet_udp:open(0,[]) of
+ {ok,Socket} ->
+ Res = Fun(Socket),
+ inet_udp:close(Socket),
+ Res;
+ Error ->
+ Error
+ end.
+
+pushf(_Socket, Fun, _State) when is_function(Fun) ->
+ {error, einval}.
+
+popf(_Socket) ->
+ {error, einval}.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% The hostname is not cached any more, because that could cause trouble on
+% at least Windows with plug-and-play network cards being inserted and
+% removed in conjunction with use of the DHCP protocol.
+% This should never fail.
+
+-spec gethostname() -> {'ok', string()}.
+
+gethostname() ->
+ case inet_udp:open(0,[]) of
+ {ok,U} ->
+ {ok,Res} = gethostname(U),
+ inet_udp:close(U),
+ {Res2,_} = lists:splitwith(fun($.)->false;(_)->true end,Res),
+ {ok, Res2};
+ _ ->
+ {ok, "nohost.nodomain"}
+ end.
+
+-spec gethostname(Socket :: socket()) ->
+ {'ok', string()} | {'error', posix()}.
+
+gethostname(Socket) ->
+ prim_inet:gethostname(Socket).
+
+-spec getstat(Socket :: socket()) ->
+ {'ok', [{stat_option(), integer()}]} | {'error', posix()}.
+
+getstat(Socket) ->
+ prim_inet:getstat(Socket, stats()).
+
+-spec getstat(Socket :: socket(), Statoptions :: [stat_option()]) ->
+ {'ok', [{stat_option(), integer()}]} | {'error', posix()}.
+
+getstat(Socket,What) ->
+ prim_inet:getstat(Socket, What).
+
+-spec gethostbyname(Name :: string() | atom()) ->
+ {'ok', #hostent{}} | {'error', posix()}.
+
+gethostbyname(Name) ->
+ gethostbyname_tm(Name, inet, false).
+
+-spec gethostbyname(Name :: string() | atom(), Family :: family_option()) ->
+ {'ok', #hostent{}} | {'error', posix()}.
+
+gethostbyname(Name,Family) ->
+ gethostbyname_tm(Name, Family, false).
+
+-spec gethostbyname(Name :: string() | atom(),
+ Family :: family_option(),
+ Timeout :: non_neg_integer() | 'infinity') ->
+ {'ok', #hostent{}} | {'error', posix()}.
+
+gethostbyname(Name,Family,Timeout) ->
+ Timer = start_timer(Timeout),
+ Res = gethostbyname_tm(Name,Family,Timer),
+ stop_timer(Timer),
+ Res.
+
+gethostbyname_tm(Name,Family,Timer) ->
+ gethostbyname_tm(Name,Family,Timer,inet_db:res_option(lookup)).
+
+
+-spec gethostbyaddr(Address :: string() | ip_address()) ->
+ {'ok', #hostent{}} | {'error', posix()}.
+
+gethostbyaddr(Address) ->
+ gethostbyaddr_tm(Address, false).
+
+-spec gethostbyaddr(Address :: string() | ip_address(),
+ Timeout :: non_neg_integer() | 'infinity') ->
+ {'ok', #hostent{}} | {'error', posix()}.
+
+gethostbyaddr(Address,Timeout) ->
+ Timer = start_timer(Timeout),
+ Res = gethostbyaddr_tm(Address, Timer),
+ stop_timer(Timer),
+ Res.
+
+gethostbyaddr_tm(Address,Timer) ->
+ gethostbyaddr_tm(Address, Timer, inet_db:res_option(lookup)).
+
+-spec ip(Ip :: ip_address() | string() | atom()) ->
+ {'ok', ip_address()} | {'error', posix()}.
+
+ip({A,B,C,D}) when ?ip(A,B,C,D) ->
+ {ok, {A,B,C,D}};
+ip(Name) ->
+ case gethostbyname(Name) of
+ {ok, Ent} ->
+ {ok, hd(Ent#hostent.h_addr_list)};
+ Error -> Error
+ end.
+
+%% This function returns the Erlang port used (by inet_drv)
+
+-spec getll(Socket :: socket()) -> {'ok', socket()}.
+
+getll(Socket) when is_port(Socket) ->
+ {ok, Socket}.
+
+%%
+%% Return the internal file descriptor number
+%%
+
+-spec getfd(Socket :: socket()) ->
+ {'ok', non_neg_integer()} | {'error', posix()}.
+
+getfd(Socket) ->
+ prim_inet:getfd(Socket).
+
+%%
+%% Lookup an ip address
+%%
+
+-spec getaddr(Host :: ip_address() | string() | atom(),
+ Family :: family_option()) ->
+ {'ok', ip_address()} | {'error', posix()}.
+
+getaddr(Address, Family) ->
+ getaddr(Address, Family, infinity).
+
+-spec getaddr(Host :: ip_address() | string() | atom(),
+ Family :: family_option(),
+ Timeout :: non_neg_integer() | 'infinity') ->
+ {'ok', ip_address()} | {'error', posix()}.
+
+getaddr(Address, Family, Timeout) ->
+ Timer = start_timer(Timeout),
+ Res = getaddr_tm(Address, Family, Timer),
+ stop_timer(Timer),
+ Res.
+
+getaddr_tm(Address, Family, Timer) ->
+ case getaddrs_tm(Address, Family, Timer) of
+ {ok, [IP|_]} -> {ok, IP};
+ Error -> Error
+ end.
+
+-spec getaddrs(Host :: ip_address() | string() | atom(),
+ Family :: family_option()) ->
+ {'ok', [ip_address()]} | {'error', posix()}.
+
+getaddrs(Address, Family) ->
+ getaddrs(Address, Family, infinity).
+
+-spec getaddrs(Host :: ip_address() | string() | atom(),
+ Family :: family_option(),
+ Timeout :: non_neg_integer() | 'infinity') ->
+ {'ok', [ip_address()]} | {'error', posix()}.
+
+getaddrs(Address, Family, Timeout) ->
+ Timer = start_timer(Timeout),
+ Res = getaddrs_tm(Address, Family, Timer),
+ stop_timer(Timer),
+ Res.
+
+-spec getservbyport(Port :: ip_port(), Protocol :: atom() | string()) ->
+ {'ok', string()} | {'error', posix()}.
+
+getservbyport(Port, Proto) ->
+ case inet_udp:open(0, []) of
+ {ok,U} ->
+ Res = prim_inet:getservbyport(U, Port, Proto),
+ inet_udp:close(U),
+ Res;
+ Error -> Error
+ end.
+
+-spec getservbyname(Name :: atom() | string(),
+ Protocol :: atom() | string()) ->
+ {'ok', ip_port()} | {'error', posix()}.
+
+getservbyname(Name, Protocol) when is_atom(Name) ->
+ case inet_udp:open(0, []) of
+ {ok,U} ->
+ Res = prim_inet:getservbyname(U, Name, Protocol),
+ inet_udp:close(U),
+ Res;
+ Error -> Error
+ end.
+
+%% Return a list of available options
+options() ->
+ [
+ tos, priority, reuseaddr, keepalive, dontroute, linger,
+ broadcast, sndbuf, recbuf, nodelay,
+ buffer, header, active, packet, deliver, mode,
+ multicast_if, multicast_ttl, multicast_loop,
+ exit_on_close, high_watermark, low_watermark,
+ bit8, send_timeout, send_timeout_close
+ ].
+
+%% Return a list of statistics options
+
+-spec stats() -> [stat_option(),...].
+
+stats() ->
+ [recv_oct, recv_cnt, recv_max, recv_avg, recv_dvi,
+ send_oct, send_cnt, send_max, send_avg, send_pend].
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Available options for tcp:connect
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+connect_options() ->
+ [tos, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ header, active, packet, packet_size, buffer, mode, deliver,
+ exit_on_close, high_watermark, low_watermark, bit8, send_timeout,
+ send_timeout_close, delay_send,raw].
+
+connect_options(Opts, Family) ->
+ BaseOpts =
+ case application:get_env(kernel, inet_default_connect_options) of
+ {ok,List} when is_list(List) ->
+ NList = [{active, true} | lists:keydelete(active,1,List)],
+ #connect_opts{ opts = NList};
+ {ok,{active,_Bool}} ->
+ #connect_opts{ opts = [{active,true}]};
+ {ok,Option} ->
+ #connect_opts{ opts = [{active,true}, Option]};
+ _ ->
+ #connect_opts{ opts = [{active,true}]}
+ end,
+ case con_opt(Opts, BaseOpts, connect_options()) of
+ {ok, R} ->
+ {ok, R#connect_opts {
+ ifaddr = translate_ip(R#connect_opts.ifaddr, Family)
+ }};
+ Error -> Error
+ end.
+
+con_opt([{raw,A,B,C}|Opts],R,As) ->
+ con_opt([{raw,{A,B,C}}|Opts],R,As);
+con_opt([Opt | Opts], R, As) ->
+ case Opt of
+ {ip,IP} -> con_opt(Opts, R#connect_opts { ifaddr = IP }, As);
+ {ifaddr,IP} -> con_opt(Opts, R#connect_opts { ifaddr = IP }, As);
+ {port,P} -> con_opt(Opts, R#connect_opts { port = P }, As);
+ {fd,Fd} -> con_opt(Opts, R#connect_opts { fd = Fd }, As);
+ binary -> con_add(mode, binary, R, Opts, As);
+ list -> con_add(mode, list, R, Opts, As);
+ {tcp_module,_} -> con_opt(Opts, R, As);
+ inet -> con_opt(Opts, R, As);
+ inet6 -> con_opt(Opts, R, As);
+ {Name,Val} when is_atom(Name) -> con_add(Name, Val, R, Opts, As);
+ _ -> {error, badarg}
+ end;
+con_opt([], R, _) ->
+ {ok, R}.
+
+con_add(Name, Val, R, Opts, AllOpts) ->
+ case add_opt(Name, Val, R#connect_opts.opts, AllOpts) of
+ {ok, SOpts} ->
+ con_opt(Opts, R#connect_opts { opts = SOpts }, AllOpts);
+ Error -> Error
+ end.
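+
+%% A hedged example of the resulting option handling (hypothetical values):
+%%   connect_options([binary, {packet,4}, {ip,{127,0,0,1}}], inet)
+%% returns {ok,#connect_opts{}} whose ifaddr field is {127,0,0,1} and whose
+%% opts list contains {mode,binary} and {packet,4} merged with the defaults
+%% (including {active,true}); an option not listed in connect_options()
+%% yields {error,badarg}.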
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Available options for tcp:listen
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+listen_options() ->
+ [tos, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ header, active, packet, buffer, mode, deliver, backlog,
+ exit_on_close, high_watermark, low_watermark, bit8, send_timeout,
+ send_timeout_close, delay_send, packet_size,raw].
+
+listen_options(Opts, Family) ->
+ BaseOpts =
+ case application:get_env(kernel, inet_default_listen_options) of
+ {ok,List} when is_list(List) ->
+ NList = [{active, true} | lists:keydelete(active,1,List)],
+ #listen_opts{ opts = NList};
+ {ok,{active,_Bool}} ->
+ #listen_opts{ opts = [{active,true}]};
+ {ok,Option} ->
+ #listen_opts{ opts = [{active,true}, Option]};
+ _ ->
+ #listen_opts{ opts = [{active,true}]}
+ end,
+ case list_opt(Opts, BaseOpts, listen_options()) of
+ {ok, R} ->
+ {ok, R#listen_opts {
+ ifaddr = translate_ip(R#listen_opts.ifaddr, Family)
+ }};
+ Error -> Error
+ end.
+
+list_opt([{raw,A,B,C}|Opts], R, As) ->
+ list_opt([{raw,{A,B,C}}|Opts], R, As);
+list_opt([Opt | Opts], R, As) ->
+ case Opt of
+ {ip,IP} -> list_opt(Opts, R#listen_opts { ifaddr = IP }, As);
+ {ifaddr,IP} -> list_opt(Opts, R#listen_opts { ifaddr = IP }, As);
+ {port,P} -> list_opt(Opts, R#listen_opts { port = P }, As);
+ {fd,Fd} -> list_opt(Opts, R#listen_opts { fd = Fd }, As);
+ {backlog,BL} -> list_opt(Opts, R#listen_opts { backlog = BL }, As);
+ binary -> list_add(mode, binary, R, Opts, As);
+ list -> list_add(mode, list, R, Opts, As);
+ {tcp_module,_} -> list_opt(Opts, R, As);
+ inet -> list_opt(Opts, R, As);
+ inet6 -> list_opt(Opts, R, As);
+ {Name,Val} when is_atom(Name) -> list_add(Name, Val, R, Opts, As);
+ _ -> {error, badarg}
+ end;
+list_opt([], R, _SockOpts) ->
+ {ok, R}.
+
+list_add(Name, Val, R, Opts, As) ->
+ case add_opt(Name, Val, R#listen_opts.opts, As) of
+ {ok, SOpts} ->
+ list_opt(Opts, R#listen_opts { opts = SOpts }, As);
+ Error -> Error
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Available options for udp:open
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+udp_options() ->
+ [tos, priority, reuseaddr, sndbuf, recbuf, header, active, buffer, mode,
+ deliver,
+ broadcast, dontroute, multicast_if, multicast_ttl, multicast_loop,
+ add_membership, drop_membership, read_packets,raw].
+
+
+udp_options(Opts, Family) ->
+ case udp_opt(Opts, #udp_opts { }, udp_options()) of
+ {ok, R} ->
+ {ok, R#udp_opts {
+ ifaddr = translate_ip(R#udp_opts.ifaddr, Family)
+ }};
+ Error -> Error
+ end.
+
+udp_opt([{raw,A,B,C}|Opts], R, As) ->
+ udp_opt([{raw,{A,B,C}}|Opts], R, As);
+udp_opt([Opt | Opts], R, As) ->
+ case Opt of
+ {ip,IP} -> udp_opt(Opts, R#udp_opts { ifaddr = IP }, As);
+ {ifaddr,IP} -> udp_opt(Opts, R#udp_opts { ifaddr = IP }, As);
+ {port,P} -> udp_opt(Opts, R#udp_opts { port = P }, As);
+ {fd,Fd} -> udp_opt(Opts, R#udp_opts { fd = Fd }, As);
+ binary -> udp_add(mode, binary, R, Opts, As);
+ list -> udp_add(mode, list, R, Opts, As);
+ {udp_module,_} -> udp_opt(Opts, R, As);
+ inet -> udp_opt(Opts, R, As);
+ inet6 -> udp_opt(Opts, R, As);
+ {Name,Val} when is_atom(Name) -> udp_add(Name, Val, R, Opts, As);
+ _ -> {error, badarg}
+ end;
+udp_opt([], R, _SockOpts) ->
+ {ok, R}.
+
+udp_add(Name, Val, R, Opts, As) ->
+ case add_opt(Name, Val, R#udp_opts.opts, As) of
+ {ok, SOpts} ->
+ udp_opt(Opts, R#udp_opts { opts = SOpts }, As);
+ Error -> Error
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Available options for sctp:open
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Currently supported options include:
+% (*) {mode, list|binary} or just list|binary
+% (*) {active, true|false|once}
+% (*) {sctp_module, inet_sctp|inet6_sctp} or just inet|inet6
+% (*) options set via setsockopt.
+% The full list is below, in sctp_options/0.
+% All other options are currently NOT supported. In particular:
+% (*) multicast on SCTP is not (yet) supported, as it may be incompatible
+% with automatic associations;
+% (*) passing of open FDs ("fdopen") is not supported.
+sctp_options() ->
+[ % The following are generic inet options supported for SCTP sockets:
+ mode, active, buffer, tos, priority, dontroute, reuseaddr, linger, sndbuf,
+ recbuf,
+
+ % Other options are SCTP-specific (though they may be similar to their
+ % TCP and UDP counterparts):
+ sctp_rtoinfo, sctp_associnfo, sctp_initmsg,
+ sctp_autoclose, sctp_nodelay, sctp_disable_fragments,
+ sctp_i_want_mapped_v4_addr, sctp_maxseg, sctp_primary_addr,
+ sctp_set_peer_primary_addr, sctp_adaptation_layer, sctp_peer_addr_params,
+ sctp_default_send_param, sctp_events, sctp_delayed_ack_time,
+ sctp_status, sctp_get_peer_addr_info
+].
+
+sctp_options(Opts, Mod) ->
+ case sctp_opt(Opts, Mod, #sctp_opts{}, sctp_options()) of
+ {ok,#sctp_opts{ifaddr=undefined}=SO} ->
+ {ok,SO#sctp_opts{ifaddr=Mod:translate_ip(?SCTP_DEF_IFADDR)}};
+ {ok,_}=OK ->
+ OK;
+ Error -> Error
+ end.
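+
+%% Illustrative only (hypothetical options): gen_sctp:open/1 ends up here
+%% via the inet_sctp/inet6_sctp module, e.g.
+%%   sctp_options([{recbuf,65536}, {sctp_nodelay,true}], inet_sctp)
+%% gives {ok,#sctp_opts{}} with those options validated by add_opt/4 and
+%% ifaddr defaulted to Mod:translate_ip(?SCTP_DEF_IFADDR), since no ip or
+%% ifaddr option was given.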
+
+sctp_opt([Opt|Opts], Mod, R, As) ->
+ case Opt of
+ {ip,IP} ->
+ sctp_opt_ifaddr(Opts, Mod, R, As, IP);
+ {ifaddr,IP} ->
+ sctp_opt_ifaddr(Opts, Mod, R, As, IP);
+ {port,Port} ->
+ case Mod:getserv(Port) of
+ {ok,P} ->
+ sctp_opt(Opts, Mod, R#sctp_opts{port=P}, As);
+ Error -> Error
+ end;
+ binary -> sctp_opt (Opts, Mod, R, As, mode, binary);
+ list -> sctp_opt (Opts, Mod, R, As, mode, list);
+ {sctp_module,_} -> sctp_opt (Opts, Mod, R, As); % Done with
+ inet -> sctp_opt (Opts, Mod, R, As); % Done with
+ inet6 -> sctp_opt (Opts, Mod, R, As); % Done with
+ {Name,Val} -> sctp_opt (Opts, Mod, R, As, Name, Val);
+ _ -> {error,badarg}
+ end;
+sctp_opt([], _Mod, R, _SockOpts) ->
+ {ok, R}.
+
+sctp_opt(Opts, Mod, R, As, Name, Val) ->
+ case add_opt(Name, Val, R#sctp_opts.opts, As) of
+ {ok,SocketOpts} ->
+ sctp_opt(Opts, Mod, R#sctp_opts{opts=SocketOpts}, As);
+ Error -> Error
+ end.
+
+sctp_opt_ifaddr(Opts, Mod, #sctp_opts{ifaddr=IfAddr}=R, As, Addr) ->
+ IP = Mod:translate_ip(Addr),
+ sctp_opt(Opts, Mod,
+ R#sctp_opts{
+ ifaddr=case IfAddr of
+ undefined -> IP;
+ _ when is_list(IfAddr) -> [IP|IfAddr];
+ _ -> [IP,IfAddr]
+ end}, As).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Util to check and insert option in option list
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+add_opt(Name, Val, Opts, As) ->
+ case lists:member(Name, As) of
+ true ->
+ case prim_inet:is_sockopt_val(Name, Val) of
+ true ->
+ Opts1 = lists:keydelete(Name, 1, Opts),
+ {ok, [{Name,Val} | Opts1]};
+ false -> {error,badarg}
+ end;
+ false -> {error,badarg}
+ end.
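+
+%% For example (illustrative values), using the connect option list:
+%%   add_opt(nodelay, true, [{active,true}], connect_options())
+%%     -> {ok,[{nodelay,true},{active,true}]}
+%%   add_opt(backlog, 5, [], connect_options())
+%%     -> {error,badarg}   % backlog is only a listen option
+%% assuming prim_inet:is_sockopt_val/2 accepts the given value.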
+
+
+translate_ip(any, inet) -> {0,0,0,0};
+translate_ip(loopback, inet) -> {127,0,0,1};
+translate_ip(any, inet6) -> {0,0,0,0,0,0,0,0};
+translate_ip(loopback, inet6) -> {0,0,0,0,0,0,0,1};
+translate_ip(IP, _) -> IP.
+
+
+getaddrs_tm({A,B,C,D} = IP, Fam, _) ->
+ %% Only "syntactic" validation and check of family.
+ if
+ ?ip(A,B,C,D) ->
+ if
+ Fam =:= inet -> {ok,[IP]};
+ true -> {error,eafnosupport}
+ end;
+ true -> {error,einval}
+ end;
+getaddrs_tm({A,B,C,D,E,F,G,H} = IP, Fam, _) ->
+ %% Only "syntactic" validation; we assume that the address was
+ %% "semantically" validated when it was converted to a tuple.
+ if
+ ?ip6(A,B,C,D,E,F,G,H) ->
+ if
+ Fam =:= inet6 -> {ok,[IP]};
+ true -> {error,eafnosupport}
+ end;
+ true -> {error,einval}
+ end;
+getaddrs_tm(Address, Family, Timer) when is_atom(Address) ->
+ getaddrs_tm(atom_to_list(Address), Family, Timer);
+getaddrs_tm(Address, Family, Timer) ->
+ case inet_parse:visible_string(Address) of
+ false ->
+ {error,einval};
+ true ->
+ %% Address is a host name or a valid IP address,
+ %% either way check it with the resolver.
+ case gethostbyname_tm(Address, Family, Timer) of
+ {ok,Ent} -> {ok,Ent#hostent.h_addr_list};
+ Error -> Error
+ end
+ end.
+
+%%
+%% gethostbyname with option search
+%%
+gethostbyname_tm(Name, Type, Timer, [dns | Opts]) ->
+ Res = inet_res:gethostbyname_tm(Name, Type, Timer),
+ case Res of
+ {ok,_} -> Res;
+ {error,timeout} -> Res;
+ {error,formerr} -> {error,einval};
+ {error,_} -> gethostbyname_tm(Name,Type,Timer,Opts)
+ end;
+gethostbyname_tm(Name, Type, Timer, [file | Opts]) ->
+ case inet_hosts:gethostbyname(Name, Type) of
+ {error,formerr} -> {error,einval};
+ {error,_} -> gethostbyname_tm(Name,Type,Timer,Opts);
+ Result -> Result
+ end;
+gethostbyname_tm(Name, Type, Timer, [yp | Opts]) ->
+ gethostbyname_tm_native(Name, Type, Timer, Opts);
+gethostbyname_tm(Name, Type, Timer, [nis | Opts]) ->
+ gethostbyname_tm_native(Name, Type, Timer, Opts);
+gethostbyname_tm(Name, Type, Timer, [nisplus | Opts]) ->
+ gethostbyname_tm_native(Name, Type, Timer, Opts);
+gethostbyname_tm(Name, Type, Timer, [wins | Opts]) ->
+ gethostbyname_tm_native(Name, Type, Timer, Opts);
+gethostbyname_tm(Name, Type, Timer, [native | Opts]) ->
+ gethostbyname_tm_native(Name, Type, Timer, Opts);
+gethostbyname_tm(_, _, _, [no_default|_]) ->
+ %% If the native resolver has failed, we should not bother
+ %% to try to be smarter and parse the IP address here.
+ {error,nxdomain};
+gethostbyname_tm(Name, Type, Timer, [_ | Opts]) ->
+ gethostbyname_tm(Name, Type, Timer, Opts);
+%% Last resort - parse the hostname as an address
+gethostbyname_tm(Name, inet, _Timer, []) ->
+ case inet_parse:ipv4_address(Name) of
+ {ok,IP4} ->
+ {ok,make_hostent(Name, [IP4], [], inet)};
+ _ ->
+ gethostbyname_self(Name)
+ end;
+gethostbyname_tm(Name, inet6, _Timer, []) ->
+ case inet_parse:ipv6_address(Name) of
+ {ok,IP6} ->
+ {ok,make_hostent(Name, [IP6], [], inet6)};
+ _ ->
+ %% Even if Name is a valid IPv4 address, we can't
+ %% assume it's correct to return it in IPv6
+ %% format ( {0,0,0,0,0,16#ffff,?u16(A,B),?u16(C,D)} ).
+ %% This host might not support IPv6.
+ gethostbyname_self(Name)
+ end.
+
+gethostbyname_tm_native(Name, Type, Timer, Opts) ->
+ %% Fixme: add (global) timeout to gethost_native
+ case inet_gethost_native:gethostbyname(Name, Type) of
+ {error,formerr} -> {error,einval};
+ {error,timeout} -> {error,timeout};
+ {error,_} -> gethostbyname_tm(Name, Type, Timer, Opts++[no_default]);
+ Result -> Result
+ end.
+
+%% Make sure we can always look up our own hostname.
+gethostbyname_self(Name) ->
+ Type = case inet_db:res_option(inet6) of
+ true -> inet6;
+ false -> inet
+ end,
+ case inet_db:gethostname() of
+ Name ->
+ {ok,make_hostent(Name, [translate_ip(loopback, Type)],
+ [], Type)};
+ Self ->
+ case inet_db:res_option(domain) of
+ "" -> {error,nxdomain};
+ Domain ->
+ case lists:append([Self,".",Domain]) of
+ Name ->
+ {ok,make_hostent(Name,
+ [translate_ip(loopback, Type)],
+ [], Type)};
+ _ -> {error,nxdomain}
+ end
+ end
+ end.
+
+make_hostent(Name, Addrs, Aliases, Type) ->
+ #hostent{h_name = Name,
+ h_aliases = Aliases,
+ h_addrtype = Type,
+ h_length = case Type of inet -> 4; inet6 -> 16 end,
+ h_addr_list = Addrs}.
+
+%%
+%% gethostbyaddr with option search
+%%
+gethostbyaddr_tm(Addr, Timer, [dns | Opts]) ->
+ Res = inet_res:gethostbyaddr_tm(Addr,Timer),
+ case Res of
+ {ok,_} -> Res;
+ {error,timeout} -> Res;
+ {error,formerr} -> {error, einval};
+ {error,_} -> gethostbyaddr_tm(Addr,Timer,Opts)
+ end;
+gethostbyaddr_tm(Addr, Timer, [file | Opts]) ->
+ case inet_hosts:gethostbyaddr(Addr) of
+ {error,formerr} -> {error, einval};
+ {error,_} -> gethostbyaddr_tm(Addr,Timer,Opts);
+ Result -> Result
+ end;
+gethostbyaddr_tm(Addr, Timer, [yp | Opts]) ->
+ gethostbyaddr_tm_native(Addr, Timer, Opts);
+gethostbyaddr_tm(Addr, Timer, [nis | Opts]) ->
+ gethostbyaddr_tm_native(Addr, Timer, Opts);
+gethostbyaddr_tm(Addr, Timer, [nisplus | Opts]) ->
+ gethostbyaddr_tm_native(Addr, Timer, Opts);
+gethostbyaddr_tm(Addr, Timer, [wins | Opts]) ->
+ gethostbyaddr_tm_native(Addr, Timer, Opts);
+gethostbyaddr_tm(Addr, Timer, [native | Opts]) ->
+ gethostbyaddr_tm_native(Addr, Timer, Opts);
+gethostbyaddr_tm(Addr, Timer, [_ | Opts]) ->
+ gethostbyaddr_tm(Addr, Timer, Opts);
+gethostbyaddr_tm({127,0,0,1}=IP, _Timer, []) ->
+ gethostbyaddr_self(IP, inet);
+gethostbyaddr_tm({0,0,0,0,0,0,0,1}=IP, _Timer, []) ->
+ gethostbyaddr_self(IP, inet6);
+gethostbyaddr_tm(_Addr, _Timer, []) ->
+ {error, nxdomain}.
+
+gethostbyaddr_self(IP, Type) ->
+ Name = inet_db:gethostname(),
+ case inet_db:res_option(domain) of
+ "" ->
+ {ok,make_hostent(Name, [IP], [], Type)};
+ Domain ->
+ {ok,make_hostent(Name++"."++Domain, [IP], [Name], Type)}
+ end.
+
+gethostbyaddr_tm_native(Addr, Timer, Opts) ->
+ %% Fixme: use the Timer for the timeout value
+ case inet_gethost_native:gethostbyaddr(Addr) of
+ {error,formerr} -> {error, einval};
+ {error,_} -> gethostbyaddr_tm(Addr,Timer,Opts);
+ Result -> Result
+ end.
+
+-spec open(Fd :: integer(),
+ Addr :: ip_address(),
+ Port :: ip_port(),
+ Opts :: [socket_setopt()],
+ Protocol :: protocol_option(),
+ Family :: 'inet' | 'inet6',
+ Module :: atom()) ->
+ {'ok', socket()} | {'error', posix()}.
+
+open(Fd, Addr, Port, Opts, Protocol, Family, Module) when Fd < 0 ->
+ case prim_inet:open(Protocol, Family) of
+ {ok,S} ->
+ case prim_inet:setopts(S, Opts) of
+ ok ->
+ case if is_list(Addr) ->
+ prim_inet:bind(S, add,
+ [case A of
+ {_,_} -> A;
+ _ -> {A,Port}
+ end || A <- Addr]);
+ true ->
+ prim_inet:bind(S, Addr, Port)
+ end of
+ {ok, _} ->
+ inet_db:register_socket(S, Module),
+ {ok,S};
+ Error ->
+ prim_inet:close(S),
+ Error
+ end;
+ Error ->
+ prim_inet:close(S),
+ Error
+ end;
+ Error ->
+ Error
+ end;
+open(Fd, _Addr, _Port, Opts, Protocol, Family, Module) ->
+ fdopen(Fd, Opts, Protocol, Family, Module).
+
+-spec fdopen(Fd :: non_neg_integer(),
+ Opts :: [socket_setopt()],
+ Protocol :: protocol_option(),
+ Family :: family_option(),
+ Module :: atom()) ->
+ {'ok', socket()} | {'error', posix()}.
+
+fdopen(Fd, Opts, Protocol, Family, Module) ->
+ case prim_inet:fdopen(Protocol, Fd, Family) of
+ {ok, S} ->
+ case prim_inet:setopts(S, Opts) of
+ ok ->
+ inet_db:register_socket(S, Module),
+ {ok, S};
+ Error ->
+ prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% socket stat
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+i() -> i(tcp), i(udp).
+
+i(Proto) -> i(Proto, [port, module, recv, sent, owner,
+ local_address, foreign_address, state]).
+
+i(tcp, Fs) ->
+ ii(tcp_sockets(), Fs, tcp);
+i(udp, Fs) ->
+ ii(udp_sockets(), Fs, udp).
+
+ii(Ss, Fs, Proto) ->
+ LLs = [h_line(Fs) | info_lines(Ss, Fs, Proto)],
+ Maxs = foldl(
+ fun(Line,Max0) -> smax(Max0,Line) end,
+ duplicate(length(Fs),0),LLs),
+ Fmt = append(["~-" ++ integer_to_list(N) ++ "s " || N <- Maxs]) ++ "\n",
+ lists:foreach(fun(Line) -> io:format(Fmt, Line) end, LLs).
+
+smax([Max|Ms], [Str|Strs]) ->
+ N = length(Str),
+ [if N > Max -> N; true -> Max end | smax(Ms, Strs)];
+smax([], []) -> [].
+
+info_lines(Ss, Fs, Proto) -> [i_line(S, Fs,Proto) || S <- Ss].
+i_line(S, Fs, Proto) -> [info(S, F, Proto) || F <- Fs].
+
+h_line(Fs) -> [h_field(atom_to_list(F)) || F <- Fs].
+
+h_field([C|Cs]) -> [upper(C) | hh_field(Cs)].
+
+hh_field([$_,C|Cs]) -> [$\s,upper(C) | hh_field(Cs)];
+hh_field([C|Cs]) -> [C|hh_field(Cs)];
+hh_field([]) -> [].
+
+upper(C) when C >= $a, C =< $z -> (C-$a) + $A;
+upper(C) -> C.
+
+
+info(S, F, Proto) ->
+ case F of
+ owner ->
+ case erlang:port_info(S, connected) of
+ {connected, Owner} -> pid_to_list(Owner);
+ _ -> " "
+ end;
+ port ->
+ case erlang:port_info(S,id) of
+ {id, Id} -> integer_to_list(Id);
+ undefined -> " "
+ end;
+ sent ->
+ case prim_inet:getstat(S, [send_oct]) of
+ {ok,[{send_oct,N}]} -> integer_to_list(N);
+ _ -> " "
+ end;
+ recv ->
+ case prim_inet:getstat(S, [recv_oct]) of
+ {ok,[{recv_oct,N}]} -> integer_to_list(N);
+ _ -> " "
+ end;
+ local_address ->
+ fmt_addr(prim_inet:sockname(S), Proto);
+ foreign_address ->
+ fmt_addr(prim_inet:peername(S), Proto);
+ state ->
+ case prim_inet:getstatus(S) of
+ {ok,Status} -> fmt_status(Status);
+ _ -> " "
+ end;
+ packet ->
+ case prim_inet:getopt(S, packet) of
+ {ok,Type} when is_atom(Type) -> atom_to_list(Type);
+ {ok,Type} when is_integer(Type) -> integer_to_list(Type);
+ _ -> " "
+ end;
+ type ->
+ case prim_inet:gettype(S) of
+ {ok,{_,stream}} -> "STREAM";
+ {ok,{_,dgram}} -> "DGRAM";
+ _ -> " "
+ end;
+ fd ->
+ case prim_inet:getfd(S) of
+ {ok, Fd} -> integer_to_list(Fd);
+ _ -> " "
+ end;
+ module ->
+ case inet_db:lookup_socket(S) of
+ {ok,Mod} -> atom_to_list(Mod);
+ _ -> "prim_inet"
+ end
+ end.
+%% Possible flags: (sorted)
+%% [accepting,bound,busy,connected,connecting,listen,listening,open]
+%%
+fmt_status(Flags) ->
+ case lists:sort(Flags) of
+ [accepting | _] -> "ACCEPTING";
+ [bound,busy,connected|_] -> "CONNECTED*";
+ [bound,connected|_] -> "CONNECTED";
+ [bound,listen,listening | _] -> "LISTENING";
+ [bound,listen | _] -> "LISTEN";
+ [bound,connecting | _] -> "CONNECTING";
+ [bound,open] -> "BOUND";
+ [open] -> "IDLE";
+ [] -> "CLOSED";
+ _ -> "????"
+ end.
+
+fmt_addr({error,enotconn}, _) -> "*:*";
+fmt_addr({error,_}, _) -> " ";
+fmt_addr({ok,Addr}, Proto) ->
+ case Addr of
+ %%Dialyzer {0,0} -> "*:*";
+ {{0,0,0,0},Port} -> "*:" ++ fmt_port(Port, Proto);
+ {{0,0,0,0,0,0,0,0},Port} -> "*:" ++ fmt_port(Port, Proto);
+ {{127,0,0,1},Port} -> "localhost:" ++ fmt_port(Port, Proto);
+ {{0,0,0,0,0,0,0,1},Port} -> "localhost:" ++ fmt_port(Port, Proto);
+ {IP,Port} -> inet_parse:ntoa(IP) ++ ":" ++ fmt_port(Port, Proto)
+ end.
+
+fmt_port(N, Proto) ->
+ case inet:getservbyport(N, Proto) of
+ {ok, Name} -> Name;
+ _ -> integer_to_list(N)
+ end.
+
+%% Return a list of all tcp sockets
+tcp_sockets() -> port_list("tcp_inet").
+udp_sockets() -> port_list("udp_inet").
+
+%% Return all ports having the name 'Name'
+port_list(Name) ->
+ filter(
+ fun(Port) ->
+ case erlang:port_info(Port, name) of
+ {name, Name} -> true;
+ _ -> false
+ end
+ end, erlang:ports()).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% utils
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-spec format_error(posix()) -> string().
+
+format_error(exbadport) -> "invalid port state";
+format_error(exbadseq) -> "bad command sequence";
+format_error(Tag) ->
+ erl_posix_msg:message(Tag).
+
+%% Close a TCP socket.
+tcp_close(S) when is_port(S) ->
+ %% if exit_on_close is set we must force a close even if remotely closed!!!
+ prim_inet:close(S),
+ receive {tcp_closed, S} -> ok after 0 -> ok end.
+
+%% Close a UDP socket.
+udp_close(S) when is_port(S) ->
+ receive
+ {udp_closed, S} -> ok
+ after 0 ->
+ prim_inet:close(S),
+ receive {udp_closed, S} -> ok after 0 -> ok end
+ end.
+
+%% Set controlling process for TCP socket.
+tcp_controlling_process(S, NewOwner) when is_port(S), is_pid(NewOwner) ->
+ case erlang:port_info(S, connected) of
+ {connected, Pid} when Pid =/= self() ->
+ {error, not_owner};
+ undefined ->
+ {error, einval};
+ _ ->
+ case prim_inet:getopt(S, active) of
+ {ok, A0} ->
+ prim_inet:setopt(S, active, false),
+ case tcp_sync_input(S, NewOwner, false) of
+ true -> %% socket already closed,
+ ok;
+ false ->
+ try erlang:port_connect(S, NewOwner) of
+ true ->
+ unlink(S), %% unlink from port
+ prim_inet:setopt(S, active, A0),
+ ok
+ catch
+ error:Reason ->
+ {error, Reason}
+ end
+ end;
+ Error ->
+ Error
+ end
+ end.
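+
+%% Sketch of typical use (hedged; host and port are hypothetical,
+%% gen_tcp:controlling_process/2 delegates here for inet sockets):
+%%   {ok,S} = gen_tcp:connect("localhost", 5678, [binary, {active,true}]),
+%%   NewOwner = spawn(fun() -> receive {tcp,_Sock,_Data} -> ok end end),
+%%   ok = inet:tcp_controlling_process(S, NewOwner)
+%% tcp_sync_input/3 below forwards any already-queued {tcp,S,_} messages to
+%% NewOwner before the port is reconnected, so no input is lost.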
+
+tcp_sync_input(S, Owner, Flag) ->
+ receive
+ {tcp, S, Data} ->
+ Owner ! {tcp, S, Data},
+ tcp_sync_input(S, Owner, Flag);
+ {tcp_closed, S} ->
+ Owner ! {tcp_closed, S},
+ tcp_sync_input(S, Owner, true);
+ {S, {data, Data}} ->
+ Owner ! {S, {data, Data}},
+ tcp_sync_input(S, Owner, Flag);
+ {inet_async, S, Ref, Status} ->
+ Owner ! {inet_async, S, Ref, Status},
+ tcp_sync_input(S, Owner, Flag);
+ {inet_reply, S, Status} ->
+ Owner ! {inet_reply, S, Status},
+ tcp_sync_input(S, Owner, Flag)
+ after 0 ->
+ Flag
+ end.
+
+%% Set controlling process for UDP or SCTP socket.
+udp_controlling_process(S, NewOwner) when is_port(S), is_pid(NewOwner) ->
+ case erlang:port_info(S, connected) of
+ {connected, Pid} when Pid =/= self() ->
+ {error, not_owner};
+ _ ->
+ {ok, A0} = prim_inet:getopt(S, active),
+ prim_inet:setopt(S, active, false),
+ udp_sync_input(S, NewOwner),
+ try erlang:port_connect(S, NewOwner) of
+ true ->
+ unlink(S),
+ prim_inet:setopt(S, active, A0),
+ ok
+ catch
+ error:Reason ->
+ {error, Reason}
+ end
+ end.
+
+udp_sync_input(S, Owner) ->
+ receive
+ {sctp, S, _, _, _}=Msg -> udp_sync_input(S, Owner, Msg);
+ {udp, S, _, _, _}=Msg -> udp_sync_input(S, Owner, Msg);
+ {udp_closed, S}=Msg -> udp_sync_input(S, Owner, Msg);
+ {S, {data,_}}=Msg -> udp_sync_input(S, Owner, Msg);
+ {inet_async, S, _, _}=Msg -> udp_sync_input(S, Owner, Msg);
+ {inet_reply, S, _}=Msg -> udp_sync_input(S, Owner, Msg)
+ after 0 ->
+ ok
+ end.
+
+udp_sync_input(S, Owner, Msg) ->
+ Owner ! Msg,
+ udp_sync_input(S, Owner).
+
+start_timer(infinity) -> false;
+start_timer(Timeout) ->
+ erlang:start_timer(Timeout, self(), inet).
+
+timeout(false) -> infinity;
+timeout(Timer) ->
+ case erlang:read_timer(Timer) of
+ false -> 0;
+ Time -> Time
+ end.
+
+timeout(Time, false) -> Time;
+timeout(Time, Timer) ->
+ TimerTime = timeout(Timer),
+ if TimerTime < Time -> TimerTime;
+ true -> Time
+ end.
+
+stop_timer(false) -> false;
+stop_timer(Timer) ->
+ case erlang:cancel_timer(Timer) of
+ false ->
+ receive
+ {timeout,Timer,_} -> false
+ after 0 ->
+ false
+ end;
+ T -> T
+ end.
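+
+%% Intended use of the timer helpers above (a minimal sketch):
+%%   Timer = start_timer(5000),  %% false if the timeout is infinity
+%%   ...
+%%   Time = timeout(Timer),      %% remaining ms; infinity if no timer, 0 if expired
+%%   ...
+%%   stop_timer(Timer).          %% cancel and flush any late timeout message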
diff --git a/lib/kernel/src/inet6_sctp.erl b/lib/kernel/src/inet6_sctp.erl
new file mode 100644
index 0000000000..5c49c4fec3
--- /dev/null
+++ b/lib/kernel/src/inet6_sctp.erl
@@ -0,0 +1,75 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% SCTP protocol contribution by Leonid Timochouk and Serge Aleynikov.
+%% See also: $ERL_TOP/lib/kernel/AUTHORS
+%%
+%%
+-module(inet6_sctp).
+
+%% This module provides functions for communicating with
+%% sockets using the SCTP protocol. The implementation assumes that
+%% the OS kernel supports SCTP providing user-level SCTP Socket API:
+%% http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13
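+%%
+%% A minimal usage sketch, going through the public gen_sctp API rather
+%% than calling this module directly (option values are illustrative):
+%%   {ok, S} = gen_sctp:open([inet6, {port, 0}]),
+%%   ok = gen_sctp:listen(S, true).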
+
+-include("inet_sctp.hrl").
+-include("inet_int.hrl").
+
+-define(FAMILY, inet6).
+-export([getserv/1,getaddr/1,getaddr/2,translate_ip/1]).
+-export([open/1,close/1,listen/2,connect/5,sendmsg/3,recv/2]).
+
+
+
+getserv(Port) when is_integer(Port) -> {ok, Port};
+getserv(Name) when is_atom(Name) ->
+ inet:getservbyname(Name, sctp);
+getserv(_) ->
+ {error,einval}.
+
+getaddr(Address) ->
+ inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) ->
+ inet:getaddr_tm(Address, ?FAMILY, Timer).
+
+translate_ip(IP) ->
+ inet:translate_ip(IP, ?FAMILY).
+
+
+
+open(Opts) ->
+ case inet:sctp_options(Opts, ?MODULE) of
+ {ok,#sctp_opts{fd=Fd,ifaddr=Addr,port=Port,opts=SOs}} ->
+ inet:open(Fd, Addr, Port, SOs, sctp, ?FAMILY, ?MODULE);
+ Error -> Error
+ end.
+
+close(S) ->
+ prim_inet:close(S).
+
+listen(S, Flag) ->
+ prim_inet:listen(S, Flag).
+
+connect(S, Addr, Port, Opts, Timer) ->
+ inet_sctp:connect(S, Addr, Port, Opts, Timer).
+
+sendmsg(S, SRI, Data) ->
+ prim_inet:sendmsg(S, SRI, Data).
+
+recv(S, Timeout) ->
+ prim_inet:recvfrom(S, 0, Timeout).
diff --git a/lib/kernel/src/inet6_tcp.erl b/lib/kernel/src/inet6_tcp.erl
new file mode 100644
index 0000000000..cc45f6c7f6
--- /dev/null
+++ b/lib/kernel/src/inet6_tcp.erl
@@ -0,0 +1,153 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet6_tcp).
+
+-export([connect/3, connect/4, listen/2, accept/1, accept/2, close/1]).
+-export([send/2, send/3, recv/2, recv/3, unrecv/2]).
+-export([shutdown/2]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-export([getserv/1, getaddr/1, getaddr/2, getaddrs/1, getaddrs/2]).
+
+-include("inet_int.hrl").
+
+%% inet6_tcp port lookup
+getserv(Port) when is_integer(Port) -> {ok, Port};
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,tcp).
+
+%% inet6_tcp address lookup
+getaddr(Address) -> inet:getaddr(Address, inet6).
+getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet6, Timer).
+
+%% inet6_tcp address lookup
+getaddrs(Address) -> inet:getaddrs(Address, inet6).
+getaddrs(Address,Timer) -> inet:getaddrs_tm(Address,inet6,Timer).
+
+%%
+%% Send data on a socket
+%%
+send(Socket, Packet, Opts) -> prim_inet:send(Socket, Packet, Opts).
+send(Socket, Packet) -> prim_inet:send(Socket, Packet, []).
+
+%%
+%% Receive data from a socket (inactive only)
+%%
+recv(Socket, Length) -> prim_inet:recv(Socket, Length).
+recv(Socket, Length, Timeout) -> prim_inet:recv(Socket, Length, Timeout).
+
+unrecv(Socket, Data) -> prim_inet:unrecv(Socket, Data).
+%%
+%% Close a socket (async)
+%%
+close(Socket) ->
+ inet:tcp_close(Socket).
+
+%%
+%% Shutdown one end of a socket
+%%
+shutdown(Socket, How) ->
+ prim_inet:shutdown(Socket, How).
+
+%%
+%% Set controlling process
+%% FIXME: move messages to new owner!!!
+%%
+controlling_process(Socket, NewOwner) ->
+ inet:tcp_controlling_process(Socket, NewOwner).
+
+%%
+%% Connect
+%%
+connect(Address, Port, Opts) ->
+ do_connect(Address, Port, Opts, infinity).
+
+connect(Address, Port, Opts, infinity) ->
+ do_connect(Address, Port, Opts, infinity);
+connect(Address, Port, Opts, Timeout) when is_integer(Timeout),
+ Timeout >= 0 ->
+ do_connect(Address, Port, Opts, Timeout).
+
+do_connect(Addr = {A,B,C,D,E,F,G,H}, Port, Opts, Time) when
+ ?ip6(A,B,C,D,E,F,G,H), ?port(Port) ->
+ case inet:connect_options(Opts, inet6) of
+ {error, Reason} -> exit(Reason);
+ {ok, #connect_opts{fd=Fd,
+ ifaddr=BAddr={Ab,Bb,Cb,Db,Eb,Fb,Gb,Hb},
+ port=BPort,
+ opts=SockOpts}}
+ when ?ip6(Ab,Bb,Cb,Db,Eb,Fb,Gb,Hb), ?port(BPort) ->
+ case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet6,?MODULE) of
+ {ok, S} ->
+ case prim_inet:connect(S, Addr, Port, Time) of
+ ok -> {ok,S};
+ Error -> prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end;
+ {ok, _} -> exit(badarg)
+ end.
+
+%%
+%% Listen
+%%
+listen(Port, Opts) ->
+ case inet:listen_options([{port,Port} | Opts], inet6) of
+ {error, Reason} -> exit(Reason);
+ {ok, #listen_opts{fd=Fd,
+ ifaddr=BAddr={A,B,C,D,E,F,G,H},
+ port=BPort,
+ opts=SockOpts}=R}
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(BPort) ->
+ case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet6,?MODULE) of
+ {ok, S} ->
+ case prim_inet:listen(S, R#listen_opts.backlog) of
+ ok -> {ok, S};
+ Error -> prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end;
+ {ok, _} -> exit(badarg)
+ end.
+
+%%
+%% Accept
+%%
+accept(L) ->
+ case prim_inet:accept(L) of
+ {ok, S} ->
+ inet_db:register_socket(S, ?MODULE),
+ {ok,S};
+ Error -> Error
+ end.
+
+accept(L,Timeout) ->
+ case prim_inet:accept(L,Timeout) of
+ {ok, S} ->
+ inet_db:register_socket(S, ?MODULE),
+ {ok,S};
+ Error -> Error
+ end.
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ inet:fdopen(Fd, Opts, tcp, inet6, ?MODULE).
+
diff --git a/lib/kernel/src/inet6_tcp_dist.erl b/lib/kernel/src/inet6_tcp_dist.erl
new file mode 100644
index 0000000000..34cf582af7
--- /dev/null
+++ b/lib/kernel/src/inet6_tcp_dist.erl
@@ -0,0 +1,417 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet6_tcp_dist).
+
+%% Handles the connection setup phase with other Erlang nodes.
+
+-export([listen/1, accept/1, accept_connection/5,
+ setup/5, close/1, select/1, is_node_name/1]).
+
+%% internal exports
+
+-export([accept_loop/2,do_accept/6,do_setup/6, getstat/1,tick/1]).
+
+-import(error_logger,[error_msg/2]).
+
+-include("net_address.hrl").
+
+
+
+-define(to_port(Socket, Data, Opts),
+ case inet6_tcp:send(Socket, Data, Opts) of
+ {error, closed} ->
+ self() ! {tcp_closed, Socket},
+ {error, closed};
+ R ->
+ R
+ end).
+
+
+-include("dist.hrl").
+-include("dist_util.hrl").
+
+%% ------------------------------------------------------------
+%% Select this protocol based on node name
+%% select(Node) => Bool
+%% ------------------------------------------------------------
+
+select(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_, Host] ->
+ case inet:getaddr(Host,inet6) of
+ {ok,_} -> true;
+ _ -> false
+ end;
+ _ -> false
+ end.
+
+%% ------------------------------------------------------------
+%% Create the listen socket, i.e. the port that this erlang
+%% node is accessible through.
+%% ------------------------------------------------------------
+
+listen(Name) ->
+ case inet6_tcp:listen(0, [{active, false}, {packet,2}]) of
+ {ok, Socket} ->
+ TcpAddress = get_tcp_address(Socket),
+ {_,Port} = TcpAddress#net_address.address,
+ {ok, Creation} = erl_epmd:register_node(Name, Port),
+ {ok, {Socket, TcpAddress, Creation}};
+ Error ->
+ Error
+ end.
+
+%% ------------------------------------------------------------
+%% Accepts new connection attempts from other Erlang nodes.
+%% ------------------------------------------------------------
+
+accept(Listen) ->
+ spawn_opt(?MODULE, accept_loop, [self(), Listen], [link, {priority, max}]).
+
+accept_loop(Kernel, Listen) ->
+ case inet6_tcp:accept(Listen) of
+ {ok, Socket} ->
+ Kernel ! {accept,self(),Socket,inet6,tcp},
+ controller(Kernel, Socket),
+ accept_loop(Kernel, Listen);
+ Error ->
+ exit(Error)
+ end.
+
+controller(Kernel, Socket) ->
+ receive
+ {Kernel, controller, Pid} ->
+ flush_controller(Pid, Socket),
+ inet6_tcp:controlling_process(Socket, Pid),
+ flush_controller(Pid, Socket),
+ Pid ! {self(), controller};
+ {Kernel, unsupported_protocol} ->
+ exit(unsupported_protocol)
+ end.
+
+flush_controller(Pid, Socket) ->
+ receive
+ {tcp, Socket, Data} ->
+ Pid ! {tcp, Socket, Data},
+ flush_controller(Pid, Socket);
+ {tcp_closed, Socket} ->
+ Pid ! {tcp_closed, Socket},
+ flush_controller(Pid, Socket)
+ after 0 ->
+ ok
+ end.
+
+%% ------------------------------------------------------------
+%% Accepts a new connection attempt from another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
+ spawn_opt(?MODULE, do_accept,
+ [self(), AcceptPid, Socket, MyNode, Allowed, SetupTime],
+ [link, {priority, max}]).
+
+do_accept(Kernel, AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
+ receive
+ {AcceptPid, controller} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case check_ip(Socket) of
+ true ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ allowed = Allowed,
+ f_send = fun(S,D) -> inet6_tcp:send(S,D) end,
+ f_recv = fun(S,N,T) -> inet6_tcp:recv(S,N,T)
+ end,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts(S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts(S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_getll = fun(S) ->
+ inet:getll(S)
+ end,
+ f_address = fun get_remote_id/2,
+ mf_tick = {?MODULE, tick},
+ mf_getstat = {?MODULE,getstat}
+ },
+ dist_util:handshake_other_started(HSData);
+ {false,IP} ->
+ error_msg("** Connection attempt from "
+ "disallowed IP ~w ** ~n", [IP]),
+ ?shutdown(no_node)
+ end
+ end.
+
+
+%% we may not always want the nodelay behaviour
+%% for performance reasons
+
+nodelay() ->
+ case application:get_env(kernel, dist_nodelay) of
+ undefined ->
+ {nodelay, true};
+ {ok, true} ->
+ {nodelay, true};
+ {ok, false} ->
+ {nodelay, false};
+ _ ->
+ {nodelay, true}
+ end.
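+
+%% The flag is read from the kernel application environment, so it can be
+%% set on the command line, e.g. (illustrative):
+%%   erl -kernel dist_nodelay false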
+
+
+%% ------------------------------------------------------------
+%% Get remote information about a Socket.
+%% ------------------------------------------------------------
+
+get_remote_id(Socket, Node) ->
+ {ok, Address} = inet:peername(Socket),
+ [_, Host] = split_node(atom_to_list(Node), $@, []),
+ #net_address {
+ address = Address,
+ host = Host,
+ protocol = tcp,
+ family = inet6 }.
+
+%% ------------------------------------------------------------
+%% Setup a new connection to another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ spawn_opt(?MODULE, do_setup,
+ [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
+ [link, {priority, max}]).
+
+do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ ?trace("~p~n",[{?MODULE,self(),setup,Node}]),
+ [Name, Address] = splitnode(Node, LongOrShortNames),
+ case inet:getaddr(Address, inet6) of
+ {ok, Ip} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case erl_epmd:port_please(Name, Ip) of
+ {port, TcpPort, Version} ->
+ ?trace("port_please(~p) -> version ~p~n",
+ [Node,Version]),
+ dist_util:reset_timer(Timer),
+ case inet6_tcp:connect(Ip, TcpPort,
+ [{active, false},
+ {packet,2}]) of
+ {ok, Socket} ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun inet6_tcp:send/2,
+ f_recv = fun inet6_tcp:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address {
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = inet6}
+ end,
+ mf_tick = fun ?MODULE:tick/1,
+ mf_getstat = fun ?MODULE:getstat/1,
+ request_type = Type
+ },
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% port_please !
+ ?trace("other node (~p) "
+ "closed since port_please.~n",
+ [Node]),
+ ?shutdown(Node)
+ end;
+ _ ->
+ ?trace("port_please (~p) "
+ "failed.~n", [Node]),
+ ?shutdown(Node)
+ end;
+ __Other ->
+ ?trace("inet_getaddr(~p) "
+ "failed (~p).~n", [Node,__Other]),
+ ?shutdown(Node)
+ end.
+
+%%
+%% Close a socket.
+%%
+close(Socket) ->
+ inet6_tcp:close(Socket).
+
+
+%% If Node is illegal terminate the connection setup!!
+splitnode(Node, LongOrShortNames) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [Name|Tail] when Tail =/= [] ->
+ Host = lists:append(Tail),
+ case split_node(Host, $., []) of
+ [_] when LongOrShortNames =:= longnames ->
+ error_msg("** System running to use "
+ "fully qualified "
+ "hostnames **~n"
+ "** Hostname ~s is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ L when length(L) > 1, LongOrShortNames =:= shortnames ->
+ error_msg("** System NOT running to use fully qualified "
+ "hostnames **~n"
+ "** Hostname ~s is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ _ ->
+ [Name, Host]
+ end;
+ [_] ->
+ error_msg("** Nodename ~p illegal, no '@' character **~n",
+ [Node]),
+ ?shutdown(Node);
+ _ ->
+ error_msg("** Nodename ~p illegal **~n", [Node]),
+ ?shutdown(Node)
+ end.
+
+split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
+split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
+split_node([], _, Ack) -> [lists:reverse(Ack)].
+
+%% ------------------------------------------------------------
+%% Fetch local information about a Socket.
+%% ------------------------------------------------------------
+get_tcp_address(Socket) ->
+ {ok, Address} = inet:sockname(Socket),
+ {ok, Host} = inet:gethostname(),
+ #net_address {
+ address = Address,
+ host = Host,
+ protocol = tcp,
+ family = inet6
+ }.
+
+%% ------------------------------------------------------------
+%% Only accept new connection attempts from nodes on our
+%% own LAN, if the check_ip environment parameter is true.
+%% ------------------------------------------------------------
+check_ip(Socket) ->
+ case application:get_env(check_ip) of
+ {ok, true} ->
+ case get_ifs(Socket) of
+ {ok, IFs, IP} ->
+ check_ip(IFs, IP);
+ _ ->
+ ?shutdown(no_node)
+ end;
+ _ ->
+ true
+ end.
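+
+%% Example (assuming the parameter is looked up in the kernel application
+%% environment, which is the application this process runs under):
+%%   erl -kernel check_ip true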
+
+get_ifs(Socket) ->
+ case inet:peername(Socket) of
+ {ok, {IP, _}} ->
+ case inet:getif(Socket) of
+ {ok, IFs} -> {ok, IFs, IP};
+ Error -> Error
+ end;
+ Error ->
+ Error
+ end.
+
+check_ip([{OwnIP, _, Netmask}|IFs], PeerIP) ->
+ case {mask(Netmask, PeerIP), mask(Netmask, OwnIP)} of
+ {M, M} -> true;
+ _ -> check_ip(IFs, PeerIP)
+ end;
+check_ip([], PeerIP) ->
+ {false, PeerIP}.
+
+mask({M1,M2,M3,M4,M5,M6,M7,M8}, {IP1,IP2,IP3,IP4,IP5,IP6,IP7,IP8}) ->
+ {M1 band IP1,
+ M2 band IP2,
+ M3 band IP3,
+ M4 band IP4,
+ M5 band IP5,
+ M6 band IP6,
+ M7 band IP7,
+ M8 band IP8 }.
+
+is_node_name(Node) when is_atom(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_,_Host] -> true;
+ _ -> false
+ end;
+is_node_name(_Node) ->
+ false.
+tick(Sock) ->
+ ?to_port(Sock,[],[force]).
+getstat(Socket) ->
+ case inet:getstat(Socket, [recv_cnt, send_cnt, send_pend]) of
+ {ok, Stat} ->
+ split_stat(Stat,0,0,0);
+ Error ->
+ Error
+ end.
+
+split_stat([{recv_cnt, R}|Stat], _, W, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_cnt, W}|Stat], R, _, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_pend, P}|Stat], R, W, _) ->
+ split_stat(Stat, R, W, P);
+split_stat([], R, W, P) ->
+ {ok, R, W, P}.
+
diff --git a/lib/kernel/src/inet6_udp.erl b/lib/kernel/src/inet6_udp.erl
new file mode 100644
index 0000000000..e81d417151
--- /dev/null
+++ b/lib/kernel/src/inet6_udp.erl
@@ -0,0 +1,87 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet6_udp).
+
+-export([open/1, open/2, close/1]).
+-export([send/2, send/4, recv/2, recv/3, connect/3]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-export([getserv/1, getaddr/1, getaddr/2]).
+
+-include("inet_int.hrl").
+
+%% inet6_udp port lookup
+getserv(Port) when is_integer(Port) -> {ok, Port};
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,udp).
+
+%% inet6_udp address lookup
+getaddr(Address) -> inet:getaddr(Address, inet6).
+getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet6, Timer).
+
+open(Port) -> open(Port, []).
+
+open(Port, Opts) ->
+ case inet:udp_options([{port,Port} | Opts], inet6) of
+ {error, Reason} -> exit(Reason);
+ {ok, #udp_opts{fd=Fd,
+ ifaddr=BAddr={A,B,C,D,E,F,G,H},
+ port=BPort,
+ opts=SockOpts}}
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(BPort) ->
+ inet:open(Fd,BAddr,BPort,SockOpts,udp,inet6,?MODULE);
+ {ok, _} -> exit(badarg)
+ end.
+
+send(S, Addr = {A,B,C,D,E,F,G,H}, P, Data)
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(P) ->
+ prim_inet:sendto(S, Addr, P, Data).
+
+send(S, Data) ->
+ prim_inet:sendto(S, {0,0,0,0,0,0,0,0}, 0, Data).
+
+connect(S, Addr = {A,B,C,D,E,F,G,H}, P)
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(P) ->
+ prim_inet:connect(S, Addr, P).
+
+recv(S,Len) ->
+ prim_inet:recvfrom(S, Len).
+
+recv(S,Len,Time) ->
+ prim_inet:recvfrom(S, Len, Time).
+
+close(S) ->
+ inet:udp_close(S).
+
+%%
+%% Set controlling process:
+%% 1) First sync socket into a known state
+%% 2) Move all messages onto the new owners message queue
+%% 3) Commit the owner
+%% 4) Wait for ack of new Owner (since socket does some link and unlink)
+%%
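+%%
+%% A minimal usage sketch (NewOwner is any process prepared to receive
+%% the socket messages; values are illustrative):
+%%   {ok, S} = inet6_udp:open(0, [{active, true}]),
+%%   NewOwner = spawn(fun() -> receive {udp, _, _, _, _} -> ok end end),
+%%   ok = inet6_udp:controlling_process(S, NewOwner).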
+
+controlling_process(Socket, NewOwner) ->
+ inet:udp_controlling_process(Socket, NewOwner).
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ inet:fdopen(Fd, Opts, udp, inet6, ?MODULE).
diff --git a/lib/kernel/src/inet_boot.hrl b/lib/kernel/src/inet_boot.hrl
new file mode 100644
index 0000000000..35501a0f9c
--- /dev/null
+++ b/lib/kernel/src/inet_boot.hrl
@@ -0,0 +1,32 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%% Defines used for erlang boot/load protocol
+%%
+
+-define(EBOOT_PORT, 4368). %% same as epmd but for udp !
+
+-define(EBOOT_REQUEST, "EBOOTQ").
+-define(EBOOT_REPLY, "EBOOTR").
+
+-define(EBOOT_RETRY, 3). % number of retries before sleep
+-define(EBOOT_REQUEST_DELAY, 500). % delay between retries
+-define(EBOOT_SHORT_RETRY_SLEEP, 10000). % initial sleep time between boot attempts
+-define(EBOOT_UNSUCCESSFUL_TRIES, 10). % retries before longer sleep
+-define(EBOOT_LONG_RETRY_SLEEP, 60000). % sleep time after a number of unsuccessful tries
diff --git a/lib/kernel/src/inet_config.erl b/lib/kernel/src/inet_config.erl
new file mode 100644
index 0000000000..b5317f72f5
--- /dev/null
+++ b/lib/kernel/src/inet_config.erl
@@ -0,0 +1,638 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_config).
+
+-include("inet_config.hrl").
+-include("inet.hrl").
+
+-import(lists, [foreach/2, member/2, reverse/1]).
+
+-export([init/0]).
+
+-export([do_load_resolv/2]).
+
+%%
+%% Must be called after inet_db:start
+%%
+%% Order in which to load inet_db data:
+%% 1. Hostname (possibly derive domain and search)
+%% 2. OS default /etc/resolv.conf, Windows registry etc
+%% a) Hosts database
+%% b) Resolver options
+%% 3. Config (kernel app)
+%% 4. Root (otp root)
+%% 5. Home (user inetrc)
+%%
+%%
+-spec init() -> 'ok'.
+init() ->
+ OsType = os:type(),
+ case OsType of
+ {ose,_} ->
+ case init:get_argument(loader) of
+ {ok,[["ose_inet"]]} ->
+ %% port already started by prim_loader
+ ok;
+ _Other ->
+ %% Setup reserved port for ose_inet driver (only OSE)
+ case catch erlang:open_port({spawn,"ose_inet"}, [binary]) of
+ {'EXIT',Why} ->
+ error("can't open port for ose_inet: ~p", [Why]);
+ OseInetPort ->
+ erlang:display({ose_inet_port,OseInetPort})
+ end
+ end;
+ _ ->
+ ok
+ end,
+
+ set_hostname(),
+
+ %% Note: In shortnames (or non-distributed) mode we don't need to know
+ %% our own domain name. In longnames mode we do and we can't rely on
+ %% the user to provide it (by means of inetrc), so we need to look
+ %% for it ourselves.
+
+ do_load_resolv(OsType, erl_dist_mode()),
+
+ case OsType of
+ {unix,Type} ->
+ if Type =:= linux ->
+ %% It may be the case that the domain name was not set
+ %% because the hostname was short. But NOW we can look it
+ %% up and get the long name and the domain name from it.
+
+ %% FIXME: The second call to set_hostname will insert
+ %% a duplicate entry in the search list.
+
+ case inet_db:res_option(domain) of
+ "" ->
+ case inet:gethostbyname(inet_db:gethostname()) of
+ {ok,#hostent{h_name = []}} ->
+ ok;
+ {ok,#hostent{h_name = HostName}} ->
+ set_hostname({ok,HostName});
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end;
+ true -> ok
+ end,
+ add_dns_lookup(inet_db:res_option(lookup));
+ _ ->
+ ok
+ end,
+
+ %% Read inetrc file, if it exists.
+ {RcFile,CfgFiles,CfgList} = read_rc(),
+
+ %% Possibly read config files or system registry
+ lists:foreach(fun({file,hosts,File}) ->
+ load_hosts(File, unix);
+ ({file,Func,File}) ->
+ load_resolv(File, Func);
+ ({registry,win32}) ->
+ case OsType of
+ {win32,WinType} ->
+ win32_load_from_registry(WinType);
+ _ ->
+ error("can not read win32 system registry~n", [])
+ end
+ end, CfgFiles),
+
+ %% Add inetrc config entries
+ case inet_db:add_rc_list(CfgList) of
+ ok -> ok;
+ _ -> error("syntax error in ~s~n", [RcFile])
+ end,
+
+ %% Set up a resolver configuration file for inet_res,
+ %% unless that already has been done
+ case OsType of
+ {unix,_} ->
+ %% The Etc variable enables us to run tests with other
+ %% configuration files than the normal ones
+ Etc = case os:getenv("ERL_INET_ETC_DIR") of
+ false -> ?DEFAULT_ETC;
+ _EtcDir ->
+ _EtcDir
+ end,
+ case inet_db:res_option(resolv_conf) of
+ undefined ->
+ inet_db:set_resolv_conf(filename:join(Etc,
+ ?DEFAULT_RESOLV));
+ _ -> ok
+ end,
+ case inet_db:res_option(hosts_file) of
+ undefined ->
+ inet_db:set_hosts_file(filename:join(Etc,
+ ?DEFAULT_HOSTS));
+ _ -> ok
+ end;
+ _ -> ok
+ end.
+
+
+
+erl_dist_mode() ->
+ case init:get_argument(sname) of
+ {ok,[[_SName]]} -> shortnames;
+ _ ->
+ case init:get_argument(name) of
+ {ok,[[_Name]]} -> longnames;
+ _ -> nonames
+ end
+ end.
+
+do_load_resolv({unix,Type}, longnames) ->
+ %% The Etc variable enables us to run tests with other
+ %% configuration files than the normal ones
+ Etc = case os:getenv("ERL_INET_ETC_DIR") of
+ false -> ?DEFAULT_ETC;
+ _EtcDir ->
+ _EtcDir
+ end,
+ load_resolv(filename:join(Etc, ?DEFAULT_RESOLV), resolv),
+ case Type of
+ freebsd -> %% we may have to check version (2.2.2)
+ load_resolv(filename:join(Etc,"host.conf"), host_conf_freebsd);
+ 'bsd/os' ->
+ load_resolv(filename:join(Etc,"irs.conf"), host_conf_bsdos);
+ sunos ->
+ case os:version() of
+ {Major,_,_} when Major >= 5 ->
+ load_resolv(filename:join(Etc,"nsswitch.conf"),
+ nsswitch_conf);
+ _ ->
+ ok
+ end;
+ netbsd ->
+ case os:version() of
+ {Major,Minor,_} when Major >= 1, Minor >= 4 ->
+ load_resolv(filename:join(Etc,"nsswitch.conf"),
+ nsswitch_conf);
+ _ ->
+ ok
+ end;
+ linux ->
+ case load_resolv(filename:join(Etc,"host.conf"),
+ host_conf_linux) of
+ ok ->
+ ok;
+ _ ->
+ load_resolv(filename:join(Etc,"nsswitch.conf"),
+ nsswitch_conf)
+ end;
+ _ ->
+ ok
+ end,
+ inet_db:set_lookup([native]);
+
+do_load_resolv({win32,Type}, longnames) ->
+ win32_load_from_registry(Type),
+ inet_db:set_lookup([native]);
+
+do_load_resolv(vxworks, _) ->
+ vxworks_load_hosts(),
+ inet_db:set_lookup([file, dns]),
+ case os:getenv("ERLRESCONF") of
+ false ->
+ no_ERLRESCONF;
+ Resolv ->
+ load_resolv(Resolv, resolv)
+ end;
+
+do_load_resolv({ose,_Type}, _) ->
+ inet_db:set_lookup([file, dns]),
+ case os:getenv("NAMESERVER") of
+ false ->
+ case os:getenv("RESOLVFILE") of
+ false ->
+ erlang:display('Warning: No NAMESERVER or RESOLVFILE specified!'),
+ no_resolv;
+ Resolv ->
+ load_resolv(Resolv, resolv)
+ end;
+ Ns ->
+ {ok,IP} = inet_parse:address(Ns),
+ inet_db:add_rc_list([{nameserver,IP}])
+ end,
+ case os:getenv("DOMAIN") of
+ false ->
+ no_domain;
+ D ->
+ ok = inet_db:add_rc_list([{domain,D}])
+ end,
+ case os:getenv("HOSTSFILE") of
+ false ->
+ erlang:display('Warning: No HOSTSFILE specified!'),
+ no_hosts_file;
+ File ->
+ load_hosts(File, ose)
+ end;
+
+do_load_resolv(_, _) ->
+ inet_db:set_lookup([native]).
+
+add_dns_lookup(L) ->
+ case lists:member(dns,L) of
+ true -> ok;
+ _ ->
+ case application:get_env(kernel,inet_dns_when_nis) of
+ {ok,true} ->
+ add_dns_lookup(L,[]);
+ _ ->
+ ok
+ end
+ end.
+
+add_dns_lookup([yp|T],Acc) ->
+ add_dns_lookup(T,[yp,dns|Acc]);
+add_dns_lookup([H|T],Acc) ->
+ add_dns_lookup(T,[H|Acc]);
+add_dns_lookup([],Acc) ->
+ inet_db:set_lookup(reverse(Acc)).
+
+%%
+%% Set the hostname (SHORT)
+%% If the hostname is long, use the suffix as the default domain
+%% and initialize the search list with that domain
+%%
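+%% For example (names made up):
+%%   "host1.example.com" -> hostname "host1", domain/search "example.com"
+%%   "host1"             -> hostname "host1", no domain set here
+%%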
+set_hostname() ->
+ case inet_udp:open(0,[]) of
+ {ok,U} ->
+ Res = inet:gethostname(U),
+ inet_udp:close(U),
+ set_hostname(Res);
+ _ ->
+ set_hostname({ok, []})
+ end.
+
+set_hostname({ok,Name}) when length(Name) > 0 ->
+ {Host, Domain} = lists:splitwith(fun($.) -> false;
+ (_) -> true
+ end, Name),
+ inet_db:set_hostname(Host),
+ set_search_dom(Domain);
+set_hostname({ok,[]}) ->
+ inet_db:set_hostname("nohost"),
+ set_search_dom("nodomain").
+
+set_search_dom([$.|Domain]) ->
+ %% leading . not removed by splitwith above.
+ inet_db:set_domain(Domain),
+ inet_db:ins_search(Domain),
+ ok;
+set_search_dom([]) ->
+ ok;
+set_search_dom(Domain) ->
+ inet_db:set_domain(Domain),
+ inet_db:ins_search(Domain),
+ ok.
+
+%%
+%% Load resolver data
+%%
+load_resolv(File, Func) ->
+ case get_file(File) of
+ {ok,Bin} ->
+ case inet_parse:Func(File, {chars, Bin}) of
+ {ok, Ls} ->
+ inet_db:add_rc_list(Ls);
+ {error, Reason} ->
+ error("parse error in file ~s: ~p", [File, Reason])
+ end;
+ Error ->
+ warning("file not found ~s: ~p~n", [File, Error])
+ end.
+
+%%
+%% Load a UNIX hosts file
+%%
+load_hosts(File,Os) ->
+ case get_file(File) of
+ {ok,Bin} ->
+ case inet_parse:hosts(File,{chars,Bin}) of
+ {ok, Ls} ->
+ foreach(
+ fun({IP, Name, Aliases}) ->
+ inet_db:add_host(IP, [Name|Aliases]) end,
+ Ls);
+ {error, Reason} ->
+ error("parse error in file ~s: ~p", [File, Reason])
+ end;
+ Error ->
+ case Os of
+ unix ->
+ error("file not found ~s: ~p~n", [File, Error]);
+ _ ->
+ %% for windows or nt the hosts file is not always there
+ %% and we don't require it
+ ok
+ end
+ end.
+
+%%
+%% Load resolver data from Windows registry
+%%
+win32_load_from_registry(Type) ->
+ %% The TcpReg variable enables us to run tests with other registry configurations than
+ %% the normal ones
+ TcpReg = case os:getenv("ERL_INET_ETC_DIR") of
+ false -> [];
+ _TReg -> _TReg
+ end,
+ {ok, Reg} = win32reg:open([read]),
+ {TcpIp,HFileKey} =
+ case Type of
+ nt ->
+ case TcpReg of
+ [] ->
+ {"\\hklm\\system\\CurrentControlSet\\Services\\TcpIp\\Parameters",
+ "DataBasePath" };
+ Other ->
+ {Other,"DataBasePath"}
+ end;
+ windows ->
+ case TcpReg of
+ [] ->
+ {"\\hklm\\system\\CurrentControlSet\\Services\\VxD\\MSTCP",
+ "LMHostFile" };
+ Other ->
+ {Other,"LMHostFile"}
+ end
+ end,
+ Result =
+ case win32reg:change_key(Reg,TcpIp) of
+ ok ->
+ win32_load1(Reg,Type,HFileKey);
+ {error, _Reason} ->
+ error("Failed to locate TCP/IP parameters (is TCP/IP installed)?",
+ [])
+ end,
+ win32reg:close(Reg),
+ Result.
+
+win32_load1(Reg,Type,HFileKey) ->
+ Names = [HFileKey, "Domain", "DhcpDomain",
+ "EnableDNS", "NameServer", "SearchList"],
+ case win32_get_strings(Reg, Names) of
+ [DBPath0, Domain, DhcpDomain,
+ _EnableDNS, NameServers0, Search] ->
+ inet_db:set_domain(
+ case Domain of "" -> DhcpDomain; _ -> Domain end),
+ NameServers = win32_split_line(NameServers0,Type),
+ AddNs = fun(Addr) ->
+ case inet_parse:address(Addr) of
+ {ok, Address} ->
+ inet_db:add_ns(Address);
+ {error, _} ->
+ error("Bad TCP/IP address in registry", [])
+ end
+ end,
+ foreach(AddNs, NameServers),
+ Searches0 = win32_split_line(Search,Type),
+ Searches = case member(Domain, Searches0) of
+ true -> Searches0;
+ false -> [Domain|Searches0]
+ end,
+ foreach(fun(D) -> inet_db:add_search(D) end, Searches),
+ if Type =:= nt ->
+ DBPath = win32reg:expand(DBPath0),
+ load_hosts(filename:join(DBPath, "hosts"),nt);
+ Type =:= windows ->
+ load_hosts(filename:join(DBPath0,""),windows)
+ end,
+%% Maybe activate this later as an optimization
+%% For now we use native always as the SAFE way
+%% case NameServers of
+%% [] -> inet_db:set_lookup([native, file]);
+%% _ -> inet_db:set_lookup([dns, file, native])
+%% end;
+ true;
+ {error, _Reason} ->
+ error("Failed to read TCP/IP parameters from registry", [])
+ end.
+
+win32_split_line(Line,nt) -> inet_parse:split_line(Line);
+win32_split_line(Line,windows) -> string:tokens(Line, ",").
+
+win32_get_strings(Reg, Names) ->
+ win32_get_strings(Reg, Names, []).
+
+win32_get_strings(Reg, [Name|Rest], Result) ->
+ case win32reg:value(Reg, Name) of
+ {ok, Value} when is_list(Value) ->
+ win32_get_strings(Reg, Rest, [Value|Result]);
+ {ok, _NotString} ->
+ {error, not_string};
+ {error, _Reason} ->
+ win32_get_strings(Reg, Rest, [""|Result])
+ end;
+win32_get_strings(_, [], Result) ->
+ lists:reverse(Result).
+
+%%
+%% Load host data from VxWorks hostShow command
+%%
+
+vxworks_load_hosts() ->
+ HostShow = os:cmd("hostShow"),
+ case check_hostShow(HostShow) of
+ Hosts when is_list(Hosts) ->
+ case inet_parse:hosts_vxworks({chars, Hosts}) of
+ {ok, Ls} ->
+ foreach(
+ fun({IP, Name, Aliases}) ->
+ inet_db:add_host(IP, [Name|Aliases])
+ end,
+ Ls);
+ {error,Reason} ->
+ error("parser error VxWorks hostShow ~s", [Reason])
+ end;
+ _Error ->
+ error("error in VxWorks hostShow~s~n", [HostShow])
+ end.
+
+%%
+%% Check if hostShow yields at least two lines; the first one
+%% starting with "hostname", the second one starting with
+%% "--------".
+%% Returns: list of hosts in VxWorks notation
+%% rows of 'Name IP [Aliases] \n'
+%% if hostShow yielded these two lines, false otherwise.
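+%%
+%% Illustrative accepted input (names, addresses and column headers are
+%% made up; only the two leading words are actually checked):
+%%   hostname        address
+%%   --------        -------
+%%   target          192.168.0.1
+%%   gateway         192.168.0.254   gw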
+check_hostShow(HostShow) ->
+ check_hostShow(["hostname", "--------"], HostShow).
+
+check_hostShow([], HostShow) ->
+ HostShow;
+check_hostShow([String_match|Rest], HostShow) ->
+ case lists:prefix(String_match, HostShow) of
+ true ->
+ check_hostShow(Rest, next_line(HostShow));
+ false ->
+ false
+ end.
+
+next_line([]) ->
+ [];
+next_line([$\n|Rest]) ->
+ Rest;
+next_line([_First|Rest]) ->
+ next_line(Rest).
+
+read_rc() ->
+ {RcFile,CfgList} = read_inetrc(),
+ case extract_cfg_files(CfgList, [], []) of
+ {CfgFiles,CfgList1} ->
+ {RcFile,CfgFiles,CfgList1};
+ error ->
+ {error,[],[]}
+ end.
+
+
+
+extract_cfg_files([E = {file,Type,_File} | Es], CfgFiles, CfgList) ->
+ extract_cfg_files1(Type, E, Es, CfgFiles, CfgList);
+extract_cfg_files([E = {registry,Type} | Es], CfgFiles, CfgList) ->
+ extract_cfg_files1(Type, E, Es, CfgFiles, CfgList);
+extract_cfg_files([E | Es], CfgFiles, CfgList) ->
+ extract_cfg_files(Es, CfgFiles, [E | CfgList]);
+extract_cfg_files([], CfgFiles, CfgList) ->
+ {reverse(CfgFiles),reverse(CfgList)}.
+
+extract_cfg_files1(Type, E, Es, CfgFiles, CfgList) ->
+ case valid_type(Type) of
+ true ->
+ extract_cfg_files(Es, [E | CfgFiles], CfgList);
+ false ->
+ error("invalid config value ~w in inetrc~n", [Type]),
+ error
+ end.
+
+valid_type(resolv) -> true;
+valid_type(host_conf_freebsd) -> true;
+valid_type(host_conf_bsdos) -> true;
+valid_type(host_conf_linux) -> true;
+valid_type(nsswitch_conf) -> true;
+valid_type(hosts) -> true;
+valid_type(win32) -> true;
+valid_type(_) -> false.
+
+read_inetrc() ->
+ case application:get_env(inetrc) of
+ {ok,File} ->
+ try_get_rc(File);
+ _ ->
+ case os:getenv("ERL_INETRC") of
+ false ->
+ {nofile,[]};
+ File ->
+ try_get_rc(File)
+ end
+ end.
+
+try_get_rc(File) ->
+ case get_rc(File) of
+ error -> {nofile,[]};
+ Ls -> {File,Ls}
+ end.
+
+get_rc(File) ->
+ case get_file(File) of
+ {ok,Bin} ->
+ case parse_inetrc(Bin) of
+ {ok,Ls} ->
+ Ls;
+ _Error ->
+ error("parse error in ~s~n", [File]),
+ error
+ end;
+ _Error ->
+ error("file ~s not found~n", [File]),
+ error
+ end.
+
+%% XXX Check if we really need to prim load the stuff
+get_file(File) ->
+ case erl_prim_loader:get_file(File) of
+ {ok,Bin,_} -> {ok,Bin};
+ Error -> Error
+ end.
+
+error(Fmt, Args) ->
+ error_logger:error_msg("inet_config: " ++ Fmt, Args).
+
+warning(Fmt, Args) ->
+ case application:get_env(kernel,inet_warnings) of
+ %{ok,silent} -> ok;
+ {ok,on} ->
+ error_logger:info_msg("inet_config:" ++ Fmt, Args);
+ _ ->
+ ok
+ end.
+
+%%
+%% Parse inetrc, i.e. make a binary of a term list.
+%% The extra newline is to let the user ignore the whitespace !!!
+%% Ignore leading whitespace before a token (due to bug in erl_scan) !
+%%
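+%% An illustrative inetrc fragment (addresses made up); each entry is a
+%% dot-terminated Erlang term:
+%%   {nameserver, {192,168,0,1}}.
+%%   {lookup, [file, dns]}.
+%%   {host, {127,0,0,1}, ["localhost"]}.
+%%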
+parse_inetrc(Bin) ->
+ Str = binary_to_list(Bin) ++ "\n",
+ parse_inetrc(Str, 1, []).
+
+parse_inetrc_skip_line([], _Line, Ack) ->
+ {ok, reverse(Ack)};
+parse_inetrc_skip_line([$\n|Str], Line, Ack) ->
+ parse_inetrc(Str, Line+1, Ack);
+parse_inetrc_skip_line([_|Str], Line, Ack) ->
+ parse_inetrc_skip_line(Str, Line, Ack).
+
+parse_inetrc([$%|Str], Line, Ack) ->
+ parse_inetrc_skip_line(Str, Line, Ack);
+parse_inetrc([$\s|Str], Line, Ack) ->
+ parse_inetrc(Str, Line, Ack);
+parse_inetrc([$\n |Str], Line, Ack) ->
+ parse_inetrc(Str, Line+1, Ack);
+parse_inetrc([$\t|Str], Line, Ack) ->
+ parse_inetrc(Str, Line, Ack);
+parse_inetrc([], _, Ack) ->
+ {ok, reverse(Ack)};
+
+
+%% The clauses above are here due to a bug in erl_scan (OTP-1449).
+
+parse_inetrc(Str, Line, Ack) ->
+ case erl_scan:tokens([], Str, Line) of
+ {done, {ok, Tokens, EndLine}, MoreChars} ->
+ case erl_parse:parse_term(Tokens) of
+ {ok, Term} ->
+ parse_inetrc(MoreChars, EndLine, [Term|Ack]);
+ Error ->
+ {error, {'parse_inetrc', Error}}
+ end;
+ {done, {eof, _}, _} ->
+ {ok, reverse(Ack)};
+ {done, Error, _} ->
+ {error, {'scan_inetrc', Error}};
+ {more, _} -> %% Bug in erl_scan !!
+ {error, {'scan_inetrc', {eof, Line}}}
+ end.
diff --git a/lib/kernel/src/inet_config.hrl b/lib/kernel/src/inet_config.hrl
new file mode 100644
index 0000000000..e9bb79f05d
--- /dev/null
+++ b/lib/kernel/src/inet_config.hrl
@@ -0,0 +1,34 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Configuration constants
+
+-define(DEFAULT_ETC, "/etc").
+-define(DEFAULT_SERVICES, "services").
+-define(DEFAULT_RPC, "rpc").
+-define(DEFAULT_HOSTS, "hosts").
+-define(DEFAULT_RESOLV, "resolv.conf").
+-define(DEFAULT_PROTOCOLS, "protocols").
+-define(DEFAULT_NETMASKS, "netmasks").
+-define(DEFAULT_NETWORKS, "networks").
+
+-define(DEFAULT_UDP_MODULE, inet_udp).
+-define(DEFAULT_TCP_MODULE, inet_tcp).
+-define(DEFAULT_SCTP_MODULE, inet_sctp).
+
diff --git a/lib/kernel/src/inet_db.erl b/lib/kernel/src/inet_db.erl
new file mode 100644
index 0000000000..211847014f
--- /dev/null
+++ b/lib/kernel/src/inet_db.erl
@@ -0,0 +1,1525 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(inet_db).
+
+%% Store info about IP addresses, names, aliases, host files and
+%% resolver options
+
+%% If the macro DEBUG is defined during compilation,
+%% debug printouts are done through erlang:display/1.
+%% Activate this feature by starting the compiler
+%% with> erlc -DDEBUG ...
+%% or by> setenv ERL_COMPILER_FLAGS DEBUG
+%% before running make (in the OTP make system)
+%% (the example is for tcsh)
+
+%% External exports
+-export([start/0, start_link/0, stop/0, reset/0, clear_cache/0]).
+-export([add_rr/1,add_rr/5,del_rr/4]).
+-export([add_ns/1,add_ns/2, ins_ns/1, ins_ns/2,
+ del_ns/2, del_ns/1, del_ns/0]).
+-export([add_alt_ns/1,add_alt_ns/2, ins_alt_ns/1, ins_alt_ns/2,
+ del_alt_ns/2, del_alt_ns/1, del_alt_ns/0]).
+-export([add_search/1,ins_search/1,del_search/1, del_search/0]).
+-export([set_lookup/1, set_recurse/1]).
+-export([set_socks_server/1, set_socks_port/1, add_socks_methods/1,
+ del_socks_methods/1, del_socks_methods/0,
+ add_socks_noproxy/1, del_socks_noproxy/1]).
+-export([set_cache_size/1, set_cache_refresh/1]).
+-export([set_timeout/1, set_retry/1, set_inet6/1, set_usevc/1]).
+-export([set_edns/1, set_udp_payload_size/1]).
+-export([set_resolv_conf/1, set_hosts_file/1, get_hosts_file/0]).
+-export([tcp_module/0, set_tcp_module/1]).
+-export([udp_module/0, set_udp_module/1]).
+-export([sctp_module/0,set_sctp_module/1]).
+-export([register_socket/2, unregister_socket/1, lookup_socket/1]).
+
+%% Host name & domain
+-export([set_hostname/1, set_domain/1]).
+-export([gethostname/0]).
+
+%% file interface
+-export([add_host/2, del_host/1, clear_hosts/0, add_hosts/1]).
+-export([add_resolv/1]).
+-export([add_rc/1, add_rc_bin/1, add_rc_list/1, get_rc/0]).
+
+-export([res_option/1, res_option/2, res_check_option/2]).
+-export([socks_option/1]).
+-export([getbyname/2, get_searchlist/0]).
+-export([gethostbyaddr/1]).
+-export([res_gethostbyaddr/2,res_hostent_by_domain/3]).
+-export([res_update_conf/0, res_update_hosts/0]).
+%% inet help functions
+-export([tolower/1]).
+-ifdef(DEBUG).
+-define(dbg(Fmt, Args), io:format(Fmt, Args)).
+-else.
+-define(dbg(Fmt, Args), ok).
+-endif.
+
+-include_lib("kernel/include/file.hrl").
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
+
+-record(state,
+ {db, %% resolver data
+ cache, %% bag of resource records
+ hosts_byname, %% hosts table
+ hosts_byaddr, %% hosts table
+ hosts_file_byname, %% hosts table from system file
+ hosts_file_byaddr, %% hosts table from system file
+ cache_timer %% timer reference for refresh
+ }).
+
+-include("inet.hrl").
+-include("inet_int.hrl").
+-include("inet_res.hrl").
+-include("inet_dns.hrl").
+-include("inet_config.hrl").
+
+%%%----------------------------------------------------------------------
+%%% API
+%%%----------------------------------------------------------------------
+
+start() ->
+ case gen_server:start({local, inet_db}, inet_db, [], []) of
+ {ok,Pid} -> inet_config:init(), {ok,Pid};
+ Error -> Error
+ end.
+
+
+start_link() ->
+ case gen_server:start_link({local, inet_db}, inet_db, [], []) of
+ {ok,Pid} -> inet_config:init(), {ok,Pid};
+ Error -> Error
+ end.
+
+call(Req) ->
+ gen_server:call(inet_db, Req, infinity).
+
+stop() ->
+ call(stop).
+
+reset() ->
+ call(reset).
+
+
+%% insert all resolver options from this file (MAY GO)
+add_resolv(File) ->
+ case inet_parse:resolv(File) of
+ {ok, Res} -> add_rc_list(Res);
+ Error -> Error
+ end.
+
+%% add all aliases from this hosts file (MAY GO)
+add_hosts(File) ->
+ case inet_parse:hosts(File) of
+ {ok, Res} ->
+ lists:foreach(
+ fun({IP, Name, Aliases}) -> add_host(IP, [Name|Aliases]) end,
+ Res);
+ Error -> Error
+ end.
+
+
+add_host(IP, Names) -> call({add_host, IP, Names}).
+
+del_host(IP) -> call({del_host, IP}).
+
+clear_hosts() -> call(clear_hosts).
+
+%% add to the end of name server list
+add_ns(IP) ->
+ add_ns(IP,?NAMESERVER_PORT).
+add_ns(IP,Port) ->
+ call({listop, nameservers, add, {IP,Port}}).
+
+%% insert at head of name server list
+ins_ns(IP) ->
+ ins_ns(IP, ?NAMESERVER_PORT).
+ins_ns(IP,Port) ->
+ call({listop, nameservers, ins, {IP,Port}}).
+
+%% delete this name server entry (delete all ns having this ip)
+del_ns(IP) ->
+ del_ns(IP, ?NAMESERVER_PORT).
+del_ns(IP, Port) ->
+ call({listop, nameservers, del, {IP,Port}}).
+
+del_ns() ->
+ call({listdel, nameservers}).
+
+%% ALTERNATIVE NAME SERVER
+%% add to the end of name server list
+add_alt_ns(IP) ->
+ add_alt_ns(IP, ?NAMESERVER_PORT).
+add_alt_ns(IP,Port) ->
+ call({listop, alt_nameservers, add, {IP,Port}}).
+
+%% insert at head of name server list
+ins_alt_ns(IP) ->
+ ins_alt_ns(IP, ?NAMESERVER_PORT).
+ins_alt_ns(IP,Port) ->
+ call({listop, alt_nameservers, ins, {IP,Port}}).
+
+%% delete this name server entry
+del_alt_ns(IP) ->
+ del_alt_ns(IP, ?NAMESERVER_PORT).
+del_alt_ns(IP, Port) ->
+ call({listop, alt_nameservers, del, {IP,Port}}).
+
+del_alt_ns() ->
+ call({listdel, alt_nameservers}).
+
+%% add this domain to the search list
+add_search(Domain) when is_list(Domain) ->
+ call({listop, search, add, Domain}).
+
+ins_search(Domain) when is_list(Domain) ->
+ call({listop, search, ins, Domain}).
+
+del_search(Domain) ->
+ call({listop, search, del, Domain}).
+
+del_search() ->
+ call({listdel, search}).
+
+%% set host name used by inet
+%% Should only be used by inet_config at startup!
+set_hostname(Name) ->
+ call({set_hostname, Name}).
+
+%% set default domain
+set_domain(Domain) -> res_option(domain, Domain).
+
+%% set lookup methods
+set_lookup(Methods) -> res_option(lookup, Methods).
+
+%% resolver
+set_recurse(Flag) -> res_option(recurse, Flag).
+
+set_timeout(Time) -> res_option(timeout, Time).
+
+set_retry(N) -> res_option(retry, N).
+
+set_inet6(Bool) -> res_option(inet6, Bool).
+
+set_usevc(Bool) -> res_option(usevc, Bool).
+
+set_edns(Version) -> res_option(edns, Version).
+
+set_udp_payload_size(Size) -> res_option(udp_payload_size, Size).
+
+set_resolv_conf(Fname) -> res_option(resolv_conf, Fname).
+
+set_hosts_file(Fname) -> res_option(hosts_file, Fname).
+
+get_hosts_file() ->
+ get_rc_hosts([], [], inet_hosts_file_byname).
+
+%% set socks options
+set_socks_server(Server) -> call({set_socks_server, Server}).
+
+set_socks_port(Port) -> call({set_socks_port, Port}).
+
+add_socks_methods(Ms) -> call({add_socks_methods,Ms}).
+
+del_socks_methods(Ms) -> call({del_socks_methods,Ms}).
+
+del_socks_methods() -> call(del_socks_methods).
+
+add_socks_noproxy({Net,Mask}) -> call({add_socks_noproxy, {Net,Mask}}).
+
+del_socks_noproxy(Net) -> call({del_socks_noproxy, Net}).
+
+%% cache options
+set_cache_size(Limit) -> call({set_cache_size, Limit}).
+
+set_cache_refresh(Time) -> call({set_cache_refresh, Time}).
+
+clear_cache() -> call(clear_cache).
+
+
+set_tcp_module(Module) -> call({set_tcp_module, Module}).
+
+tcp_module() -> db_get(tcp_module).
+
+set_udp_module(Module) -> call({set_udp_module, Module}).
+
+udp_module() -> db_get(udp_module).
+
+set_sctp_module(Family)-> call({set_sctp_module,Family}).
+
+sctp_module()-> db_get(sctp_module).
+
+%% Add an inetrc file
+add_rc(File) ->
+ case file:consult(File) of
+ {ok, List} -> add_rc_list(List);
+ Error -> Error
+ end.
+
+%% Add an inetrc binary term must be a rc list
+add_rc_bin(Bin) ->
+ case catch binary_to_term(Bin) of
+ List when is_list(List) ->
+ add_rc_list(List);
+ _ ->
+ {error, badarg}
+ end.
+
+add_rc_list(List) -> call({add_rc_list, List}).
+
+
+
+%% All kinds of flavors!
+translate_lookup(["bind" | Ls]) -> [dns | translate_lookup(Ls)];
+translate_lookup(["dns" | Ls]) -> [dns | translate_lookup(Ls)];
+translate_lookup(["hosts" | Ls]) -> [file | translate_lookup(Ls)];
+translate_lookup(["files" | Ls]) -> [file | translate_lookup(Ls)];
+translate_lookup(["file" | Ls]) -> [file | translate_lookup(Ls)];
+translate_lookup(["yp" | Ls]) -> [yp | translate_lookup(Ls)];
+translate_lookup(["nis" | Ls]) -> [nis | translate_lookup(Ls)];
+translate_lookup(["nisplus" | Ls]) -> [nisplus | translate_lookup(Ls)];
+translate_lookup(["native" | Ls]) -> [native | translate_lookup(Ls)];
+translate_lookup([M | Ls]) when is_atom(M) -> translate_lookup([atom_to_list(M) | Ls]);
+translate_lookup([_ | Ls]) -> translate_lookup(Ls);
+translate_lookup([]) -> [].
+
+valid_lookup() -> [dns, file, yp, nis, nisplus, native].
+
+
+%% Reconstruct an inetrc structure from inet_db
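+%% The result is a list of rc terms, e.g. (values are illustrative):
+%%   [{host,{127,0,0,1},["localhost"]}, {lookup,[file,native]}]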
+get_rc() ->
+ get_rc([hosts, domain, nameservers, search, alt_nameservers,
+ timeout, retry, inet6, usevc,
+ edns, udp_payload_size, resolv_conf, hosts_file,
+ socks5_server, socks5_port, socks5_methods, socks5_noproxy,
+ udp, sctp, tcp, host, cache_size, cache_refresh, lookup], []).
+
+get_rc([K | Ks], Ls) ->
+ case K of
+ hosts -> get_rc_hosts(Ks, Ls, inet_hosts_byname);
+ domain -> get_rc(domain, res_domain, "", Ks, Ls);
+ nameservers -> get_rc_ns(db_get(res_ns),nameservers,Ks,Ls);
+ alt_nameservers -> get_rc_ns(db_get(res_alt_ns),alt_nameservers,Ks,Ls);
+ search -> get_rc(search, res_search, [], Ks, Ls);
+ timeout -> get_rc(timeout,res_timeout,?RES_TIMEOUT, Ks,Ls);
+ retry -> get_rc(retry, res_retry, ?RES_RETRY, Ks, Ls);
+ inet6 -> get_rc(inet6, res_inet6, false, Ks, Ls);
+ usevc -> get_rc(usevc, res_usevc, false, Ks, Ls);
+ edns -> get_rc(edns, res_edns, false, Ks, Ls);
+ udp_payload_size -> get_rc(udp_payload_size, res_udp_payload_size,
+ ?DNS_UDP_PAYLOAD_SIZE, Ks, Ls);
+ resolv_conf -> get_rc(resolv_conf, res_resolv_conf, undefined, Ks, Ls);
+ hosts_file -> get_rc(hosts_file, res_hosts_file, undefined, Ks, Ls);
+ tcp -> get_rc(tcp, tcp_module, ?DEFAULT_TCP_MODULE, Ks, Ls);
+ udp -> get_rc(udp, udp_module, ?DEFAULT_UDP_MODULE, Ks, Ls);
+ sctp -> get_rc(sctp, sctp_module, ?DEFAULT_SCTP_MODULE, Ks, Ls);
+ lookup -> get_rc(lookup, res_lookup, [native,file], Ks, Ls);
+ cache_size -> get_rc(cache_size, cache_size, ?CACHE_LIMIT, Ks, Ls);
+ cache_refresh ->
+ get_rc(cache_refresh, cache_refresh_interval,?CACHE_REFRESH,Ks,Ls);
+ socks5_server -> get_rc(socks5_server, socks5_server, "", Ks, Ls);
+ socks5_port -> get_rc(socks5_port,socks5_port,?IPPORT_SOCKS,Ks,Ls);
+ socks5_methods -> get_rc(socks5_methods,socks5_methods,[none],Ks,Ls);
+ socks5_noproxy ->
+ case db_get(socks5_noproxy) of
+ [] -> get_rc(Ks, Ls);
+ NoProxy -> get_rc_noproxy(NoProxy, Ks, Ls)
+ end;
+ _ ->
+ get_rc(Ks, Ls)
+ end;
+get_rc([], Ls) ->
+ lists:reverse(Ls).
+
+get_rc(Name, Key, Default, Ks, Ls) ->
+ case db_get(Key) of
+ Default -> get_rc(Ks, Ls);
+ Value -> get_rc(Ks, [{Name, Value} | Ls])
+ end.
+
+get_rc_noproxy([{Net,Mask} | Ms], Ks, Ls) ->
+ get_rc_noproxy(Ms, Ks, [{socks5_noproxy, Net, Mask} | Ls]);
+get_rc_noproxy([], Ks, Ls) -> get_rc(Ks, Ls).
+
+get_rc_ns([{IP,?NAMESERVER_PORT} | Ns], Tag, Ks, Ls) ->
+ get_rc_ns(Ns, Tag, Ks, [{Tag, IP} | Ls]);
+get_rc_ns([{IP,Port} | Ns], Tag, Ks, Ls) ->
+ get_rc_ns(Ns, Tag, Ks, [{Tag, IP, Port} | Ls]);
+get_rc_ns([], _Tag, Ks, Ls) ->
+ get_rc(Ks, Ls).
+
+get_rc_hosts(Ks, Ls, Tab) ->
+ case lists:keysort(3, ets:tab2list(Tab)) of
+ [] -> get_rc(Ks, Ls);
+ [{N,_,IP}|Hosts] -> get_rc_hosts(Ks, Ls, IP, Hosts, [N])
+ end.
+
+get_rc_hosts(Ks, Ls, IP, [], Ns) ->
+ get_rc(Ks, [{host,IP,lists:reverse(Ns)}|Ls]);
+get_rc_hosts(Ks, Ls, IP, [{N,_,IP}|Hosts], Ns) ->
+ get_rc_hosts(Ks, Ls, IP, Hosts, [N|Ns]);
+get_rc_hosts(Ks, Ls, IP, [{N,_,NewIP}|Hosts], Ns) ->
+ [{host,IP,lists:reverse(Ns)}|get_rc_hosts(Ks, Ls, NewIP, Hosts, [N])].
+
+%%
+%% Resolver options
+%%
+res_option(next_id) ->
+ Cnt = ets:update_counter(inet_db, res_id, 1),
+ case Cnt band 16#ffff of
+ 0 ->
+ ets:update_counter(inet_db, res_id, -Cnt),
+ 0;
+ Id ->
+ Id
+ end;
+res_option(Option) ->
+ case res_optname(Option) of
+ undefined ->
+ erlang:error(badarg, [Option]);
+ ResOptname ->
+ db_get(ResOptname)
+ end.
+
+res_option(Option, Value) ->
+ case res_optname(Option) of
+ undefined ->
+ erlang:error(badarg, [Option,Value]);
+ _ ->
+ call({res_set,Option,Value})
+ end.
+
+res_optname(nameserver) -> res_ns; %% Legacy
+res_optname(alt_nameserver) -> res_alt_ns; %% Legacy
+res_optname(nameservers) -> res_ns;
+res_optname(alt_nameservers) -> res_alt_ns;
+res_optname(domain) -> res_domain;
+res_optname(lookup) -> res_lookup;
+res_optname(recurse) -> res_recurse;
+res_optname(search) -> res_search;
+res_optname(retry) -> res_retry;
+res_optname(timeout) -> res_timeout;
+res_optname(inet6) -> res_inet6;
+res_optname(usevc) -> res_usevc;
+res_optname(edns) -> res_edns;
+res_optname(udp_payload_size) -> res_udp_payload_size;
+res_optname(resolv_conf) -> res_resolv_conf;
+res_optname(hosts_file) -> res_hosts_file;
+res_optname(_) -> undefined.
+
+res_check_option(nameserver, NSs) -> %% Legacy
+ res_check_list(NSs, fun res_check_ns/1);
+res_check_option(alt_nameserver, NSs) -> %% Legacy
+ res_check_list(NSs, fun res_check_ns/1);
+res_check_option(nameservers, NSs) ->
+ res_check_list(NSs, fun res_check_ns/1);
+res_check_option(alt_nameservers, NSs) ->
+ res_check_list(NSs, fun res_check_ns/1);
+res_check_option(domain, Dom) ->
+ inet_parse:visible_string(Dom);
+res_check_option(lookup, Methods) ->
+ try lists_subtract(Methods, valid_lookup()) of
+ [] -> true;
+ _ -> false
+ catch
+ error:_ -> false
+ end;
+res_check_option(recurse, R) when R =:= 0; R =:= 1 -> true; %% Legacy
+res_check_option(recurse, R) when is_boolean(R) -> true;
+res_check_option(search, SearchList) ->
+ res_check_list(SearchList, fun res_check_search/1);
+res_check_option(retry, N) when is_integer(N), N > 0 -> true;
+res_check_option(timeout, T) when is_integer(T), T > 0 -> true;
+res_check_option(inet6, Bool) when is_boolean(Bool) -> true;
+res_check_option(usevc, Bool) when is_boolean(Bool) -> true;
+res_check_option(edns, V) when V =:= false; V =:= 0 -> true;
+res_check_option(udp_payload_size, S) when is_integer(S), S >= 512 -> true;
+res_check_option(resolv_conf, "") -> true;
+res_check_option(resolv_conf, F) ->
+ res_check_option_absfile(F);
+res_check_option(hosts_file, "") -> true;
+res_check_option(hosts_file, F) ->
+ res_check_option_absfile(F);
+res_check_option(_, _) -> false.
+
+res_check_option_absfile(F) ->
+ try filename:pathtype(F) of
+ absolute -> true;
+ _ -> false
+ catch
+ _:_ -> false
+ end.
+
+res_check_list([], _Fun) -> true;
+res_check_list([H|T], Fun) ->
+ case Fun(H) of
+ true -> res_check_list(T, Fun);
+ false -> false
+ end;
+res_check_list(_, _Fun) -> false.
+
+res_check_ns({{A,B,C,D,E,F,G,H}, Port})
+ when ?ip6(A,B,C,D,E,F,G,H), Port band 65535 =:= Port -> true;
+res_check_ns({{A,B,C,D}, Port})
+ when ?ip(A,B,C,D), Port band 65535 =:= Port -> true;
+res_check_ns(_) -> false.
+
+res_check_search("") -> true;
+res_check_search(Dom) -> inet_parse:visible_string(Dom).
+
+socks_option(server) -> db_get(socks5_server);
+socks_option(port) -> db_get(socks5_port);
+socks_option(methods) -> db_get(socks5_methods);
+socks_option(noproxy) -> db_get(socks5_noproxy).
+
+gethostname() -> db_get(hostname).
+
+res_update_conf() ->
+ res_update(res_resolv_conf, res_resolv_conf_tm, res_resolv_conf_info,
+ set_resolv_conf_tm, fun set_resolv_conf/1).
+
+res_update_hosts() ->
+ res_update(res_hosts_file, res_hosts_file_tm, res_hosts_file_info,
+ set_hosts_file_tm, fun set_hosts_file/1).
+
+res_update(Tag, TagTm, TagInfo, CallTag, SetFun) ->
+ case db_get(TagTm) of
+ undefined -> ok;
+ TM ->
+ case times() of
+ Now when Now >= TM + ?RES_FILE_UPDATE_TM ->
+ case db_get(Tag) of
+ undefined ->
+ SetFun("");
+ "" ->
+ SetFun("");
+ File ->
+ case erl_prim_loader:read_file_info(File) of
+ {ok, Finfo0} ->
+ Finfo =
+ Finfo0#file_info{access = undefined,
+ atime = undefined},
+ case db_get(TagInfo) of
+ Finfo ->
+ call({CallTag, Now});
+ _ ->
+ SetFun(File)
+ end;
+ _ ->
+ call({CallTag, Now}),
+ error
+ end
+ end;
+ _ -> ok
+ end
+ end.
+
+db_get(Name) ->
+ case ets:lookup(inet_db, Name) of
+ [] -> undefined;
+ [{_,Val}] -> Val
+ end.
+
+add_rr(RR) ->
+ call({add_rr, RR}).
+
+add_rr(Domain, Class, Type, TTL, Data) ->
+ call({add_rr, #dns_rr { domain = Domain, class = Class,
+ type = Type, ttl = TTL, data = Data}}).
+
+del_rr(Domain, Class, Type, Data) ->
+ call({del_rr, #dns_rr { domain = Domain, class = Class,
+ type = Type, cnt = '_', tm = '_', ttl = '_',
+ bm = '_', func = '_', data = Data}}).
+
+res_cache_answer(Rec) ->
+ lists:foreach( fun(RR) -> add_rr(RR) end, Rec#dns_rec.anlist).
+
+
+
+
+%%
+%% getbyname (cache version)
+%%
+%% This function and inet_res:res_getbyname/3 must look up names
+%% in the same manner, but not from the same places.
+%%
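+%% Illustration (not from the original source; assumed search list
+%% ["example.com"]): getbyname("www", a) has no embedded dots, so it is
+%% tried via the search list as "www.example.com", while
+%% getbyname("www.example.com.", a) has a trailing dot and is looked up
+%% verbatim by hostent_by_domain/2.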
+getbyname(Name, Type) ->
+ {EmbeddedDots, TrailingDot} = inet_parse:dots(Name),
+ Dot = if TrailingDot -> ""; true -> "." end,
+ if TrailingDot ->
+ hostent_by_domain(Name, Type);
+ EmbeddedDots =:= 0 ->
+ getbysearch(Name, Dot, get_searchlist(), Type, {error,nxdomain});
+ true ->
+ case hostent_by_domain(Name, Type) of
+ {error,_}=Error ->
+ getbysearch(Name, Dot, get_searchlist(), Type, Error);
+ Other -> Other
+ end
+ end.
+
+getbysearch(Name, Dot, [Dom | Ds], Type, _) ->
+ case hostent_by_domain(Name ++ Dot ++ Dom, Type) of
+ {ok, HEnt} -> {ok, HEnt};
+ Error ->
+ getbysearch(Name, Dot, Ds, Type, Error)
+ end;
+getbysearch(_Name, _Dot, [], _Type, Error) ->
+ Error.
+
+
+
+%%
+%% get_searchlist
+%%
+get_searchlist() ->
+ case res_option(search) of
+ [] -> [res_option(domain)];
+ L -> L
+ end.
+
+
+
+make_hostent(Name, Addrs, Aliases, ?S_A) ->
+ #hostent {
+ h_name = Name,
+ h_addrtype = inet,
+ h_length = 4,
+ h_addr_list = Addrs,
+ h_aliases = Aliases
+ };
+make_hostent(Name, Addrs, Aliases, ?S_AAAA) ->
+ #hostent {
+ h_name = Name,
+ h_addrtype = inet6,
+ h_length = 16,
+ h_addr_list = Addrs,
+ h_aliases = Aliases
+ };
+make_hostent(Name, Datas, Aliases, Type) ->
+ %% Use #hostent{} for other Types as well !
+ #hostent {
+ h_name = Name,
+ h_addrtype = Type,
+ h_length = length(Datas),
+ h_addr_list = Datas,
+ h_aliases = Aliases
+ }.
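+%% For example (illustrative values), make_hostent("host.example.com",
+%% [{192,0,2,1}], [], a) builds #hostent{h_name = "host.example.com",
+%% h_addrtype = inet, h_length = 4, h_addr_list = [{192,0,2,1}],
+%% h_aliases = []}.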
+
+hostent_by_domain(Domain, Type) ->
+ ?dbg("hostent_by_domain: ~p~n", [Domain]),
+ hostent_by_domain(stripdot(Domain), [], Type).
+
+hostent_by_domain(Domain, Aliases, Type) ->
+ case lookup_type(Domain, Type) of
+ [] ->
+ case lookup_cname(Domain) of
+ [] ->
+ {error, nxdomain};
+ [CName | _] ->
+ case lists:member(CName, [Domain | Aliases]) of
+ true ->
+ {error, nxdomain};
+ false ->
+ hostent_by_domain(CName, [Domain | Aliases], Type)
+ end
+ end;
+ Addrs ->
+ {ok, make_hostent(Domain, Addrs, Aliases, Type)}
+ end.
+
+%% lookup address record
+lookup_type(Domain, Type) ->
+ [R#dns_rr.data || R <- lookup_rr(Domain, in, Type) ].
+
+%% lookup canonical name
+lookup_cname(Domain) ->
+ [R#dns_rr.data || R <- lookup_rr(Domain, in, ?S_CNAME) ].
+
+%% Have to do all lookups (changes to the db) in the inet_db server
+%% process in order to make it possible to refresh the cache.
+lookup_rr(Domain, Class, Type) ->
+ call({lookup_rr, Domain, Class, Type}).
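+%% E.g. (illustrative): lookup_rr("example.com", in, a) returns the cached
+%% #dns_rr{} records for that name and type, with expired entries already
+%% filtered out by match_rr/1 in the server.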
+
+%%
+%% hostent_by_domain (newly resolved version)
+%% match data field directly and cache RRs.
+%%
+res_hostent_by_domain(Domain, Type, Rec) ->
+ res_cache_answer(Rec),
+ RRs = Rec#dns_rec.anlist,
+ ?dbg("res_hostent_by_domain: ~p - ~p~n", [Domain, RRs]),
+ res_hostent_by_domain(stripdot(Domain), [], Type, RRs).
+
+res_hostent_by_domain(Domain, Aliases, Type, RRs) ->
+ case res_lookup_type(Domain, Type, RRs) of
+ [] ->
+ case res_lookup_type(Domain, ?S_CNAME, RRs) of
+ [] ->
+ {error, nxdomain};
+ [CName | _] ->
+ case lists:member(CName, [Domain | Aliases]) of
+ true ->
+ {error, nxdomain};
+ false ->
+ res_hostent_by_domain(CName, [Domain | Aliases],
+ Type, RRs)
+ end
+ end;
+ Addrs ->
+ {ok, make_hostent(Domain, Addrs, Aliases, Type)}
+ end.
+
+%% newly resolved lookup address record
+res_lookup_type(Domain,Type,RRs) ->
+ [R#dns_rr.data || R <- RRs,
+ R#dns_rr.domain =:= Domain,
+ R#dns_rr.type =:= Type].
+
+%%
+%% gethostbyaddr (cache version)
+%% match data field directly
+%%
+gethostbyaddr(IP) ->
+ case dnip(IP) of
+ {ok, {IP1, HType, HLen, DnIP}} ->
+ RRs = match_rr(#dns_rr { domain = DnIP, class = in, type = ptr,
+ cnt = '_', tm = '_', ttl = '_',
+ bm = '_', func = '_', data = '_' }),
+ ent_gethostbyaddr(RRs, IP1, HType, HLen);
+ Error -> Error
+ end.
+
+%%
+%% res_gethostbyaddr (newly resolved version)
+%% match data field directly and cache RRs.
+%%
+res_gethostbyaddr(IP, Rec) ->
+ {ok, {IP1, HType, HLen}} = dnt(IP),
+ res_cache_answer(Rec),
+ ent_gethostbyaddr(Rec#dns_rec.anlist, IP1, HType, HLen).
+
+ent_gethostbyaddr(RRs, IP, AddrType, Length) ->
+ case RRs of
+ [] -> {error, nxdomain};
+ [RR|TR] ->
+ %% debug
+ if TR =/= [] ->
+ ?dbg("gethostbyaddr found extra=~p~n", [TR]);
+ true -> ok
+ end,
+ Domain = RR#dns_rr.data,
+ H = #hostent { h_name = Domain,
+ h_aliases = lookup_cname(Domain),
+ h_addr_list = [IP],
+ h_addrtype = AddrType,
+ h_length = Length },
+ {ok, H}
+ end.
+
+dnip(IP) ->
+ case dnt(IP) of
+ {ok,{IP1 = {A,B,C,D}, inet, HLen}} ->
+ {ok,{IP1, inet, HLen, dn_in_addr_arpa(A,B,C,D)}};
+ {ok,{IP1 = {A,B,C,D,E,F,G,H}, inet6, HLen}} ->
+ {ok,{IP1, inet6, HLen, dn_ip6_int(A,B,C,D,E,F,G,H)}};
+ _ ->
+ {error, formerr}
+ end.
+
+
+dnt(IP = {A,B,C,D}) when ?ip(A,B,C,D) ->
+ {ok, {IP, inet, 4}};
+dnt({0,0,0,0,0,16#ffff,G,H}) when is_integer(G+H) ->
+ A = G div 256, B = G rem 256, C = H div 256, D = H rem 256,
+ {ok, {{A,B,C,D}, inet, 4}};
+dnt(IP = {A,B,C,D,E,F,G,H}) when ?ip6(A,B,C,D,E,F,G,H) ->
+ {ok, {IP, inet6, 16}};
+dnt(_) ->
+ {error, formerr}.
+
+%%
+%% Register socket Modules
+%%
+register_socket(Socket, Module) when is_port(Socket), is_atom(Module) ->
+ try erlang:port_set_data(Socket, Module)
+ catch
+ error:badarg -> false
+ end.
+
+unregister_socket(Socket) when is_port(Socket) ->
+ ok. %% not needed any more
+
+lookup_socket(Socket) when is_port(Socket) ->
+ try erlang:port_get_data(Socket) of
+ Module when is_atom(Module) -> {ok,Module};
+ _ -> {error,closed}
+ catch
+ error:badarg -> {error,closed}
+ end.
+
+%%%----------------------------------------------------------------------
+%%% Callback functions from gen_server
+%%%----------------------------------------------------------------------
+
+%%----------------------------------------------------------------------
+%% Func: init/1
+%% Returns: {ok, State} |
+%% {ok, State, Timeout} |
+%% {stop, Reason}
+%%----------------------------------------------------------------------
+
+%% INET DB ENTRY TYPES:
+%%
+%% KEY VALUE - DESCRIPTION
+%%
+%% hostname String - SHORT host name
+%%
+%% Resolver options
+%% ----------------
+%% res_ns [Nameserver] - list of name servers
+%% res_alt_ns [AltNameServer] - list of alternate name servers (nxdomain)
+%% res_search [Domain] - list of domains for short names
+%% res_domain Domain - local domain for short names
+%% res_recurse Bool - recursive query
+%% res_id Integer - NS query identifier
+%% res_retry Integer - Retry count for UDP query
+%% res_timeout Integer - UDP query timeout before retry
+%% res_inet6 Bool - address family inet6 for gethostbyname/1
+%% res_usevc Bool - use Virtual Circuit (TCP) only
+%% res_edns false|Integer - false or EDNS version
+%% res_udp_payload_size Integer - size for EDNS, both query and reply
+%% res_resolv_conf Filename - file to watch for resolver config, i.e.
+%% {res_ns, res_search}
+%% res_hosts_file Filename - file to watch for hosts config
+%%
+%% Socks5 options
+%% --------------
+%% socks5_server Server - IP address of the socks5 server
+%% socks5_port Port - TCP port of the socks5 server
+%% socks5_methods Ls - List of authentication methods
+%% socks5_noproxy IPs - List of {Net,Subnetmask}
+%%
+%% Generic tcp/udp options
+%% -----------------------
+%% tcp_module Module - The default gen_tcp module
+%% udp_module Module - The default gen_udp module
+%% sctp_module Module - The default gen_sctp module
+%%
+%% Distribution options
+%% --------------------
+%% {node_auth,N} Ls - List of authentication for node N
+%% {node_crypt,N} Ls - List of encryption methods for node N
+%% node_auth Ls - Default authentication
+%% node_crypt Ls - Default encryption
+%%
+init([]) ->
+ process_flag(trap_exit, true),
+ Db = ets:new(inet_db, [public, named_table]),
+ reset_db(Db),
+ Cache = ets:new(inet_cache, [public, bag, {keypos,2}, named_table]),
+ BynameOpts = [protected, bag, named_table, {keypos,1}],
+ ByaddrOpts = [protected, bag, named_table, {keypos,3}],
+ HostsByname = ets:new(inet_hosts_byname, BynameOpts),
+ HostsByaddr = ets:new(inet_hosts_byaddr, ByaddrOpts),
+ HostsFileByname = ets:new(inet_hosts_file_byname, BynameOpts),
+ HostsFileByaddr = ets:new(inet_hosts_file_byaddr, ByaddrOpts),
+ {ok, #state{db = Db,
+ cache = Cache,
+ hosts_byname = HostsByname,
+ hosts_byaddr = HostsByaddr,
+ hosts_file_byname = HostsFileByname,
+ hosts_file_byaddr = HostsFileByaddr,
+ cache_timer = init_timer() }}.
+
+reset_db(Db) ->
+ ets:insert(Db, {hostname, []}),
+ ets:insert(Db, {res_ns, []}),
+ ets:insert(Db, {res_alt_ns, []}),
+ ets:insert(Db, {res_search, []}),
+ ets:insert(Db, {res_domain, ""}),
+ ets:insert(Db, {res_lookup, []}),
+ ets:insert(Db, {res_recurse, true}),
+ ets:insert(Db, {res_usevc, false}),
+ ets:insert(Db, {res_id, 0}),
+ ets:insert(Db, {res_retry, ?RES_RETRY}),
+ ets:insert(Db, {res_timeout, ?RES_TIMEOUT}),
+ ets:insert(Db, {res_inet6, false}),
+ ets:insert(Db, {res_edns, false}),
+ ets:insert(Db, {res_udp_payload_size, ?DNS_UDP_PAYLOAD_SIZE}),
+ ets:insert(Db, {cache_size, ?CACHE_LIMIT}),
+ ets:insert(Db, {cache_refresh_interval,?CACHE_REFRESH}),
+ ets:insert(Db, {socks5_server, ""}),
+ ets:insert(Db, {socks5_port, ?IPPORT_SOCKS}),
+ ets:insert(Db, {socks5_methods, [none]}),
+ ets:insert(Db, {socks5_noproxy, []}),
+ ets:insert(Db, {tcp_module, ?DEFAULT_TCP_MODULE}),
+ ets:insert(Db, {udp_module, ?DEFAULT_UDP_MODULE}),
+ ets:insert(Db, {sctp_module, ?DEFAULT_SCTP_MODULE}).
+
+%%----------------------------------------------------------------------
+%% Func: handle_call/3
+%% Returns: {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} | (terminate/2 is called)
+%% {stop, Reason, Reply, State} (terminate/2 is called)
+%%----------------------------------------------------------------------
+handle_call(Request, From, #state{db=Db}=State) ->
+ case Request of
+ {load_hosts_file,IPNmAs} when is_list(IPNmAs) ->
+ NIPs = lists:flatten([ [{N,if tuple_size(IP) =:= 4 -> inet;
+ tuple_size(IP) =:= 8 -> inet6
+ end,IP} || N <- [Nm|As]]
+ || {IP,Nm,As} <- IPNmAs]),
+ Byname = State#state.hosts_file_byname,
+ Byaddr = State#state.hosts_file_byaddr,
+ ets:delete_all_objects(Byname),
+ ets:delete_all_objects(Byaddr),
+ ets:insert(Byname, NIPs),
+ ets:insert(Byaddr, NIPs),
+ {reply, ok, State};
+
+ {add_host,{A,B,C,D}=IP,[N|As]=Names}
+ when ?ip(A,B,C,D), is_list(N), is_list(As) ->
+ do_add_host(State#state.hosts_byname,
+ State#state.hosts_byaddr,
+ Names, inet, IP),
+ {reply, ok, State};
+ {add_host,{A,B,C,D,E,F,G,H}=IP,[N|As]=Names}
+ when ?ip6(A,B,C,D,E,F,G,H), is_list(N), is_list(As) ->
+ do_add_host(State#state.hosts_byname,
+ State#state.hosts_byaddr,
+ Names, inet6, IP),
+ {reply, ok, State};
+
+ {del_host,{A,B,C,D}=IP} when ?ip(A,B,C,D) ->
+ do_del_host(State#state.hosts_byname,
+ State#state.hosts_byaddr,
+ IP),
+ {reply, ok, State};
+ {del_host,{A,B,C,D,E,F,G,H}=IP} when ?ip6(A,B,C,D,E,F,G,H) ->
+ do_del_host(State#state.hosts_byname,
+ State#state.hosts_byaddr,
+ IP),
+ {reply, ok, State};
+
+ {add_rr, RR} when is_record(RR, dns_rr) ->
+ RR1 = lower_rr(RR),
+ ?dbg("add_rr: ~p~n", [RR1]),
+ do_add_rr(RR1, Db, State),
+ {reply, ok, State};
+
+ {del_rr, RR} when is_record(RR, dns_rr) ->
+ RR1 = lower_rr(RR),
+ %% note. del_rr will handle wildcards !!!
+ Cache = State#state.cache,
+ ets:match_delete(Cache, RR1),
+ {reply, ok, State};
+
+ {lookup_rr, Domain, Class, Type} ->
+ {reply, do_lookup_rr(Domain, Class, Type), State};
+
+ {listop, Opt, Op, E} ->
+ El = [E],
+ case res_check_option(Opt, El) of
+ true ->
+ Optname = res_optname(Opt),
+ [{_,Es}] = ets:lookup(Db, Optname),
+ NewEs = case Op of
+ ins -> [E | lists_delete(E, Es)];
+ add -> lists_delete(E, Es) ++ El;
+ del -> lists_delete(E, Es)
+ end,
+ ets:insert(Db, {Optname, NewEs}),
+ {reply,ok,State};
+ false ->
+ {reply,error,State}
+ end;
+
+ {listdel, Opt} ->
+ ets:insert(Db, {res_optname(Opt), []}),
+ {reply, ok, State};
+
+ {set_hostname, Name} ->
+ case inet_parse:visible_string(Name) of
+ true ->
+ ets:insert(Db, {hostname, Name}),
+ {reply, ok, State};
+ false ->
+ {reply, error, State}
+ end;
+
+ {res_set, hosts_file=Option, Fname} ->
+ handle_set_file(Option, Fname,
+ res_hosts_file_tm, res_hosts_file_info,
+ fun (Bin) ->
+ case inet_parse:hosts(Fname,
+ {chars,Bin}) of
+ {ok,Opts} ->
+ [{load_hosts_file,Opts}];
+ _ -> error
+ end
+ end,
+ From, State);
+ %%
+ {res_set, resolv_conf=Option, Fname} ->
+ handle_set_file(Option, Fname,
+ res_resolv_conf_tm, res_resolv_conf_info,
+ fun (Bin) ->
+ case inet_parse:resolv(Fname,
+ {chars,Bin}) of
+ {ok,Opts} ->
+ [del_ns,
+ clear_search,
+ clear_cache
+ |[Opt ||
+ {T,_}=Opt <- Opts,
+ (T =:= nameserver orelse
+ T =:= search)]];
+ _ -> error
+ end
+ end,
+ From, State);
+ %%
+ {res_set, Opt, Value} ->
+ case res_optname(Opt) of
+ undefined ->
+ {reply, error, State};
+ Optname ->
+ case res_check_option(Opt, Value) of
+ true ->
+ ets:insert(Db, {Optname, Value}),
+ {reply, ok, State};
+ false ->
+ {reply, error, State}
+ end
+ end;
+
+ {set_resolv_conf_tm, TM} ->
+ ets:insert(Db, {res_resolv_conf_tm, TM}),
+ {reply, ok, State};
+
+ {set_hosts_file_tm, TM} ->
+ ets:insert(Db, {res_hosts_file_tm, TM}),
+ {reply, ok, State};
+
+ {set_socks_server, {A,B,C,D}} when ?ip(A,B,C,D) ->
+ ets:insert(Db, {socks5_server, {A,B,C,D}}),
+ {reply, ok, State};
+
+ {set_socks_port, Port} when is_integer(Port) ->
+ ets:insert(Db, {socks5_port, Port}),
+ {reply, ok, State};
+
+ {add_socks_methods, Ls} ->
+ [{_,As}] = ets:lookup(Db, socks5_methods),
+ As1 = lists_subtract(As, Ls),
+ ets:insert(Db, {socks5_methods, As1 ++ Ls}),
+ {reply, ok, State};
+
+ {del_socks_methods, Ls} ->
+ [{_,As}] = ets:lookup(Db, socks5_methods),
+ As1 = lists_subtract(As, Ls),
+ case lists:member(none, As1) of
+ false -> ets:insert(Db, {socks5_methods, As1 ++ [none]});
+ true -> ets:insert(Db, {socks5_methods, As1})
+ end,
+ {reply, ok, State};
+
+ del_socks_methods ->
+ ets:insert(Db, {socks5_methods, [none]}),
+ {reply, ok, State};
+
+ {add_socks_noproxy, {{A,B,C,D},{MA,MB,MC,MD}}}
+ when ?ip(A,B,C,D), ?ip(MA,MB,MC,MD) ->
+ [{_,As}] = ets:lookup(Db, socks5_noproxy),
+ ets:insert(Db, {socks5_noproxy, As++[{{A,B,C,D},{MA,MB,MC,MD}}]}),
+ {reply, ok, State};
+
+ {del_socks_noproxy, {A,B,C,D}=IP} when ?ip(A,B,C,D) ->
+ [{_,As}] = ets:lookup(Db, socks5_noproxy),
+ ets:insert(Db, {socks5_noproxy, lists_keydelete(IP, 1, As)}),
+ {reply, ok, State};
+
+ {set_tcp_module, Mod} when is_atom(Mod) ->
+ ets:insert(Db, {tcp_module, Mod}), %% check/load module ?
+ {reply, ok, State};
+
+ {set_udp_module, Mod} when is_atom(Mod) ->
+ ets:insert(Db, {udp_module, Mod}), %% check/load module ?
+ {reply, ok, State};
+
+ {set_sctp_module, Fam} when is_atom(Fam) ->
+ ets:insert(Db, {sctp_module, Fam}), %% check/load module ?
+ {reply, ok, State};
+
+ {set_cache_size, Size} when is_integer(Size), Size >= 0 ->
+ ets:insert(Db, {cache_size, Size}),
+ {reply, ok, State};
+
+ {set_cache_refresh, Time} when is_integer(Time), Time > 0 ->
+ Time1 = ((Time+999) div 1000)*1000, %% round up
+ ets:insert(Db, {cache_refresh_interval, Time1}),
+ stop_timer(State#state.cache_timer),
+ {reply, ok, State#state{cache_timer = init_timer()}};
+
+ clear_hosts ->
+ ets:delete_all_objects(State#state.hosts_byname),
+ ets:delete_all_objects(State#state.hosts_byaddr),
+ {reply, ok, State};
+
+ clear_cache ->
+ ets:match_delete(State#state.cache, '_'),
+ {reply, ok, State};
+
+ reset ->
+ reset_db(Db),
+ stop_timer(State#state.cache_timer),
+ {reply, ok, State#state{cache_timer = init_timer()}};
+
+ {add_rc_list, List} ->
+ handle_rc_list(List, From, State);
+
+ stop ->
+ {stop, normal, ok, State};
+
+ _ ->
+ {reply, error, State}
+ end.
+
+
+%%----------------------------------------------------------------------
+%% Func: handle_cast/2
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%----------------------------------------------------------------------
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+%%----------------------------------------------------------------------
+%% Func: handle_info/2
+%% Returns: {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%%----------------------------------------------------------------------
+handle_info(refresh_timeout, State) ->
+ do_refresh_cache(State#state.cache),
+ {noreply, State#state{cache_timer = init_timer()}};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%----------------------------------------------------------------------
+%% Func: terminate/2
+%% Purpose: Shutdown the server
+%% Returns: any (ignored by gen_server)
+%%----------------------------------------------------------------------
+terminate(_Reason, State) ->
+ stop_timer(State#state.cache_timer),
+ ok.
+
+%%%----------------------------------------------------------------------
+%%% Internal functions
+%%%----------------------------------------------------------------------
+
+handle_set_file(Option, Fname, TagTm, TagInfo, ParseFun, From,
+ #state{db=Db}=State) ->
+ case res_check_option(Option, Fname) of
+ true when Fname =:= "" ->
+ ets:insert(Db, {res_optname(Option), Fname}),
+ ets:delete(Db, TagInfo),
+ ets:delete(Db, TagTm),
+ handle_set_file(ParseFun, <<>>, From, State);
+ true ->
+ File = filename:flatten(Fname),
+ ets:insert(Db, {res_optname(Option), File}),
+ Bin =
+ case erl_prim_loader:read_file_info(File) of
+ {ok, Finfo0} ->
+ Finfo = Finfo0#file_info{access = undefined,
+ atime = undefined},
+ ets:insert(Db, {TagInfo, Finfo}),
+ ets:insert(Db, {TagTm, times()}),
+ case erl_prim_loader:get_file(File) of
+ {ok, B, _} -> B;
+ _ -> <<>>
+ end;
+ _ -> <<>>
+ end,
+ handle_set_file(ParseFun, Bin, From, State);
+ false -> {reply,error,State}
+ end.
+
+handle_set_file(ParseFun, Bin, From, State) ->
+ case ParseFun(Bin) of
+ error -> {reply,error,State};
+ Opts ->
+ handle_rc_list(Opts, From, State)
+ end.
+
+do_add_host(Byname, Byaddr, Names, Type, IP) ->
+ do_del_host(Byname, Byaddr, IP),
+ NIPs = [{tolower(N),Type,IP} || N <- Names],
+ ets:insert(Byname, NIPs),
+ ets:insert(Byaddr, NIPs),
+ ok.
+
+do_del_host(Byname, Byaddr, IP) ->
+ [ets:delete_object(Byname, NIP) || NIP <- ets:lookup(Byaddr, IP)],
+ ets:delete(Byaddr, IP),
+ ok.
+
+%% Loop over .inetrc option list and call handle_call/3 for each
+%%
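+%% E.g. (illustrative): the .inetrc entry {nameserver, {192,0,2,53}} is
+%% translated by rc_opt_req/1 below into
+%% {listop,nameservers,add,{{192,0,2,53},?NAMESERVER_PORT}} and then
+%% handled like any other request by handle_call/3.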
+handle_rc_list([], _From, State) ->
+ {reply, ok, State};
+handle_rc_list([Opt|Opts], From, State) ->
+ case rc_opt_req(Opt) of
+ undefined ->
+ {reply, {error,{badopt,Opt}}, State};
+ Req ->
+ case handle_calls(Req, From, State) of
+ {reply, ok, NewState} ->
+ handle_rc_list(Opts, From, NewState);
+ Result -> Result
+ end
+ end;
+handle_rc_list(_, _From, State) ->
+ {reply, error, State}.
+
+handle_calls([], _From, State) ->
+ {reply, ok, State};
+handle_calls([Req|Reqs], From, State) ->
+ case handle_call(Req, From, State) of
+ {reply, ok, NewState} ->
+ handle_calls(Reqs, From, NewState);
+ {reply, _, NewState} ->
+ {reply, error, NewState}
+ %% {noreply,_} is currently not returned by handle_call/3
+ end;
+handle_calls(Req, From, State) ->
+ handle_call(Req, From, State).
+
+%% Translate .inetrc option into gen_server request
+%%
+rc_opt_req({nameserver, Ns}) ->
+ {listop,nameservers,add,{Ns,?NAMESERVER_PORT}};
+rc_opt_req({nameserver, Ns, Port}) ->
+ {listop,nameservers,add,{Ns,Port}};
+rc_opt_req({alt_nameserver, Ns}) ->
+ {listop,alt_nameservers,add,{Ns,?NAMESERVER_PORT}};
+rc_opt_req({alt_nameserver, Ns, Port}) ->
+ {listop,alt_nameservers,add,{Ns,Port}};
+rc_opt_req({socks5_noproxy, IP, Mask}) ->
+ {add_socks_noproxy, {IP, Mask}};
+rc_opt_req({search, Ds}) when is_list(Ds) ->
+ try [{listop,search,add,D} || D <- Ds]
+ catch error:_ -> undefined
+ end;
+rc_opt_req({host, IP, Aliases}) -> {add_host, IP, Aliases};
+rc_opt_req({load_hosts_file, _}=Req) -> Req;
+rc_opt_req({lookup, Ls}) ->
+ try {res_set, lookup, translate_lookup(Ls)}
+ catch error:_ -> undefined
+ end;
+rc_opt_req({Name,Arg}) ->
+ case rc_reqname(Name) of
+ undefined ->
+ case is_res_set(Name) of
+ true -> {res_set,Name,Arg};
+ false -> undefined
+ end;
+ Req -> {Req, Arg}
+ end;
+rc_opt_req(del_ns) ->
+ {listdel,nameservers};
+rc_opt_req(del_alt_ns) ->
+ {listdel,alt_nameservers};
+rc_opt_req(clear_ns) ->
+ [{listdel,nameservers},{listdel,alt_nameservers}];
+rc_opt_req(clear_search) ->
+ {listdel,search};
+rc_opt_req(Opt) when is_atom(Opt) ->
+ case is_reqname(Opt) of
+ true -> Opt;
+ false -> undefined
+ end;
+rc_opt_req(_) -> undefined.
+%%
+rc_reqname(socks5_server) -> set_socks_server;
+rc_reqname(socks5_port) -> set_socks_port;
+rc_reqname(socks5_methods) -> set_socks_methods;
+rc_reqname(cache_refresh) -> set_cache_refresh;
+rc_reqname(cache_size) -> set_cache_size;
+rc_reqname(udp) -> set_udp_module;
+rc_reqname(sctp) -> set_sctp_module;
+rc_reqname(tcp) -> set_tcp_module;
+rc_reqname(_) -> undefined.
+%%
+is_res_set(domain) -> true;
+is_res_set(lookup) -> true;
+is_res_set(timeout) -> true;
+is_res_set(retry) -> true;
+is_res_set(inet6) -> true;
+is_res_set(usevc) -> true;
+is_res_set(edns) -> true;
+is_res_set(udp_payload_size) -> true;
+is_res_set(resolv_conf) -> true;
+is_res_set(hosts_file) -> true;
+is_res_set(_) -> false.
+%%
+is_reqname(reset) -> true;
+is_reqname(clear_cache) -> true;
+is_reqname(clear_hosts) -> true;
+is_reqname(_) -> false.
+
+%% Add a resource record to the cache if there is space left.
+%% If the cache is full this function first deletes old entries,
+%% i.e. entries whose latest access time is oldest.
+%% #dns_rr.cnt is used to store the access time instead of the number
+%% of accesses.
+do_add_rr(RR, Db, State) ->
+ CacheDb = State#state.cache,
+ TM = times(),
+ case alloc_entry(Db, CacheDb, TM) of
+ true ->
+ cache_rr(Db, CacheDb, RR#dns_rr { tm = TM,
+ cnt = TM });
+ _ ->
+ false
+ end.
+
+cache_rr(_Db, Cache, RR) ->
+ %% delete possible old entry
+ ets:match_delete(Cache, RR#dns_rr { cnt = '_', tm = '_', ttl = '_',
+ bm = '_', func = '_'}),
+ ets:insert(Cache, RR).
+
+times() ->
+ {Mega,Secs,_} = erlang:now(),
+ Mega*1000000 + Secs.
+
+%% lookup and remove old entries
+
+do_lookup_rr(Domain, Class, Type) ->
+ match_rr(#dns_rr { domain = tolower(Domain), class = Class,type = Type,
+ cnt = '_', tm = '_', ttl = '_',
+ bm = '_', func = '_', data = '_'}).
+
+match_rr(RR) ->
+ filter_rr(ets:match_object(inet_cache, RR), times()).
+
+
+%% filter old resource records and update access count
+
+filter_rr([RR | RRs], Time) when RR#dns_rr.ttl =:= 0 -> %% at least once
+ ets:match_delete(inet_cache, RR),
+ [RR | filter_rr(RRs, Time)];
+filter_rr([RR | RRs], Time) when RR#dns_rr.tm + RR#dns_rr.ttl < Time ->
+ ets:match_delete(inet_cache, RR),
+ filter_rr(RRs, Time);
+filter_rr([RR | RRs], Time) ->
+ ets:match_delete(inet_cache, RR),
+ ets:insert(inet_cache, RR#dns_rr { cnt = Time }),
+ [RR | filter_rr(RRs, Time)];
+filter_rr([], _Time) -> [].
+
+
+%%
+%% Lower case the domain name before storage
+%%
+lower_rr(RR) ->
+ Dn = RR#dns_rr.domain,
+ if is_list(Dn) ->
+ RR#dns_rr { domain = tolower(Dn) };
+ true -> RR
+ end.
+
+%%
+%% Map upper-case to lower-case
+%% NOTE: this code is in kernel and we don't want to rely
+%% too much on stdlib
+%%
+tolower([]) -> [];
+tolower([C|Cs]) when C >= $A, C =< $Z -> [(C-$A)+$a|tolower(Cs)];
+tolower([C|Cs]) -> [C|tolower(Cs)].
+
+dn_ip6_int(A,B,C,D,E,F,G,H) ->
+ dnib(H) ++ dnib(G) ++ dnib(F) ++ dnib(E) ++
+ dnib(D) ++ dnib(C) ++ dnib(B) ++ dnib(A) ++ "ip6.int".
+
+dn_in_addr_arpa(A,B,C,D) ->
+ integer_to_list(D) ++ "." ++
+ integer_to_list(C) ++ "." ++
+ integer_to_list(B) ++ "." ++
+ integer_to_list(A) ++ ".in-addr.arpa".
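+%% For example, dn_in_addr_arpa(192,0,2,1) yields "1.2.0.192.in-addr.arpa"
+%% (illustrative address); dn_ip6_int/8 builds the analogous nibble-reversed
+%% "ip6.int" name using dnib/1 below.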
+
+dnib(X) ->
+ [ hex(X), $., hex(X bsr 4), $., hex(X bsr 8), $., hex(X bsr 12), $.].
+
+hex(X) ->
+ X4 = (X band 16#f),
+ if X4 < 10 -> X4 + $0;
+ true -> (X4-10) + $a
+ end.
+
+%% Strip trailing dot, do not produce garbage unless necessary.
+%%
+stripdot(Name) ->
+ case stripdot_1(Name) of
+ false -> Name;
+ N -> N
+ end.
+%%
+stripdot_1([$.]) -> [];
+stripdot_1([]) -> false;
+stripdot_1([H|T]) ->
+ case stripdot_1(T) of
+ false -> false;
+ N -> [H|N]
+ end.
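+%% E.g. stripdot("example.com.") -> "example.com", while
+%% stripdot("example.com") is returned unchanged.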
+
+%% -------------------------------------------------------------------
+%% Refresh cache at regular intervals, i.e. delete expired #dns_rr's.
+%% -------------------------------------------------------------------
+init_timer() ->
+ erlang:send_after(cache_refresh(), self(), refresh_timeout).
+
+stop_timer(undefined) ->
+ undefined;
+stop_timer(Timer) ->
+ erlang:cancel_timer(Timer).
+
+cache_refresh() ->
+ case db_get(cache_refresh_interval) of
+ undefined -> ?CACHE_REFRESH;
+ Val -> Val
+ end.
+
+%% Delete all entries with expired TTL.
+%% Returns the access time of the entry with the oldest access time
+%% in the cache.
+do_refresh_cache(CacheDb) ->
+ Now = times(),
+ do_refresh_cache(ets:first(CacheDb), CacheDb, Now, Now).
+
+do_refresh_cache('$end_of_table', _, _, OldestT) ->
+ OldestT;
+do_refresh_cache(Key, CacheDb, Now, OldestT) ->
+ Fun = fun(RR, T) when RR#dns_rr.tm + RR#dns_rr.ttl < Now ->
+ ets:match_delete(CacheDb, RR),
+ T;
+ (#dns_rr{cnt = C}, T) when C < T ->
+ C;
+ (_, T) ->
+ T
+ end,
+ Next = ets:next(CacheDb, Key),
+ OldT = lists:foldl(Fun, OldestT, ets:lookup(CacheDb, Key)),
+ do_refresh_cache(Next, CacheDb, Now, OldT).
+
+%% -------------------------------------------------------------------
+%% Allocate room for a new entry in the cache.
+%% Deletes entries with expired TTL and all entries with latest
+%% access time older than
+%% trunc((TM - OldestTM) * 0.3) + OldestTM from the cache if it
+%% is full. Does not delete more than 10% of the entries in the cache
+%% though, unless they were deleted due to expired TTL.
+%% Returns: true if there is space for a new entry, otherwise false.
+%% -------------------------------------------------------------------
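+%% Worked example (illustrative numbers): with TM = 1000, OldestTM = 400
+%% and a cache_size limit of 100, DelTM = trunc((1000-400)*0.3)+400 = 580,
+%% so entries whose latest access time is =< 580 are deleted, stopping
+%% once roughly N = trunc(100*0.1)+1 = 11 of them have been removed.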
+alloc_entry(Db, CacheDb, TM) ->
+ CurSize = ets:info(CacheDb, size),
+ case ets:lookup(Db, cache_size) of
+ [{cache_size, Size}] when Size =< CurSize, Size > 0 ->
+ alloc_entry(CacheDb, CurSize, TM, trunc(Size * 0.1) + 1);
+ [{cache_size, Size}] when Size =< 0 ->
+ false;
+ _ ->
+ true
+ end.
+
+alloc_entry(CacheDb, OldSize, TM, N) ->
+ OldestTM = do_refresh_cache(CacheDb), % Delete timedout entries
+ case ets:info(CacheDb, size) of
+ OldSize ->
+	    %% No entries timed out
+ delete_n_oldest(CacheDb, TM, OldestTM, N);
+ _ ->
+ true
+ end.
+
+delete_n_oldest(CacheDb, TM, OldestTM, N) ->
+ DelTM = trunc((TM - OldestTM) * 0.3) + OldestTM,
+ case delete_older(CacheDb, DelTM, N) of
+ 0 ->
+ false;
+ _ ->
+ true
+ end.
+
+%% Delete entries with latest access time older than TM.
+%% Delete at most N entries.
+%% Returns the number of deleted entries.
+delete_older(CacheDb, TM, N) ->
+ delete_older(ets:first(CacheDb), CacheDb, TM, N, 0).
+
+delete_older('$end_of_table', _, _, _, M) ->
+ M;
+delete_older(_, _, _, N, M) when N =< M ->
+ M;
+delete_older(Domain, CacheDb, TM, N, M) ->
+ Next = ets:next(CacheDb, Domain),
+ Fun = fun(RR, MM) when RR#dns_rr.cnt =< TM ->
+ ets:match_delete(CacheDb, RR),
+ MM + 1;
+ (_, MM) ->
+ MM
+ end,
+ M1 = lists:foldl(Fun, M, ets:lookup(CacheDb, Domain)),
+ delete_older(Next, CacheDb, TM, N, M1).
+
+
+%% as lists:delete/2, but delete all exact matches
+%%
+lists_delete(_, []) -> [];
+lists_delete(E, [E|Es]) ->
+ lists_delete(E, Es);
+lists_delete(E, [X|Es]) ->
+ [X|lists_delete(E, Es)].
+
+%% as '--'/2 aka lists:subtract/2 but delete all exact matches
+lists_subtract(As0, Bs) ->
+ lists:foldl(fun (E, As) -> lists_delete(E, As) end, As0, Bs).
+
+%% as lists:keydelete/3, but delete all _exact_ key matches
+lists_keydelete(_, _, []) -> [];
+lists_keydelete(K, N, [T|Ts]) when element(N, T) =:= K ->
+ lists_keydelete(K, N, Ts);
+lists_keydelete(K, N, [X|Ts]) ->
+ [X|lists_keydelete(K, N, Ts)].
diff --git a/lib/kernel/src/inet_dns.erl b/lib/kernel/src/inet_dns.erl
new file mode 100644
index 0000000000..669a361c9d
--- /dev/null
+++ b/lib/kernel/src/inet_dns.erl
@@ -0,0 +1,701 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_dns).
+
+%% Dns record encode/decode
+%%
+%% RFC 1035: Domain Names - Implementation and Specification
+%% RFC 2181: Clarifications to the DNS Specification
+%% RFC 2671: Extension Mechanisms for DNS (EDNS0)
+%% RFC 2782: A DNS RR for specifying the location of services (DNS SRV)
+%% RFC 2915: The Naming Authority Pointer (NAPTR) DNS Resource Record
+
+-export([decode/1, encode/1]).
+
+-import(lists, [reverse/1, reverse/2, nthtail/2]).
+
+-include("inet_int.hrl").
+-include("inet_dns.hrl").
+
+-export([record_type/1, rr/1, rr/2]).
+-export([make_rr/0, make_rr/1, make_rr/2, make_rr/3]).
+%% ADTs exports. The make_* functions are undocumented.
+-export([msg/1, msg/2,
+ make_msg/0, make_msg/1, make_msg/2, make_msg/3]).
+-export([header/1, header/2,
+ make_header/0, make_header/1, make_header/2, make_header/3]).
+-export([dns_query/1, dns_query/2,
+ make_dns_query/0, make_dns_query/1,
+ make_dns_query/2, make_dns_query/3]).
+-include("inet_dns_record_adts.hrl").
+
+%% Function merge of #dns_rr{} and #dns_rr_opt{}
+%%
+
+record_type(#dns_rr{}) -> rr;
+record_type(#dns_rr_opt{}) -> rr;
+record_type(Rec) ->
+ record_adts(Rec).
+
+rr(#dns_rr{}=RR) -> dns_rr(RR);
+rr(#dns_rr_opt{}=RR) -> dns_rr_opt(RR).
+
+rr(#dns_rr{}=RR, L) -> dns_rr(RR, L);
+rr(#dns_rr_opt{}=RR, L) -> dns_rr_opt(RR, L).
+
+make_rr() -> make_dns_rr().
+
+make_rr(L) when is_list(L) ->
+ case rr_type(L, any) of
+ opt -> make_dns_rr_opt(L);
+ _ -> make_dns_rr(L)
+ end.
+
+make_rr(type, opt) -> make_dns_rr_opt();
+make_rr(F, V) when is_atom(F) -> make_dns_rr(F, V);
+make_rr(#dns_rr{}=RR, L) when is_list(L) ->
+ case rr_type(L, RR#dns_rr.type) of
+ opt ->
+ Ts = common_fields__rr__rr_opt(),
+ make_dns_rr_opt([Opt || {T,_}=Opt <- dns_rr(RR),
+ lists_member(T, Ts)] ++ L);
+ _ -> make_dns_rr(RR, L)
+ end;
+make_rr(#dns_rr_opt{}=RR, L) when is_list(L) ->
+ case rr_type(L, RR#dns_rr_opt.type) of
+ opt ->
+ make_dns_rr_opt(RR, L);
+ _ ->
+ Ts = common_fields__rr__rr_opt(),
+ make_dns_rr([Opt || {T,_}=Opt <- dns_rr_opt(RR),
+ lists_member(T, Ts)] ++ L)
+ end.
+
+make_rr(#dns_rr{}=RR, type, opt) -> make_rr(RR, [{type,opt}]);
+make_rr(#dns_rr{}=RR, F, V) -> make_dns_rr(RR, F, V);
+make_rr(#dns_rr_opt{}=RR, type, opt) -> RR;
+make_rr(#dns_rr_opt{}=RR, type, T) -> make_rr(RR, [{type,T}]);
+make_rr(#dns_rr_opt{}=RR, F, V) -> make_dns_rr_opt(RR, F, V).
+
+-compile({inline, [rr_type/2]}).
+rr_type([], T) -> T;
+rr_type([{type,T}|Opts], _) -> rr_type(Opts, T);
+rr_type([_|Opts], T) -> rr_type(Opts, T).
+
+common_fields__rr__rr_opt() ->
+ [T || T <- record_info(fields, dns_rr_opt),
+ lists_member(T, record_info(fields, dns_rr))].
+
+-compile({inline, [lists_member/2]}).
+lists_member(_, []) -> false;
+lists_member(H, [H|_]) -> true;
+lists_member(H, [_|T]) -> lists_member(H, T).
+
+
+
+-define(DECODE_ERROR, fmt). % must match a clause in inet_res:query_nss_e?dns
+
+%%
+%% Decode a dns buffer.
+%%
+
+decode(Buffer) when is_binary(Buffer) ->
+ try do_decode(Buffer) of
+ DnsRec ->
+ {ok,DnsRec}
+ catch
+ Reason ->
+ {error,Reason}
+ end.
+
+do_decode(<<Id:16,
+ QR:1,Opcode:4,AA:1,TC:1,RD:1,
+ RA:1,PR:1,_:2,Rcode:4,
+ QdCount:16,AnCount:16,NsCount:16,ArCount:16,
+ QdBuf/binary>>=Buffer) ->
+ {AnBuf,QdList} = decode_query_section(QdBuf,QdCount,Buffer),
+ {NsBuf,AnList} = decode_rr_section(AnBuf,AnCount,Buffer),
+ {ArBuf,NsList} = decode_rr_section(NsBuf,NsCount,Buffer),
+ {Rest,ArList} = decode_rr_section(ArBuf,ArCount,Buffer),
+ case Rest of
+ <<>> ->
+ DnsHdr =
+ #dns_header{id=Id,
+ qr=decode_boolean(QR),
+ opcode=decode_opcode(Opcode),
+ aa=decode_boolean(AA),
+ tc=decode_boolean(TC),
+ rd=decode_boolean(RD),
+ ra=decode_boolean(RA),
+ pr=decode_boolean(PR),
+ rcode=Rcode},
+ #dns_rec{header=DnsHdr,
+ qdlist=QdList,
+ anlist=AnList,
+ nslist=NsList,
+ arlist=ArList};
+ _ ->
+ %% Garbage data after DNS message
+ throw(?DECODE_ERROR)
+ end;
+do_decode(_) ->
+ %% DNS message does not even match header
+ throw(?DECODE_ERROR).
+
+decode_query_section(Bin, N, Buffer) ->
+ decode_query_section(Bin, N, Buffer, []).
+
+decode_query_section(Rest, 0, _Buffer, Qs) ->
+ {Rest,reverse(Qs)};
+decode_query_section(Bin, N, Buffer, Qs) ->
+ case decode_name(Bin, Buffer) of
+ {<<Type:16,Class:16,Rest/binary>>,Name} ->
+ DnsQuery =
+ #dns_query{domain=Name,
+ type=decode_type(Type),
+ class=decode_class(Class)},
+ decode_query_section(Rest, N-1, Buffer, [DnsQuery|Qs]);
+ _ ->
+ %% Broken question
+ throw(?DECODE_ERROR)
+ end.
+
+decode_rr_section(Bin, N, Buffer) ->
+ decode_rr_section(Bin, N, Buffer, []).
+
+decode_rr_section(Rest, 0, _Buffer, RRs) ->
+ {Rest,reverse(RRs)};
+decode_rr_section(Bin, N, Buffer, RRs) ->
+ case decode_name(Bin, Buffer) of
+ {<<T:16/unsigned,C:16/unsigned,TTL:4/binary,
+ Len:16,D:Len/binary,Rest/binary>>,
+ Name} ->
+ Type = decode_type(T),
+ Class = decode_class(C),
+ Data = decode_data(D, Class, Type, Buffer),
+ RR =
+ case Type of
+ opt ->
+ <<ExtRcode,Version,Z:16>> = TTL,
+ #dns_rr_opt{domain=Name,
+ type=Type,
+ udp_payload_size=C,
+ ext_rcode=ExtRcode,
+ version=Version,
+ z=Z,
+ data=Data};
+ _ ->
+ <<TimeToLive:32/signed>> = TTL,
+ #dns_rr{domain=Name,
+ type=Type,
+ class=Class,
+ ttl=if TimeToLive < 0 -> 0;
+ true -> TimeToLive end,
+ data=Data}
+ end,
+ decode_rr_section(Rest, N-1, Buffer, [RR|RRs]);
+ _ ->
+ %% Broken RR
+ throw(?DECODE_ERROR)
+ end.
+
+%%
+%% Encode a user query
+%%
+
+encode(Q) ->
+ QdCount = length(Q#dns_rec.qdlist),
+ AnCount = length(Q#dns_rec.anlist),
+ NsCount = length(Q#dns_rec.nslist),
+ ArCount = length(Q#dns_rec.arlist),
+ B0 = encode_header(Q#dns_rec.header, QdCount, AnCount, NsCount, ArCount),
+ C0 = gb_trees:empty(),
+ {B1,C1} = encode_query_section(B0, C0, Q#dns_rec.qdlist),
+ {B2,C2} = encode_res_section(B1, C1, Q#dns_rec.anlist),
+ {B3,C3} = encode_res_section(B2, C2, Q#dns_rec.nslist),
+ {B,_} = encode_res_section(B3, C3, Q#dns_rec.arlist),
+ B.
+
+
+%% RFC 1035: 4.1.1. Header section format
+%%
+encode_header(#dns_header{id=Id}=H, QdCount, AnCount, NsCount, ArCount) ->
+ QR = encode_boolean(H#dns_header.qr),
+ Opcode = encode_opcode(H#dns_header.opcode),
+ AA = encode_boolean(H#dns_header.aa),
+ TC = encode_boolean(H#dns_header.tc),
+ RD = encode_boolean(H#dns_header.rd),
+ RA = encode_boolean(H#dns_header.ra),
+ PR = encode_boolean(H#dns_header.pr),
+ Rcode = H#dns_header.rcode,
+ <<Id:16,
+ QR:1,Opcode:4,AA:1,TC:1,RD:1,
+ RA:1,PR:1,0:2,Rcode:4,
+ QdCount:16,AnCount:16,NsCount:16,ArCount:16>>.
+
+%% RFC 1035: 4.1.2. Question section format
+%%
+encode_query_section(Bin, Comp, []) -> {Bin,Comp};
+encode_query_section(Bin0, Comp0, [#dns_query{domain=DName}=Q | Qs]) ->
+ Type = encode_type(Q#dns_query.type),
+ Class = encode_class(Q#dns_query.class),
+ {Bin,Comp} = encode_name(Bin0, Comp0, byte_size(Bin0), DName),
+ encode_query_section(<<Bin/binary,Type:16,Class:16>>, Comp, Qs).
+
+%% RFC 1035: 4.1.3. Resource record format
+%% RFC 2671: 4.3, 4.4, 4.6 OPT RR format
+%%
+encode_res_section(Bin, Comp, []) -> {Bin,Comp};
+encode_res_section(Bin, Comp, [#dns_rr {domain = DName,
+ type = Type,
+ class = Class,
+ ttl = TTL,
+ data = Data} | Rs]) ->
+ encode_res_section_rr(Bin, Comp, Rs,
+ DName, Type, Class, <<TTL:32/signed>>, Data);
+encode_res_section(Bin, Comp, [#dns_rr_opt {domain = DName,
+ udp_payload_size = UdpPayloadSize,
+ ext_rcode = ExtRCode,
+ version = Version,
+ z = Z,
+ data = Data} | Rs]) ->
+ encode_res_section_rr(Bin, Comp, Rs,
+ DName, ?S_OPT, UdpPayloadSize,
+ <<ExtRCode,Version,Z:16>>, Data).
+
+encode_res_section_rr(Bin0, Comp0, Rs, DName, Type, Class, TTL, Data) ->
+ T = encode_type(Type),
+ C = encode_class(Class),
+ {Bin,Comp1} = encode_name(Bin0, Comp0, byte_size(Bin0), DName),
+ {DataBin,Comp} = encode_data(Comp1, byte_size(Bin)+2+2+byte_size(TTL)+2,
+ Type, Class, Data),
+ DataSize = byte_size(DataBin),
+ encode_res_section(<<Bin/binary,T:16,C:16,
+ TTL/binary,DataSize:16,DataBin/binary>>, Comp, Rs).
+
+%%
+%% Resource types
+%%
+decode_type(Type) ->
+ case Type of
+ ?T_A -> ?S_A;
+ ?T_NS -> ?S_NS;
+ ?T_MD -> ?S_MD;
+ ?T_MF -> ?S_MF;
+ ?T_CNAME -> ?S_CNAME;
+ ?T_SOA -> ?S_SOA;
+ ?T_MB -> ?S_MB;
+ ?T_MG -> ?S_MG;
+ ?T_MR -> ?S_MR;
+ ?T_NULL -> ?S_NULL;
+ ?T_WKS -> ?S_WKS;
+ ?T_PTR -> ?S_PTR;
+ ?T_HINFO -> ?S_HINFO;
+ ?T_MINFO -> ?S_MINFO;
+ ?T_MX -> ?S_MX;
+ ?T_TXT -> ?S_TXT;
+ ?T_AAAA -> ?S_AAAA;
+ ?T_SRV -> ?S_SRV;
+ ?T_NAPTR -> ?S_NAPTR;
+ ?T_OPT -> ?S_OPT;
+ ?T_SPF -> ?S_SPF;
+ %% non standard
+ ?T_UINFO -> ?S_UINFO;
+ ?T_UID -> ?S_UID;
+ ?T_GID -> ?S_GID;
+ ?T_UNSPEC -> ?S_UNSPEC;
+ %% Query type values which do not appear in resource records
+ ?T_AXFR -> ?S_AXFR;
+ ?T_MAILB -> ?S_MAILB;
+ ?T_MAILA -> ?S_MAILA;
+ ?T_ANY -> ?S_ANY;
+ _ -> Type %% raw unknown type
+ end.
+
+%%
+%% Resource types
+%%
+encode_type(Type) ->
+ case Type of
+ ?S_A -> ?T_A;
+ ?S_NS -> ?T_NS;
+ ?S_MD -> ?T_MD;
+ ?S_MF -> ?T_MF;
+ ?S_CNAME -> ?T_CNAME;
+ ?S_SOA -> ?T_SOA;
+ ?S_MB -> ?T_MB;
+ ?S_MG -> ?T_MG;
+ ?S_MR -> ?T_MR;
+ ?S_NULL -> ?T_NULL;
+ ?S_WKS -> ?T_WKS;
+ ?S_PTR -> ?T_PTR;
+ ?S_HINFO -> ?T_HINFO;
+ ?S_MINFO -> ?T_MINFO;
+ ?S_MX -> ?T_MX;
+ ?S_TXT -> ?T_TXT;
+ ?S_AAAA -> ?T_AAAA;
+ ?S_SRV -> ?T_SRV;
+ ?S_NAPTR -> ?T_NAPTR;
+ ?S_OPT -> ?T_OPT;
+ ?S_SPF -> ?T_SPF;
+ %% non standard
+ ?S_UINFO -> ?T_UINFO;
+ ?S_UID -> ?T_UID;
+ ?S_GID -> ?T_GID;
+ ?S_UNSPEC -> ?T_UNSPEC;
+ %% Query type values which do not appear in resource records
+ ?S_AXFR -> ?T_AXFR;
+ ?S_MAILB -> ?T_MAILB;
+ ?S_MAILA -> ?T_MAILA;
+ ?S_ANY -> ?T_ANY;
+ Type when is_integer(Type) -> Type %% raw unknown type
+ end.
+
+%%
+%% Resource classes
+%%
+
+decode_class(Class) ->
+ case Class of
+ ?C_IN -> in;
+ ?C_CHAOS -> chaos;
+ ?C_HS -> hs;
+ ?C_ANY -> any;
+ _ -> Class %% raw unknown class
+ end.
+
+encode_class(Class) ->
+ case Class of
+ in -> ?C_IN;
+ chaos -> ?C_CHAOS;
+ hs -> ?C_HS;
+ any -> ?C_ANY;
+ Class when is_integer(Class) -> Class %% raw unknown class
+ end.
+
+decode_opcode(Opcode) ->
+ case Opcode of
+ ?QUERY -> 'query';
+ ?IQUERY -> iquery;
+ ?STATUS -> status;
+ _ when is_integer(Opcode) -> Opcode %% non-standard opcode
+ end.
+
+encode_opcode(Opcode) ->
+ case Opcode of
+ 'query' -> ?QUERY;
+ iquery -> ?IQUERY;
+ status -> ?STATUS;
+ _ when is_integer(Opcode) -> Opcode %% non-standard opcode
+ end.
+
+
+encode_boolean(true) -> 1;
+encode_boolean(false) -> 0;
+encode_boolean(B) when is_integer(B) -> B.
+
+decode_boolean(0) -> false;
+decode_boolean(I) when is_integer(I) -> true.
+
+%%
+%% Data field -> term() content representation
+%%
+decode_data(<<A,B,C,D>>, in, ?S_A, _) -> {A,B,C,D};
+decode_data(<<A:16,B:16,C:16,D:16,E:16,F:16,G:16,H:16>>, in, ?S_AAAA, _) ->
+ {A,B,C,D,E,F,G,H};
+decode_data(Dom, _, ?S_NS, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Dom, _, ?S_MD, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Dom, _, ?S_MF, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Dom, _, ?S_CNAME, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Data0, _, ?S_SOA, Buffer) ->
+ {Data1,MName} = decode_name(Data0, Buffer),
+ {Data,RName} = decode_name(Data1, Buffer),
+ case Data of
+ <<Serial:32,Refresh:32/signed,Retry:32/signed,
+ Expiry:32/signed,Minimum:32>> ->
+ {MName,RName,Serial,Refresh,Retry,Expiry,Minimum};
+ _ ->
+ %% Broken SOA RR data
+ throw(?DECODE_ERROR)
+ end;
+decode_data(Dom, _, ?S_MB, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Dom, _, ?S_MG, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Dom, _, ?S_MR, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(Data, _, ?S_NULL, _) -> Data;
+decode_data(<<A,B,C,D,Proto,BitMap/binary>>, in, ?S_WKS, _Buffer) ->
+ {{A,B,C,D},Proto,BitMap};
+decode_data(Dom, _, ?S_PTR, Buffer) -> decode_domain(Dom, Buffer);
+decode_data(<<CpuLen,CPU:CpuLen/binary,
+ OsLen,OS:OsLen/binary>>, _, ?S_HINFO, _) ->
+ {binary_to_list(CPU),binary_to_list(OS)};
+decode_data(Data0, _, ?S_MINFO, Buffer) ->
+ {Data1,RM} = decode_name(Data0, Buffer),
+ {Data,EM} = decode_name(Data1, Buffer),
+ case Data of
+ <<>> -> {RM,EM};
+ _ ->
+ %% Broken MINFO data
+ throw(?DECODE_ERROR)
+ end;
+decode_data(<<Prio:16,Dom/binary>>, _, ?S_MX, Buffer) ->
+ {Prio,decode_domain(Dom, Buffer)};
+decode_data(<<Prio:16,Weight:16,Port:16,Dom/binary>>, _, ?S_SRV, Buffer) ->
+ {Prio,Weight,Port,decode_domain(Dom, Buffer)};
+decode_data(<<Order:16,Preference:16,Data0/binary>>, _, ?S_NAPTR, Buffer) ->
+ {Data1,Flags} = decode_string(Data0),
+ {Data2,Services} = decode_string(Data1),
+ {Data,Regexp} = decode_characters(Data2, utf8),
+ Replacement = decode_domain(Data, Buffer),
+ {Order,Preference,string:to_lower(Flags),string:to_lower(Services),
+ Regexp,Replacement};
+%% ?S_OPT falls through to default
+decode_data(Data, _, ?S_TXT, _) ->
+ decode_txt(Data);
+decode_data(Data, _, ?S_SPF, _) ->
+ decode_txt(Data);
+%% so far unknown or non standard
+decode_data(Data, _, _, _) ->
+ Data.
+
+%% Array of strings
+%%
+decode_txt(<<>>) -> [];
+decode_txt(Bin) ->
+ {Rest,String} = decode_string(Bin),
+ [String|decode_txt(Rest)].
+
+decode_string(<<Len,Bin:Len/binary,Rest/binary>>) ->
+ {Rest,binary_to_list(Bin)};
+decode_string(_) ->
+ %% Broken string
+ throw(?DECODE_ERROR).
+
+decode_characters(<<Len,Bin:Len/binary,Rest/binary>>, Encoding) ->
+ {Rest,unicode:characters_to_list(Bin, Encoding)};
+decode_characters(_, _) ->
+ %% Broken encoded string
+ throw(?DECODE_ERROR).
+
+%% One domain name only; there must be nothing after it
+%%
+decode_domain(Bin, Buffer) ->
+ case decode_name(Bin, Buffer) of
+ {<<>>,Name} -> Name;
+ _ ->
+ %% Garbage after domain name
+ throw(?DECODE_ERROR)
+ end.
+
+%% Domain name -> {RestBin,Name}
+%%
+decode_name(Bin, Buffer) ->
+ decode_name(Bin, Buffer, [], Bin, 0).
+
+%% Tail advances with Rest until the first indirection is followed,
+%% then it stays put at that Rest.
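+%% Illustration (assumed wire data): the label sequence
+%% <<3,"www",7,"example",3,"com",0>> decodes to "www.example.com", while a
+%% name ending in <<3:2,Ptr:14>> continues decoding at byte offset Ptr of
+%% Buffer and leaves Rest positioned just after the 2-byte pointer.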
+decode_name(_, Buffer, _Labels, _Tail, Cnt) when Cnt > byte_size(Buffer) ->
+    throw(?DECODE_ERROR); %% Insanity bailout - this must be a decode loop
+decode_name(<<0,Rest/binary>>, _Buffer, Labels, Tail, Cnt) ->
+ %% Root domain, we have all labels for the domain name
+ {if Cnt =/= 0 -> Tail; true -> Rest end,
+ decode_name_labels(Labels)};
+decode_name(<<0:2,Len:6,Label:Len/binary,Rest/binary>>,
+ Buffer, Labels, Tail, Cnt) ->
+ %% One plain label here
+ decode_name(Rest, Buffer, [Label|Labels],
+ if Cnt =/= 0 -> Tail; true -> Rest end,
+ Cnt);
+decode_name(<<3:2,Ptr:14,Rest/binary>>, Buffer, Labels, Tail, Cnt) ->
+ %% Indirection - reposition in buffer and recurse
+ case Buffer of
+ <<_:Ptr/binary,Bin/binary>> ->
+ decode_name(Bin, Buffer, Labels,
+ if Cnt =/= 0 -> Tail; true -> Rest end,
+ Cnt+2); % size of indirection pointer
+ _ ->
+ %% Indirection pointer outside buffer
+ throw(?DECODE_ERROR)
+ end;
+decode_name(_, _, _, _, _) -> throw(?DECODE_ERROR).
+
+%% Reverse list of labels (binaries) -> domain name (string)
+decode_name_labels([]) -> ".";
+decode_name_labels(Labels) ->
+ decode_name_labels(Labels, "").
+
+decode_name_labels([Label], Name) ->
+ decode_name_label(Label, Name);
+decode_name_labels([Label|Labels], Name) ->
+ decode_name_labels(Labels, "."++decode_name_label(Label, Name)).
+
+decode_name_label(<<>>, _Name) ->
+ %% Empty label is only allowed for the root domain,
+ %% and that is handled above.
+ throw(?DECODE_ERROR);
+decode_name_label(Label, Name) ->
+ decode_name_label(Label, Name, byte_size(Label)).
+
+%% Decode $. and $\\ to become $\\ escaped characters
+%% in the string representation.
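+%% E.g. (illustrative): a label binary <<"a.b">> decodes to the string
+%% "a\\.b" (as written in Erlang source), i.e. the embedded dot is
+%% backslash-escaped so it cannot be confused with a label separator.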
+-compile({inline, [decode_name_label/3]}).
+decode_name_label(_, Name, 0) -> Name;
+decode_name_label(Label, Name, N) ->
+ M = N-1,
+ case Label of
+ <<_:M/binary,($\\),_/binary>> ->
+ decode_name_label(Label, "\\\\"++Name, M);
+ <<_:M/binary,($.),_/binary>> ->
+ decode_name_label(Label, "\\."++Name, M);
+ <<_:M/binary,C,_/binary>> ->
+ decode_name_label(Label, [C|Name], M);
+ _ ->
+ %% This should not happen but makes surrounding
+ %% programming errors easier to locate.
+ erlang:error(badarg, [Label,Name,N])
+ end.
+
+%%
+%% Data field -> {binary(),NewCompressionTable}
+%%
+encode_data(Comp, _, ?S_A, in, {A,B,C,D}) -> {<<A,B,C,D>>,Comp};
+encode_data(Comp, _, ?S_AAAA, in, {A,B,C,D,E,F,G,H}) ->
+ {<<A:16,B:16,C:16,D:16,E:16,F:16,G:16,H:16>>,Comp};
+encode_data(Comp, Pos, ?S_NS, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, Pos, ?S_MD, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, Pos, ?S_MF, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, Pos, ?S_CNAME, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp0, Pos, ?S_SOA, in,
+ {MName,RName,Serial,Refresh,Retry,Expiry,Minimum}) ->
+ {B1,Comp1} = encode_name(Comp0, Pos, MName),
+ {B,Comp} = encode_name(B1, Comp1, Pos+byte_size(B1), RName),
+ {<<B/binary,Serial:32,Refresh:32/signed,Retry:32/signed,
+ Expiry:32/signed,Minimum:32>>,
+ Comp};
+encode_data(Comp, Pos, ?S_MB, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, Pos, ?S_MG, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, Pos, ?S_MR, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, _, ?S_NULL, in, Data) ->
+ {iolist_to_binary(Data),Comp};
+encode_data(Comp, _, ?S_WKS, in, {{A,B,C,D},Proto,BitMap}) ->
+ BitMapBin = iolist_to_binary(BitMap),
+ {<<A,B,C,D,Proto,BitMapBin/binary>>,Comp};
+encode_data(Comp, Pos, ?S_PTR, in, Domain) -> encode_name(Comp, Pos, Domain);
+encode_data(Comp, _, ?S_HINFO, in, {CPU,OS}) ->
+ Bin = encode_string(iolist_to_binary(CPU)),
+ {encode_string(Bin, iolist_to_binary(OS)),Comp};
+encode_data(Comp0, Pos, ?S_MINFO, in, {RM,EM}) ->
+ {Bin,Comp} = encode_name(Comp0, Pos, RM),
+ encode_name(Bin, Comp, Pos+byte_size(Bin), EM);
+encode_data(Comp, Pos, ?S_MX, in, {Pref,Exch}) ->
+ encode_name(<<Pref:16>>, Comp, Pos+2, Exch);
+encode_data(Comp, Pos, ?S_SRV, in, {Prio,Weight,Port,Target}) ->
+ encode_name(<<Prio:16,Weight:16,Port:16>>, Comp, Pos+2+2+2, Target);
+encode_data(Comp, Pos, ?S_NAPTR, in,
+ {Order,Preference,Flags,Services,Regexp,Replacement}) ->
+ B0 = <<Order:16,Preference:16>>,
+ B1 = encode_string(B0, iolist_to_binary(Flags)),
+ B2 = encode_string(B1, iolist_to_binary(Services)),
+ B3 = encode_string(B2, unicode:characters_to_binary(Regexp,
+ unicode, utf8)),
+ %% Bypass name compression (RFC 2915: section 2)
+ {B,_} = encode_name(B3, gb_trees:empty(), Pos+byte_size(B3), Replacement),
+ {B,Comp};
+%% ?S_OPT falls through to default
+encode_data(Comp, _, ?S_TXT, in, Data) -> {encode_txt(Data),Comp};
+encode_data(Comp, _, ?S_SPF, in, Data) -> {encode_txt(Data),Comp};
+encode_data(Comp, _Pos, _Type, _Class, Data) -> {iolist_to_binary(Data),Comp}.
+
+%% Array of strings
+%%
+encode_txt(Strings) ->
+ encode_txt(<<>>, Strings).
+%%
+encode_txt(Bin, []) -> Bin;
+encode_txt(Bin, [S|Ss]) ->
+ encode_txt(encode_string(Bin, iolist_to_binary(S)), Ss).
+
+%% Singular string
+%%
+encode_string(StringBin) ->
+ encode_string(<<>>, StringBin).
+%%
+encode_string(Bin, StringBin) ->
+ Size = byte_size(StringBin),
+ if Size =< 255 ->
+ <<Bin/binary,Size,StringBin/binary>>
+ end.
+
+%% Domain name
+%%
+encode_name(Comp, Pos, Name) ->
+ encode_name(<<>>, Comp, Pos, Name).
+%%
+%% Bin = target binary
+%% Comp = compression lookup table; label list -> buffer position
+%% Pos = position in DNS message
+%% Name = domain name to encode
+%%
+%% The name compression does not make the case conversions
+%% it could. This means case will be preserved at the cost
+%% of missed compression opportunities. But if the encoded
+%% message use the same case for different instances of
+%% the same domain name there is no problem, and if not it is
+%% only compression that suffers. Furthermore encode+decode
+%% this way becomes an identity operation for any decoded
+%% DNS message which is nice for testing encode.
+%%
+encode_name(Bin0, Comp0, Pos, Name) ->
+ case encode_labels(Bin0, Comp0, Pos, name2labels(Name)) of
+ {Bin,_}=Result when byte_size(Bin) - byte_size(Bin0) =< 255 -> Result;
+ _ ->
+ %% Fail on too long name
+ erlang:error(badarg, [Bin0,Comp0,Pos,Name])
+ end.
+
+name2labels("") -> [];
+name2labels(".") -> [];
+name2labels(Cs) -> name2labels(<<>>, Cs).
+%%
+-compile({inline, [name2labels/2]}).
+name2labels(Label, "") -> [Label];
+name2labels(Label, ".") -> [Label];
+name2labels(Label, "."++Cs) -> [Label|name2labels(<<>>, Cs)];
+name2labels(Label, "\\"++[C|Cs]) -> name2labels(<<Label/binary,C>>, Cs);
+name2labels(Label, [C|Cs]) -> name2labels(<<Label/binary,C>>, Cs).
+
+%% Fail on empty or too long labels.
+encode_labels(Bin, Comp, _Pos, []) ->
+ {<<Bin/binary,0>>,Comp};
+encode_labels(Bin, Comp0, Pos, [L|Ls]=Labels)
+ when 1 =< byte_size(L), byte_size(L) =< 63 ->
+ case gb_trees:lookup(Labels, Comp0) of
+ none ->
+ Comp = if Pos < (3 bsl 14) ->
+ %% Just in case - compression
+ %% pointers can not reach further
+ gb_trees:insert(Labels, Pos, Comp0);
+ true -> Comp0
+ end,
+ Size = byte_size(L),
+ encode_labels(<<Bin/binary,Size,L/binary>>,
+ Comp, Pos+1+Size, Ls);
+ {value,Ptr} ->
+ %% Name compression - point to already encoded name
+ {<<Bin/binary,3:2,Ptr:14>>,Comp0}
+ end.
diff --git a/lib/kernel/src/inet_dns.hrl b/lib/kernel/src/inet_dns.hrl
new file mode 100644
index 0000000000..1b69f31a4d
--- /dev/null
+++ b/lib/kernel/src/inet_dns.hrl
@@ -0,0 +1,208 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%% Definitions for the Domain Name System
+%%
+
+%%
+%% Currently defined opcodes
+%%
+-define(QUERY, 16#0). %% standard query
+-define(IQUERY, 16#1). %% inverse query
+-define(STATUS, 16#2). %% nameserver status query
+%% -define(xxx, 16#3) %% 16#3 reserved
+%% non standard
+-define(UPDATEA, 16#9). %% add resource record
+-define(UPDATED, 16#a). %% delete a specific resource record
+-define(UPDATEDA, 16#b). %% delete all named resource records
+-define(UPDATEM, 16#c). %% modify a specific resource record
+-define(UPDATEMA, 16#d). %% modify all named resource records
+
+-define(ZONEINIT, 16#e). %% initial zone transfer
+-define(ZONEREF, 16#f). %% incremental zone refresh
+
+
+%%
+%% Currently defined response codes
+%%
+-define(NOERROR, 0). %% no error
+-define(FORMERR, 1). %% format error
+-define(SERVFAIL, 2). %% server failure
+-define(NXDOMAIN, 3). %% non existent domain
+-define(NOTIMP, 4). %% not implemented
+-define(REFUSED, 5). %% query refused
+%% non standard
+-define(NOCHANGE, 16#f). %% update failed to change db
+-define(BADVERS, 16).
+
+%%
+%% Type values for resources and queries
+%%
+-define(T_A, 1). %% host address
+-define(T_NS, 2). %% authoritative server
+-define(T_MD, 3). %% mail destination
+-define(T_MF, 4). %% mail forwarder
+-define(T_CNAME, 5). %% canonical name
+-define(T_SOA, 6). %% start of authority zone
+-define(T_MB, 7). %% mailbox domain name
+-define(T_MG, 8). %% mail group member
+-define(T_MR, 9). %% mail rename name
+-define(T_NULL, 10). %% null resource record
+-define(T_WKS, 11). %% well known service
+-define(T_PTR, 12). %% domain name pointer
+-define(T_HINFO, 13). %% host information
+-define(T_MINFO, 14). %% mailbox information
+-define(T_MX, 15). %% mail routing information
+-define(T_TXT, 16). %% text strings
+-define(T_AAAA, 28). %% ipv6 address
+%% SRV (RFC 2052)
+-define(T_SRV, 33). %% services
+%% NAPTR (RFC 2915)
+-define(T_NAPTR, 35). %% naming authority pointer
+-define(T_OPT, 41). %% EDNS pseudo-rr RFC2671(7)
+%% SPF (RFC 4408)
+-define(T_SPF, 99). %% server policy framework
+%% non standard
+-define(T_UINFO, 100). %% user (finger) information
+-define(T_UID, 101). %% user ID
+-define(T_GID, 102). %% group ID
+-define(T_UNSPEC, 103). %% Unspecified format (binary data)
+%% Query type values which do not appear in resource records
+-define(T_AXFR, 252). %% transfer zone of authority
+-define(T_MAILB, 253). %% transfer mailbox records
+-define(T_MAILA, 254). %% transfer mail agent records
+-define(T_ANY, 255). %% wildcard match
+
+%%
+%% Symbolic Type values for resources and queries
+%%
+-define(S_A, a). %% host address
+-define(S_NS, ns). %% authoritative server
+-define(S_MD, md). %% mail destination
+-define(S_MF, mf). %% mail forwarder
+-define(S_CNAME, cname). %% canonical name
+-define(S_SOA, soa). %% start of authority zone
+-define(S_MB, mb). %% mailbox domain name
+-define(S_MG, mg). %% mail group member
+-define(S_MR, mr). %% mail rename name
+-define(S_NULL, null). %% null resource record
+-define(S_WKS, wks). %% well known service
+-define(S_PTR, ptr). %% domain name pointer
+-define(S_HINFO, hinfo). %% host information
+-define(S_MINFO, minfo). %% mailbox information
+-define(S_MX, mx). %% mail routing information
+-define(S_TXT, txt). %% text strings
+-define(S_AAAA, aaaa). %% ipv6 address
+%% SRV (RFC 2052)
+-define(S_SRV, srv). %% services
+%% NAPTR (RFC 2915)
+-define(S_NAPTR, naptr). %% naming authority pointer
+-define(S_OPT, opt). %% EDNS pseudo-rr RFC2671(7)
+%% SPF (RFC 4408)
+-define(S_SPF, spf). %% server policy framework
+%% non standard
+-define(S_UINFO, uinfo). %% user (finger) information
+-define(S_UID, uid). %% user ID
+-define(S_GID, gid). %% group ID
+-define(S_UNSPEC, unspec). %% Unspecified format (binary data)
+%% Query type values which do not appear in resource records
+-define(S_AXFR, axfr). %% transfer zone of authority
+-define(S_MAILB, mailb). %% transfer mailbox records
+-define(S_MAILA, maila). %% transfer mail agent records
+-define(S_ANY, any). %% wildcard match
+
+%%
+%% Values for class field
+%%
+
+-define(C_IN, 1). %% the arpa internet
+-define(C_CHAOS, 3). %% for chaos net at MIT
+-define(C_HS, 4). %% for Hesiod name server at MIT
+%% Query class values which do not appear in resource records
+-define(C_ANY, 255). %% wildcard match
+
+
+%% indirection mask for compressed domain names
+-define(INDIR_MASK, 16#c0).
+
+%%
+%% Structure for query header, the order of the fields is machine and
+%% compiler dependent; in our case, the bits within a byte are assigned
+%% least significant first, while the order of transmission is most
+%% significant first. This requires a somewhat confusing rearrangement.
+%%
+-record(dns_header,
+ {
+ id = 0, %% ushort query identification number
+ %% byte F0
+ qr = 0, %% :1 response flag
+ opcode = 0, %% :4 purpose of message
+	 aa = 0, %% :1 authoritative answer
+ tc = 0, %% :1 truncated message
+ rd = 0, %% :1 recursion desired
+ %% byte F1
+ ra = 0, %% :1 recursion available
+ pr = 0, %% :1 primary server required (non standard)
+ %% :2 unused bits
+ rcode = 0 %% :4 response code
+ }).
+
+-record(dns_rec,
+ {
+ header, %% dns_header record
+ qdlist = [], %% list of question entries
+ anlist = [], %% list of answer entries
+ nslist = [], %% list of authority entries
+ arlist = [] %% list of resource entries
+ }).
+
+%% DNS resource record
+-record(dns_rr,
+ {
+ domain = "", %% resource domain
+ type = any, %% resource type
+	 class = in, %% resource class
+ cnt = 0, %% access count
+ ttl = 0, %% time to live
+ data = [], %% raw data
+ %%
+ tm, %% creation time
+ bm = [], %% Bitmap storing domain character case information.
+ func = false %% Optional function calculating the data field.
+ }).
+
+-define(DNS_UDP_PAYLOAD_SIZE, 1280).
+
+-record(dns_rr_opt, %% EDNS RR OPT (RFC2671), dns_rr{type=opt}
+ {
+ domain = "", %% should be the root domain
+ type = opt,
+ udp_payload_size = ?DNS_UDP_PAYLOAD_SIZE, %% RFC2671(4.5 CLASS)
+ ext_rcode = 0, %% RFC2671(4.6 EXTENDED-RCODE)
+ version = 0, %% RFC2671(4.6 VERSION)
+ z = 0, %% RFC2671(4.6 Z)
+ data = [] %% RFC2671(4.4)
+ }).
+
+-record(dns_query,
+ {
+ domain, %% query domain
+ type, %% query type
+ class %% query class
+ }).
diff --git a/lib/kernel/src/inet_dns_record_adts.pl b/lib/kernel/src/inet_dns_record_adts.pl
new file mode 100644
index 0000000000..b1d8fab939
--- /dev/null
+++ b/lib/kernel/src/inet_dns_record_adts.pl
@@ -0,0 +1,180 @@
+#! /usr/bin/env perl
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+use strict;
+
+# Generate ADT (Abstract Data Type) access and generation functions
+# for internal records.
+#
+# The following defines which ADT function sets will be generated
+# and which record fields will be exposed.
+#
+# (FunctionBaseName => [RecordName, FieldName ...], ...)
+my %Names = ('msg' => ['dns_rec', 'header', 'qdlist',
+ 'anlist', 'nslist', 'arlist'],
+ 'dns_rr' => ['dns_rr', 'domain', 'type', 'class', 'ttl', 'data'],
+ 'dns_rr_opt' => ['dns_rr_opt', 'domain', 'type',
+ 'udp_payload_size', 'ext_rcode', 'version',
+ 'z', 'data'],
+ 'dns_query' => ['dns_query', 'domain', 'type', 'class'],
+ 'header' => ['dns_header', 'id', 'qr', 'opcode', 'aa', 'tc',
+ 'rd', 'ra', 'pr', 'rcode']);
+# The functions are defined in the __DATA__ section at the end.
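+#
+# As an illustration only (the authoritative templates are in the __DATA__
+# section), the 'dns_query' entry above expands to Erlang clauses roughly
+# like:
+#
+#   dns_query(#dns_query{domain=V1,type=V2,class=V3}) ->
+#       [{domain,V1},{type,V2},{class,V3}].
+#
+#   dns_query(#dns_query{domain=V1}, domain) -> V1;
+#   dns_query(#dns_query{type=V2}, type) -> V2;
+#   ...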
+
+# Read in __DATA__ and merge lines.
+my $line = '';
+my @DATA;
+my @INDEX;
+while(<DATA>) {
+ chomp;
+ $line .= $_;
+ unless ($line =~ s/\\$//) {
+ if ($line =~ s/^[+]//) {
+ push(@INDEX, $line);
+ } else {
+ push(@DATA, $line);
+ }
+ $line = '';
+ }
+}
+
+$" = ',';
+$\ = "\n";
+while( my ($Name, $r) = each(%Names)) {
+ # Create substitutions for this Name
+ my ($Record, @Fields) = @{ $r };
+ my @FieldMatchValues;
+ my @FieldValueTuples;
+ my @Values;
+ my $n = $#{ $r };
+ for my $i ( 1 .. $n ) {
+ push(@FieldMatchValues, "$Fields[$i-1]=V$i");
+ push(@FieldValueTuples, "{$Fields[$i-1],V$i}");
+ push(@Values, "V$i");
+ }
+ # "@FieldMatchValues" = "field1=V1,field2=V2"...",fieldN=VN"
+ # "@FieldMatchTuples" = "{field1,V1},{field2,V2}"...",{fieldN,VN}"
+ # "@Values" = "V1,V2"...",VN"
+ my @D = @DATA;
+ foreach my $line (@D) {
+ my $m = 1;
+ # For leading * iterate $n times, otherwise once
+ $line =~ s/^\s*[*]// and $m = $n;
+ for my $i ( 1 .. $m ) {
+ # For this iteration - substitute and print
+ my $Value = "V$i";
+ my $SemicolonDot = ";";
+ $SemicolonDot = "." if $i == $m;
+ my @ValuesIgnoreValue = @Values;
+ $ValuesIgnoreValue[$i-1] = '_';
+ # "$Value" = "V1" or "V2" or ... "VN"
+ # "@ValuesIgnoreValue" = "_,V2"...",VN"
+ # or "V1,_"...",VN"
+ # or ... "V1,V2"...",_"
+ $_ = $line;
+ s/FieldMatchValues\b/@FieldMatchValues/g;
+ s/FieldValueTuples\b/@FieldValueTuples/g;
+ s/Field\b/$Fields[$i-1]/g;
+ s/Name\b/$Name/g;
+ s/Record\b/$Record/g;
+ s/ValuesIgnoreValue\b/@ValuesIgnoreValue/g;
+ s/Values\b/@Values/g;
+ s/Value\b/$Value/g;
+ s/[;][.]/$SemicolonDot/g;
+ s/->\s*/->\n /;
+ print;
+ }
+ }
+}
+for my $i ( 0 .. $#INDEX ) {
+ my $line = $INDEX[$i];
+ if ($line =~ s/^[*]//) {
+ while( my ($Name, $r) = each(%Names)) {
+ my ($Record) = @{ $r };
+ $_ = $line;
+ s/Name\b/$Name/g;
+ s/Record\b/$Record/g;
+ s/->\s*/->\n /;
+ print;
+ }
+ } else {
+ print $line;
+ }
+}
+
+# Trailing \ will merge line with the following.
+# Leading * will iterate the (merged) line over all field names.
+# Sub-words in the loop above are substituted.
+__DATA__
+
+%%
+%% Abstract Data Type functions for #Record{}
+%%
+%% -export([Name/1, Name/2,
+%% make_Name/0, make_Name/1, make_Name/2, make_Name/3]).
+
+%% Split #Record{} into property list
+%%
+Name(#Record{FieldMatchValues}) -> \
+ [FieldValueTuples].
+
+%% Get one field value from #Record{}
+%%
+*Name(#Record{Field=Value}, Field) -> \
+ Value;
+%% Map field name list to value list from #Record{}
+%%
+Name(#Record{}, []) -> \
+ [];
+*Name(#Record{Field=Value}=R, [Field|L]) -> \
+ [Value|Name(R, L)];.
+
+%% Generate default #Record{}
+%%
+make_Name() -> \
+ #Record{}.
+
+%% Generate #Record{} from property list
+%%
+make_Name(L) when is_list(L) -> \
+ make_Name(#Record{}, L).
+
+%% Generate #Record{} with one updated field
+%%
+*make_Name(Field, Value) -> \
+ #Record{Field=Value};
+%%
+%% Update #Record{} from property list
+%%
+make_Name(#Record{FieldMatchValues}, L) when is_list(L) -> \
+ do_make_Name(L, Values).
+do_make_Name([], Values) -> \
+ #Record{FieldMatchValues};
+*do_make_Name([{Field,Value}|L], ValuesIgnoreValue) -> \
+ do_make_Name(L, Values);.
+
+%% Update one field of #Record{}
+%%
+*make_Name(#Record{}=R, Field, Value) -> \
+ R#Record{Field=Value};.
+
++%% Record type index
++%%
++*record_adts(#Record{}) -> Name;
++record_adts(_) -> undefined.
diff --git a/lib/kernel/src/inet_gethost_native.erl b/lib/kernel/src/inet_gethost_native.erl
new file mode 100644
index 0000000000..abdbe2b8cf
--- /dev/null
+++ b/lib/kernel/src/inet_gethost_native.erl
@@ -0,0 +1,626 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_gethost_native).
+-behaviour(supervisor_bridge).
+
+%% Supervisor bridge exports
+-export([start_link/0, init/1, terminate/2, start_raw/0, run_once/0]).
+
+%% Server export
+-export([server_init/2, main_loop/1]).
+
+%% API exports
+-export([gethostbyname/1, gethostbyname/2, gethostbyaddr/1, control/1]).
+
+%%% Exports for sys:handle_system_msg/6
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-include_lib("kernel/include/inet.hrl").
+
+-define(PROCNAME_SUP, inet_gethost_native_sup).
+
+-define(OP_GETHOSTBYNAME,1).
+-define(OP_GETHOSTBYADDR,2).
+-define(OP_CANCEL_REQUEST,3).
+-define(OP_CONTROL,4).
+
+-define(PROTO_IPV4,1).
+-define(PROTO_IPV6,2).
+
+%% OP_CONTROL
+-define(SETOPT_DEBUG_LEVEL, 0).
+
+-define(UNIT_ERROR,0).
+-define(UNIT_IPV4,4).
+-define(UNIT_IPV6,16).
+
+-define(PORT_PROGRAM, "inet_gethost").
+-define(DEFAULT_POOLSIZE, 4).
+-define(REQUEST_TIMEOUT, (inet_db:res_option(timeout)*4)).
+
+-define(MAX_TIMEOUT, 16#7FFFFFF).
+-define(INVALID_SERIAL, 16#FFFFFFFF).
+
+%-define(DEBUG,1).
+-ifdef(DEBUG).
+-define(dbg(A,B), io:format(A,B)).
+-else.
+-define(dbg(A,B), noop).
+-endif.
+
+-define(SEND_AFTER(A,B,C),erlang:send_after(A,B,C)).
+-define(CANCEL_TIMER(A),erlang:cancel_timer(A)).
+
+%% In Erlang, IPv6 addresses are built as 8-tuples of 16-bit values (not 16-tuples of octets).
+%% This macro, meant to be used in guards, checks one such 16-bit value in the 8-tuple.
+-define(VALID_V6(Part), is_integer(Part), Part < 65536).
+%% Regular IPv4 addresses are represented as 4-tuples of octets; this macro,
+%% meant to be used in guards, checks one such octet.
+-define(VALID_V4(Part), is_integer(Part), Part < 256).
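+%% For example, a function head guarded with these macros could look like
+%%   f({A,B,C,D}) when ?VALID_V4(A), ?VALID_V4(B), ?VALID_V4(C), ?VALID_V4(D) -> ...
+%% (gethostbyaddr/1 below uses exactly this pattern).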
+
+% Requests: one per unique request to the port program; there may be more than one client per request.
+-record(request, {
+ rid, % Request id as sent to port
+ op,
+ proto,
+ rdata,
+ clients = [] % Can be more than one client per request (Pid's).
+}).
+
+
+% Statistics, not used yet.
+-record(statistics, {
+ netdb_timeout = 0,
+ netdb_internal = 0,
+ port_crash = 0,
+ notsup = 0,
+ host_not_found = 0,
+ try_again = 0,
+ no_recovery = 0,
+ no_data = 0
+}).
+
+% The main loop state.
+-record(state, {
+ port = noport, % Port() connected to the port program
+ timeout = 8000, % Timeout value from inet_db:res_option
+ requests, % Table of request
+ req_index, % Table of {{op,proto,rdata},rid}
+ parent, % The supervisor bridge
+ pool_size = 4, % Number of C processes in pool.
+ statistics % Statistics record (records error causes).
+}).
+
+%% The supervisor bridge code
+init([]) -> % Called by supervisor_bridge:start_link
+ Ref = make_ref(),
+ SaveTE = process_flag(trap_exit,true),
+ Pid = spawn_link(?MODULE,server_init,[self(),Ref]),
+ receive
+ Ref ->
+ process_flag(trap_exit,SaveTE),
+ {ok, Pid, Pid};
+ {'EXIT', Pid, Message} ->
+ process_flag(trap_exit,SaveTE),
+ {error, Message}
+ after 10000 ->
+ process_flag(trap_exit,SaveTE),
+ {error, {timeout, ?MODULE}}
+ end.
+
+start_link() ->
+ supervisor_bridge:start_link({local, ?PROCNAME_SUP}, ?MODULE, []).
+
+%% Only used in fallback situations: no supervisor, no bridge; serves only until
+%% no requests are present...
+start_raw() ->
+ spawn(?MODULE,run_once,[]).
+
+run_once() ->
+ Port = do_open_port(get_poolsize(), get_extra_args()),
+ Timeout = ?REQUEST_TIMEOUT,
+ {Pid, R, Request} =
+ receive
+ {{Pid0,R0}, {?OP_GETHOSTBYNAME, Proto0, Name0}} ->
+ {Pid0, R0,
+ [<<1:32, ?OP_GETHOSTBYNAME:8, Proto0:8>>,Name0,0]};
+ {{Pid1,R1}, {?OP_GETHOSTBYADDR, Proto1, Data1}} ->
+ {Pid1, R1,
+ <<1:32, ?OP_GETHOSTBYADDR:8, Proto1:8, Data1/binary>>}
+ after Timeout ->
+ exit(normal)
+ end,
+ (catch port_command(Port, Request)),
+ receive
+ {Port, {data, <<1:32, BinReply/binary>>}} ->
+ Pid ! {R, {ok, BinReply}}
+ after Timeout ->
+ Pid ! {R,{error,timeout}}
+ end.
+
+terminate(_Reason,Pid) ->
+ (catch exit(Pid,kill)),
+ ok.
+
+%%-----------------------------------------------------------------------
+%% Server API
+%%-----------------------------------------------------------------------
+server_init(Starter, Ref) ->
+ process_flag(trap_exit,true),
+ case whereis(?MODULE) of
+ undefined ->
+ case (catch register(?MODULE,self())) of
+ true ->
+ Starter ! Ref;
+ _->
+ exit({already_started,whereis(?MODULE)})
+ end;
+ Winner ->
+ exit({already_started,Winner})
+ end,
+ Poolsize = get_poolsize(),
+ Port = do_open_port(Poolsize, get_extra_args()),
+ Timeout = ?REQUEST_TIMEOUT,
+ put(rid,0),
+ put(num_requests,0),
+ RequestTab = ets:new(ign_requests,[{keypos,#request.rid},set,protected]),
+ RequestIndex = ets:new(ign_req_index,[set,protected]),
+ State = #state{port = Port, timeout = Timeout, requests = RequestTab,
+ req_index = RequestIndex,
+ pool_size = Poolsize,
+ statistics = #statistics{},
+ parent = Starter},
+ main_loop(State).
+
+main_loop(State) ->
+ receive
+ Any ->
+ handle_message(Any,State)
+ end.
+
+handle_message({{Pid,_} = Client, {?OP_GETHOSTBYNAME, Proto, Name} = R},
+ State) when is_pid(Pid) ->
+ NewState = do_handle_call(R,Client,State,
+ [<<?OP_GETHOSTBYNAME:8, Proto:8>>, Name,0]),
+ main_loop(NewState);
+
+handle_message({{Pid,_} = Client, {?OP_GETHOSTBYADDR, Proto, Data} = R},
+ State) when is_pid(Pid) ->
+ NewState = do_handle_call(R,Client,State,
+ <<?OP_GETHOSTBYADDR:8, Proto:8, Data/binary>>),
+ main_loop(NewState);
+
+handle_message({{Pid,Ref}, {?OP_CONTROL, Ctl, Data}}, State)
+ when is_pid(Pid) ->
+ catch port_command(State#state.port,
+ <<?INVALID_SERIAL:32, ?OP_CONTROL:8,
+ Ctl:8, Data/binary>>),
+ Pid ! {Ref, ok},
+ main_loop(State);
+
+handle_message({{Pid,Ref}, restart_port}, State)
+ when is_pid(Pid) ->
+ NewPort=restart_port(State),
+ Pid ! {Ref, ok},
+ main_loop(State#state{port=NewPort});
+
+handle_message({Port, {data, Data}}, State = #state{port = Port}) ->
+ NewState = case Data of
+ <<RID:32, BinReply/binary>> ->
+ case BinReply of
+ <<Unit, _/binary>> when Unit =:= ?UNIT_ERROR;
+ Unit =:= ?UNIT_IPV4;
+ Unit =:= ?UNIT_IPV6 ->
+ case pick_request(State,RID) of
+ false ->
+ State;
+ Req ->
+ lists:foreach(fun({P,R,TR}) ->
+ ?CANCEL_TIMER(TR),
+ P ! {R,
+ {ok,
+ BinReply}}
+ end,
+ Req#request.clients),
+ State
+ end;
+ _UnitError ->
+ %% Unexpected data, let's restart it,
+ %% it must be broken.
+ NewPort=restart_port(State),
+ State#state{port=NewPort}
+ end;
+ _BasicFormatError ->
+ NewPort=restart_port(State),
+ State#state{port=NewPort}
+ end,
+ main_loop(NewState);
+
+handle_message({'EXIT',Port,_Reason}, State = #state{port = Port}) ->
+ ?dbg("Port died.~n",[]),
+ NewPort=restart_port(State),
+ main_loop(State#state{port=NewPort});
+
+handle_message({Port,eof}, State = #state{port = Port}) ->
+ ?dbg("Port eof'ed.~n",[]),
+ NewPort=restart_port(State),
+ main_loop(State#state{port=NewPort});
+
+handle_message({timeout, Pid, RID}, State) ->
+ case pick_client(State,RID,Pid) of
+ false ->
+ false;
+ {more, {P,R,_}} ->
+ P ! {R,{error,timeout}};
+ {last, {LP,LR,_}} ->
+ LP ! {LR, {error,timeout}},
+ %% Remove the whole request structure...
+ pick_request(State, RID),
+ %% Also cancel the request to the port program...
+ (catch port_command(State#state.port,
+ <<RID:32,?OP_CANCEL_REQUEST>>))
+ end,
+ main_loop(State);
+
+handle_message({system, From, Req}, State) ->
+ sys:handle_system_msg(Req, From, State#state.parent, ?MODULE, [],
+ State);
+
+handle_message(_, State) -> % Stray messages from dying ports etc.
+ main_loop(State).
+
+
+do_handle_call(R,Client0,State,RData) ->
+ Req = find_request(State,R),
+ Timeout = State#state.timeout,
+ {P,Ref} = Client0,
+ TR = ?SEND_AFTER(Timeout,self(),{timeout, P, Req#request.rid}),
+ Client = {P,Ref,TR},
+ case Req#request.clients of
+ [] ->
+ RealRData = [<<(Req#request.rid):32>>|RData],
+ (catch port_command(State#state.port, RealRData)),
+ ets:insert(State#state.requests,Req#request{clients = [Client]});
+ Tail ->
+ ets:insert(State#state.requests,Req#request{clients = [Client | Tail]})
+ end,
+ State.
+
+find_request(State, R = {Op, Proto, Data}) ->
+ case ets:lookup(State#state.req_index,R) of
+ [{R, Rid}] ->
+ [Ret] = ets:lookup(State#state.requests,Rid),
+ Ret;
+ [] ->
+ NRid = get_rid(),
+ Req = #request{rid = NRid, op = Op, proto = Proto, rdata = Data},
+ ets:insert(State#state.requests, Req),
+ ets:insert(State#state.req_index,{R,NRid}),
+ put(num_requests,get(num_requests) + 1),
+ Req
+ end.
+
+pick_request(State, RID) ->
+ case ets:lookup(State#state.requests, RID) of
+ [] ->
+ false;
+ [#request{rid = RID, op = Op, proto = Proto, rdata = Data}=R] ->
+ ets:delete(State#state.requests,RID),
+ ets:delete(State#state.req_index,{Op,Proto,Data}),
+ put(num_requests,get(num_requests) - 1),
+ R
+ end.
+
+pick_client(State,RID,Clid) ->
+ case ets:lookup(State#state.requests, RID) of
+ [] ->
+ false;
+ [R] ->
+ case R#request.clients of
+ [SoleClient] ->
+ {last, SoleClient}; % Note: not removed; the caller
+ % should clean up the request data
+ CList ->
+ case lists:keysearch(Clid,1,CList) of
+ {value, Client} ->
+ NCList = lists:keydelete(Clid,1,CList),
+ ets:insert(State#state.requests,
+ R#request{clients = NCList}),
+ {more, Client};
+ false ->
+ false
+ end
+ end
+ end.
+
+get_rid () ->
+ New = (get(rid) + 1) rem 16#7FFFFFF,
+ put(rid,New),
+ New.
+
+
+foreach(Fun,Table) ->
+ foreach(Fun,Table,ets:first(Table)).
+
+foreach(_Fun,_Table,'$end_of_table') ->
+ ok;
+foreach(Fun,Table,Key) ->
+ [Object] = ets:lookup(Table,Key),
+ Fun(Object),
+ foreach(Fun,Table,ets:next(Table,Key)).
+
+restart_port(#state{port = Port, requests = Requests}) ->
+ (catch port_close(Port)),
+ NewPort = do_open_port(get_poolsize(), get_extra_args()),
+ foreach(fun(#request{rid = Rid, op = Op, proto = Proto, rdata = Rdata}) ->
+ case Op of
+ ?OP_GETHOSTBYNAME ->
+ port_command(NewPort,[<<Rid:32,?OP_GETHOSTBYNAME:8,
+ Proto:8>>,
+ Rdata,0]);
+ ?OP_GETHOSTBYADDR ->
+ port_command(NewPort,
+ <<Rid:32,?OP_GETHOSTBYADDR:8, Proto:8,
+ Rdata/binary>>)
+ end
+ end,
+ Requests),
+ NewPort.
+
+
+
+do_open_port(Poolsize, ExtraArgs) ->
+ try
+ open_port({spawn,
+ ?PORT_PROGRAM++" "++integer_to_list(Poolsize)++" "++
+ ExtraArgs},
+ [{packet,4},eof,binary,overlapped_io])
+ catch
+ error:_ ->
+ open_port({spawn,
+ ?PORT_PROGRAM++" "++integer_to_list(Poolsize)++
+ " "++ExtraArgs},
+ [{packet,4},eof,binary])
+ end.
+
+get_extra_args() ->
+ FirstPart = case application:get_env(kernel, gethost_prioritize) of
+ {ok, false} ->
+ " -ng";
+ _ ->
+ ""
+ end,
+ case application:get_env(kernel, gethost_extra_args) of
+ {ok, L} when is_list(L) ->
+ FirstPart++" "++L;
+ _ ->
+ FirstPart++""
+ end.
+
+get_poolsize() ->
+ case application:get_env(kernel, gethost_poolsize) of
+ {ok,I} when is_integer(I) ->
+ I;
+ _ ->
+ ?DEFAULT_POOLSIZE
+ end.
+
+%%------------------------------------------------------------------
+%% System messages callbacks
+%%------------------------------------------------------------------
+
+system_continue(_Parent, _, State) ->
+ main_loop(State).
+
+system_terminate(Reason, _Parent, _, _State) ->
+ exit(Reason).
+
+system_code_change(State, _Module, _OldVsn, _Extra) ->
+ {ok, State}. %% Nothing to do in this version.
+
+
+%%-----------------------------------------------------------------------
+%% Client API
+%%-----------------------------------------------------------------------
+
+gethostbyname(Name) ->
+ gethostbyname(Name, inet).
+
+gethostbyname(Name, inet) when is_list(Name) ->
+ getit(?OP_GETHOSTBYNAME, ?PROTO_IPV4, Name);
+gethostbyname(Name, inet6) when is_list(Name) ->
+ getit(?OP_GETHOSTBYNAME, ?PROTO_IPV6, Name);
+gethostbyname(Name, Type) when is_atom(Name) ->
+ gethostbyname(atom_to_list(Name), Type);
+gethostbyname(_, _) ->
+ {error, formerr}.
+
+gethostbyaddr({A,B,C,D}) when ?VALID_V4(A), ?VALID_V4(B), ?VALID_V4(C), ?VALID_V4(D) ->
+ getit(?OP_GETHOSTBYADDR, ?PROTO_IPV4, <<A,B,C,D>>);
+gethostbyaddr({A,B,C,D,E,F,G,H}) when ?VALID_V6(A), ?VALID_V6(B), ?VALID_V6(C), ?VALID_V6(D),
+ ?VALID_V6(E), ?VALID_V6(F), ?VALID_V6(G), ?VALID_V6(H) ->
+ getit(?OP_GETHOSTBYADDR, ?PROTO_IPV6, <<A:16,B:16,C:16,D:16,E:16,F:16,G:16,H:16>>);
+gethostbyaddr(Addr) when is_list(Addr) ->
+ case inet_parse:address(Addr) of
+ {ok, IP} -> gethostbyaddr(IP);
+ _Error -> {error, formerr}
+ end;
+gethostbyaddr(Addr) when is_atom(Addr) ->
+ gethostbyaddr(atom_to_list(Addr));
+gethostbyaddr(_) -> {error, formerr}.
+
+control({debug_level, Level}) when is_integer(Level) ->
+ getit(?OP_CONTROL, ?SETOPT_DEBUG_LEVEL, <<Level:32>>);
+control(soft_restart) ->
+ getit(restart_port);
+control(_) -> {error, formerr}.
+
+getit(Op, Proto, Data) ->
+ getit({Op, Proto, Data}).
+
+getit(Req) ->
+ Pid = ensure_started(),
+ Ref = make_ref(),
+ Pid ! {{self(),Ref}, Req},
+ receive
+ {Ref, {ok,BinHostent}} ->
+ parse_address(BinHostent);
+ {Ref, Error} ->
+ Error
+ after 5000 ->
+ Ref2 = erlang:monitor(process,Pid),
+ Res2 = receive
+ {Ref, {ok,BinHostent}} ->
+ parse_address(BinHostent);
+ {Ref, Error} ->
+ Error;
+ {'DOWN', Ref2, process,
+ Pid, Reason} ->
+ {error, Reason}
+ end,
+ catch erlang:demonitor(Ref2),
+ receive {'DOWN',Ref2,_,_,_} -> ok after 0 -> ok end,
+ Res2
+ end.
+
+do_start(Sup, C) ->
+ {Child,_,_,_,_,_} = C,
+ case supervisor:start_child(Sup,C) of
+ {ok,_} ->
+ ok;
+ {error, {already_started, Pid}} when is_pid(Pid) ->
+ ok;
+ {error, {{already_started, Pid}, _Child}} when is_pid(Pid) ->
+ ok;
+ {error, already_present} ->
+ supervisor:delete_child(Sup, Child),
+ do_start(Sup, C)
+ end.
+
+ensure_started() ->
+ case whereis(?MODULE) of
+ undefined ->
+ C = {?PROCNAME_SUP, {?MODULE, start_link, []}, temporary,
+ 1000, worker, [?MODULE]},
+ case whereis(kernel_safe_sup) of
+ undefined ->
+ case whereis(net_sup) of
+ undefined ->
+ %% Icky fallback, run once without supervisor
+ start_raw();
+ _ ->
+ do_start(net_sup,C),
+ case whereis(?MODULE) of
+ undefined ->
+ exit({could_not_start_server, ?MODULE});
+ Pid0 ->
+ Pid0
+ end
+ end;
+ _ ->
+ do_start(kernel_safe_sup,C),
+ case whereis(?MODULE) of
+ undefined ->
+ exit({could_not_start_server, ?MODULE});
+ Pid1 ->
+ Pid1
+ end
+ end;
+ Pid ->
+ Pid
+ end.
+
+parse_address(BinHostent) ->
+ case catch
+ begin
+ case BinHostent of
+ <<?UNIT_ERROR, Errstring/binary>> ->
+ {error, list_to_atom(listify(Errstring))};
+ <<?UNIT_IPV4, Naddr:32, T0/binary>> ->
+ {T1,Addresses} = pick_addresses_v4(Naddr, T0),
+ [Name | Names] = pick_names(T1),
+ {ok, #hostent{h_addr_list = Addresses, h_addrtype = inet,
+ h_aliases = Names, h_length = ?UNIT_IPV4,
+ h_name = Name}};
+ <<?UNIT_IPV6, Naddr:32, T0/binary>> ->
+ {T1,Addresses} = pick_addresses_v6(Naddr, T0),
+ [Name | Names] = pick_names(T1),
+ {ok, #hostent{h_addr_list = Addresses, h_addrtype = inet6,
+ h_aliases = Names, h_length = ?UNIT_IPV6,
+ h_name = Name}};
+ _Else ->
+ {error, {internal_error, {malformed_response, BinHostent}}}
+ end
+ end of
+ {'EXIT', Reason} ->
+ Reason;
+ Normal ->
+ Normal
+ end.
+
+listify(Bin) ->
+ N = byte_size(Bin) - 1,
+ <<Bin2:N/binary, Ch>> = Bin,
+ case Ch of
+ 0 ->
+ listify(Bin2);
+ _ ->
+ binary_to_list(Bin)
+ end.
+
+pick_addresses_v4(0,Tail) ->
+ {Tail,[]};
+pick_addresses_v4(N,<<A,B,C,D,Tail/binary>>) ->
+ {NTail, OList} = pick_addresses_v4(N-1,Tail),
+ {NTail, [{A,B,C,D} | OList]}.
+
+pick_addresses_v6(0,Tail) ->
+ {Tail,[]};
+pick_addresses_v6(Num,<<A:16,B:16,C:16,D:16,E:16,F:16,G:16,H:16,
+ Tail/binary>>) ->
+ {NTail, OList} = pick_addresses_v6(Num-1,Tail),
+ {NTail, [{A,B,C,D,E,F,G,H} | OList]}.
+
+ndx(Ch,Bin) ->
+ ndx(Ch,0,byte_size(Bin),Bin).
+
+ndx(_,N,N,_) ->
+ undefined;
+ndx(Ch,I,N,Bin) ->
+ case Bin of
+ <<_:I/binary,Ch,_/binary>> ->
+ I;
+ _ ->
+ ndx(Ch,I+1,N,Bin)
+ end.
+
+pick_names(<<Length:32,Namelist/binary>>) ->
+ pick_names(Length,Namelist).
+
+pick_names(0,<<>>) ->
+ [];
+pick_names(0,_) ->
+ exit({error,format_error});
+pick_names(_N,<<>>) ->
+ exit({error,format_error});
+pick_names(N,Bin) ->
+ Ndx = ndx(0,Bin),
+ <<Str:Ndx/binary,0,Rest/binary>> = Bin,
+ [binary_to_list(Str)|pick_names(N-1,Rest)].
+
diff --git a/lib/kernel/src/inet_hosts.erl b/lib/kernel/src/inet_hosts.erl
new file mode 100644
index 0000000000..df1d4fc0be
--- /dev/null
+++ b/lib/kernel/src/inet_hosts.erl
@@ -0,0 +1,123 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_hosts).
+
+%% Implement gethostbyname and gethostbyaddr for the inet_hosts table
+
+-export([gethostbyname/1, gethostbyname/2, gethostbyaddr/1]).
+
+-include("inet.hrl").
+-include("inet_int.hrl").
+
+gethostbyname(Name) when is_list(Name) ->
+ gethostbyname(Name,
+ case inet_db:res_option(inet6) of
+ true -> inet6;
+ false -> inet
+ end);
+gethostbyname(Name) when is_atom(Name) ->
+ gethostbyname(atom_to_list(Name));
+gethostbyname(_) -> {error, formerr}.
+
+
+
+gethostbyname(Name, Type) when is_list(Name), is_atom(Type) ->
+ case gethostbyname(Name, Type, inet_hosts_byname, inet_hosts_byaddr) of
+ false ->
+ case gethostbyname(Name, Type,
+ inet_hosts_file_byname,
+ inet_hosts_file_byaddr) of
+ false -> {error,nxdomain};
+ Hostent -> {ok,Hostent}
+ end;
+ Hostent -> {ok,Hostent}
+ end;
+gethostbyname(Name, Type) when is_atom(Name), is_atom(Type) ->
+ gethostbyname(atom_to_list(Name), Type);
+gethostbyname(_, _) -> {error, formerr}.
+
+gethostbyname(Name, Type, Byname, Byaddr) ->
+ inet_db:res_update_hosts(),
+ case [I || [I] <- ets:match(Byname, {Name,Type,'$1'})] of
+ [] -> false;
+ [IP|_]=IPs ->
+ %% Use the primary IP address to generate aliases
+ [Nm|As] = [N || [N] <- ets:match(Byaddr,
+ {'$1',Type,IP})],
+ make_hostent(Nm, IPs, As, Type)
+ end.
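+
+%% Illustrative only: assuming the byname/byaddr tables hold a single entry
+%% mapping "gw" to 10.0.0.1, gethostbyname("gw", inet) returns
+%%   {ok,#hostent{h_name = "gw", h_addrtype = inet, h_length = 4,
+%%                h_addr_list = [{10,0,0,1}], h_aliases = []}}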
+
+
+
+
+gethostbyaddr({A,B,C,D}=IP) when ?ip(A,B,C,D) ->
+ gethostbyaddr(IP, inet);
+%% IPv4-mapped IPv6 address
+gethostbyaddr({0,0,0,0,0,16#ffff=F,G,H}) when ?ip6(0,0,0,0,0,F,G,H) ->
+ gethostbyaddr({G bsr 8, G band 255, H bsr 8, H band 255});
+gethostbyaddr({A,B,C,D,E,F,G,H}=IP) when ?ip6(A,B,C,D,E,F,G,H) ->
+ gethostbyaddr(IP, inet6);
+gethostbyaddr(Addr) when is_list(Addr) ->
+ case inet_parse:address(Addr) of
+ {ok,IP} -> gethostbyaddr(IP);
+ _Error -> {error, formerr}
+ end;
+gethostbyaddr(Addr) when is_atom(Addr) ->
+ gethostbyaddr(atom_to_list(Addr));
+gethostbyaddr(_) -> {error, formerr}.
+
+
+
+gethostbyaddr(IP, Type) ->
+ case gethostbyaddr(IP, Type, inet_hosts_byaddr) of
+ false ->
+ case gethostbyaddr(IP, Type, inet_hosts_file_byaddr) of
+ false -> {error,nxdomain};
+ Hostent -> {ok,Hostent}
+ end;
+ Hostent -> {ok,Hostent}
+ end.
+
+gethostbyaddr(IP, Type, Byaddr) ->
+ inet_db:res_update_hosts(),
+ case [N || [N] <- ets:match(Byaddr, {'$1',Type,IP})] of
+ [] -> false;
+ [Nm|As] -> make_hostent(Nm, [IP], As, Type)
+ end.
+
+
+
+make_hostent(Name, Addrs, Aliases, inet) ->
+ #hostent {
+ h_name = Name,
+ h_addrtype = inet,
+ h_length = 4,
+ h_addr_list = Addrs,
+ h_aliases = Aliases
+ };
+make_hostent(Name, Addrs, Aliases, inet6) ->
+ #hostent {
+ h_name = Name,
+ h_addrtype = inet6,
+ h_length = 16,
+ h_addr_list = Addrs,
+ h_aliases = Aliases
+ }.
+
+
diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl
new file mode 100644
index 0000000000..cf357b7fba
--- /dev/null
+++ b/lib/kernel/src/inet_int.hrl
@@ -0,0 +1,414 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%----------------------------------------------------------------------------
+%% Interface constants.
+%%
+%% This section must be "identical" to the corresponding in inet_drv.c
+%%
+
+%% family codes to open
+-define(INET_AF_INET, 1).
+-define(INET_AF_INET6, 2).
+-define(INET_AF_ANY, 3). % Fake for ANY in any address family
+-define(INET_AF_LOOPBACK, 4). % Fake for LOOPBACK in any address family
+
+%% type codes (gettype, INET_REQ_GETTYPE)
+-define(INET_TYPE_STREAM, 1).
+-define(INET_TYPE_DGRAM, 2).
+-define(INET_TYPE_SEQPACKET, 3).
+
+%% socket modes, INET_LOPT_MODE
+-define(INET_MODE_LIST, 0).
+-define(INET_MODE_BINARY, 1).
+
+%% deliver mode, INET_LOPT_DELIVER
+-define(INET_DELIVER_PORT, 0).
+-define(INET_DELIVER_TERM, 1).
+
+%% active socket, INET_LOPT_ACTIVE
+-define(INET_PASSIVE, 0).
+-define(INET_ACTIVE, 1).
+-define(INET_ONCE, 2). % Active once then passive
+
+%% state codes (getstatus, INET_REQ_GETSTATUS)
+-define(INET_F_OPEN, 16#0001).
+-define(INET_F_BOUND, 16#0002).
+-define(INET_F_ACTIVE, 16#0004).
+-define(INET_F_LISTEN, 16#0008).
+-define(INET_F_CON, 16#0010).
+-define(INET_F_ACC, 16#0020).
+-define(INET_F_LST, 16#0040).
+-define(INET_F_BUSY, 16#0080).
+
+%% request codes (erlang:port_control/3)
+-define(INET_REQ_OPEN, 1).
+-define(INET_REQ_CLOSE, 2).
+-define(INET_REQ_CONNECT, 3).
+-define(INET_REQ_PEER, 4).
+-define(INET_REQ_NAME, 5).
+-define(INET_REQ_BIND, 6).
+-define(INET_REQ_SETOPTS, 7).
+-define(INET_REQ_GETOPTS, 8).
+-define(INET_REQ_GETIX, 9).
+%% -define(INET_REQ_GETIF, 10). OBSOLETE
+-define(INET_REQ_GETSTAT, 11).
+-define(INET_REQ_GETHOSTNAME, 12).
+-define(INET_REQ_FDOPEN, 13).
+-define(INET_REQ_GETFD, 14).
+-define(INET_REQ_GETTYPE, 15).
+-define(INET_REQ_GETSTATUS, 16).
+-define(INET_REQ_GETSERVBYNAME, 17).
+-define(INET_REQ_GETSERVBYPORT, 18).
+-define(INET_REQ_SETNAME, 19).
+-define(INET_REQ_SETPEER, 20).
+-define(INET_REQ_GETIFLIST, 21).
+-define(INET_REQ_IFGET, 22).
+-define(INET_REQ_IFSET, 23).
+-define(INET_REQ_SUBSCRIBE, 24).
+%% TCP requests
+-define(TCP_REQ_ACCEPT, 40).
+-define(TCP_REQ_LISTEN, 41).
+-define(TCP_REQ_RECV, 42).
+-define(TCP_REQ_UNRECV, 43).
+-define(TCP_REQ_SHUTDOWN, 44).
+%% UDP and SCTP requests
+-define(PACKET_REQ_RECV, 60).
+-define(SCTP_REQ_LISTEN, 61).
+-define(SCTP_REQ_BINDX, 62). %% Multi-home SCTP bind
+
+%% subscribe codes, INET_REQ_SUBSCRIBE
+-define(INET_SUBS_EMPTY_OUT_Q, 1).
+
+%% reply codes for *_REQ_*
+-define(INET_REP_ERROR, 0).
+-define(INET_REP_OK, 1).
+-define(INET_REP_SCTP, 2).
+
+%% INET, TCP and UDP options:
+-define(INET_OPT_REUSEADDR, 0).
+-define(INET_OPT_KEEPALIVE, 1).
+-define(INET_OPT_DONTROUTE, 2).
+-define(INET_OPT_LINGER, 3).
+-define(INET_OPT_BROADCAST, 4).
+-define(INET_OPT_OOBINLINE, 5).
+-define(INET_OPT_SNDBUF, 6).
+-define(INET_OPT_RCVBUF, 7).
+-define(INET_OPT_PRIORITY, 8).
+-define(INET_OPT_TOS, 9).
+-define(TCP_OPT_NODELAY, 10).
+-define(UDP_OPT_MULTICAST_IF, 11).
+-define(UDP_OPT_MULTICAST_TTL, 12).
+-define(UDP_OPT_MULTICAST_LOOP, 13).
+-define(UDP_OPT_ADD_MEMBERSHIP, 14).
+-define(UDP_OPT_DROP_MEMBERSHIP, 15).
+% "Local" options: codes start from 20:
+-define(INET_LOPT_BUFFER, 20).
+-define(INET_LOPT_HEADER, 21).
+-define(INET_LOPT_ACTIVE, 22).
+-define(INET_LOPT_PACKET, 23).
+-define(INET_LOPT_MODE, 24).
+-define(INET_LOPT_DELIVER, 25).
+-define(INET_LOPT_EXITONCLOSE, 26).
+-define(INET_LOPT_TCP_HIWTRMRK, 27).
+-define(INET_LOPT_TCP_LOWTRMRK, 28).
+-define(INET_LOPT_BIT8, 29).
+-define(INET_LOPT_TCP_SEND_TIMEOUT, 30).
+-define(INET_LOPT_TCP_DELAY_SEND, 31).
+-define(INET_LOPT_PACKET_SIZE, 32).
+-define(INET_LOPT_READ_PACKETS, 33).
+-define(INET_OPT_RAW, 34).
+-define(INET_LOPT_TCP_SEND_TIMEOUT_CLOSE, 35).
+% Specific SCTP options: separate range:
+-define(SCTP_OPT_RTOINFO, 100).
+-define(SCTP_OPT_ASSOCINFO, 101).
+-define(SCTP_OPT_INITMSG, 102).
+-define(SCTP_OPT_AUTOCLOSE, 103).
+-define(SCTP_OPT_NODELAY, 104).
+-define(SCTP_OPT_DISABLE_FRAGMENTS, 105).
+-define(SCTP_OPT_I_WANT_MAPPED_V4_ADDR, 106).
+-define(SCTP_OPT_MAXSEG, 107).
+-define(SCTP_OPT_SET_PEER_PRIMARY_ADDR, 108).
+-define(SCTP_OPT_PRIMARY_ADDR, 109).
+-define(SCTP_OPT_ADAPTATION_LAYER, 110).
+-define(SCTP_OPT_PEER_ADDR_PARAMS, 111).
+-define(SCTP_OPT_DEFAULT_SEND_PARAM, 112).
+-define(SCTP_OPT_EVENTS, 113).
+-define(SCTP_OPT_DELAYED_ACK_TIME, 114).
+-define(SCTP_OPT_STATUS, 115).
+-define(SCTP_OPT_GET_PEER_ADDR_INFO, 116).
+
+%% interface options, INET_REQ_IFGET and INET_REQ_IFSET
+-define(INET_IFOPT_ADDR, 1).
+-define(INET_IFOPT_BROADADDR, 2).
+-define(INET_IFOPT_DSTADDR, 3).
+-define(INET_IFOPT_MTU, 4).
+-define(INET_IFOPT_NETMASK, 5).
+-define(INET_IFOPT_FLAGS, 6).
+-define(INET_IFOPT_HWADDR, 7). %% where supported (e.g. Linux)
+
+%% packet byte values, INET_LOPT_PACKET
+-define(TCP_PB_RAW, 0).
+-define(TCP_PB_1, 1).
+-define(TCP_PB_2, 2).
+-define(TCP_PB_4, 3).
+-define(TCP_PB_ASN1, 4).
+-define(TCP_PB_RM, 5).
+-define(TCP_PB_CDR, 6).
+-define(TCP_PB_FCGI, 7).
+-define(TCP_PB_LINE_LF, 8).
+-define(TCP_PB_TPKT, 9).
+-define(TCP_PB_HTTP, 10).
+-define(TCP_PB_HTTPH, 11).
+-define(TCP_PB_SSL_TLS, 12).
+-define(TCP_PB_HTTP_BIN,13).
+-define(TCP_PB_HTTPH_BIN,14).
+
+%% bit options, INET_LOPT_BIT8
+-define(INET_BIT8_CLEAR, 0).
+-define(INET_BIT8_SET, 1).
+-define(INET_BIT8_ON, 2).
+-define(INET_BIT8_OFF, 3).
+
+
+%% getstat, INET_REQ_GETSTAT
+-define(INET_STAT_RECV_CNT, 1).
+-define(INET_STAT_RECV_MAX, 2).
+-define(INET_STAT_RECV_AVG, 3).
+-define(INET_STAT_RECV_DVI, 4).
+-define(INET_STAT_SEND_CNT, 5).
+-define(INET_STAT_SEND_MAX, 6).
+-define(INET_STAT_SEND_AVG, 7).
+-define(INET_STAT_SEND_PEND, 8).
+-define(INET_STAT_RECV_OCT, 9).
+-define(INET_STAT_SEND_OCT, 10).
+
+%% interface stuff, INET_IFOPT_FLAGS
+-define(INET_IFNAMSIZ, 16).
+-define(INET_IFF_UP, 16#0001).
+-define(INET_IFF_BROADCAST, 16#0002).
+-define(INET_IFF_LOOPBACK, 16#0004).
+-define(INET_IFF_POINTTOPOINT, 16#0008).
+-define(INET_IFF_RUNNING, 16#0010).
+-define(INET_IFF_MULTICAST, 16#0020).
+%%
+-define(INET_IFF_DOWN, 16#0100).
+-define(INET_IFF_NBROADCAST, 16#0200).
+-define(INET_IFF_NPOINTTOPOINT, 16#0800).
+
+%% SCTP Flags for "sctp_sndrcvinfo":
+%% INET_REQ_SETOPTS:SCTP_OPT_DEFAULT_SEND_PARAM
+-define(SCTP_FLAG_UNORDERED, 1). % sctp_unordered
+-define(SCTP_FLAG_ADDR_OVER, 2). % sctp_addr_over
+-define(SCTP_FLAG_ABORT, 4). % sctp_abort
+-define(SCTP_FLAG_EOF, 8). % sctp_eof
+-define(SCTP_FLAG_SNDALL, 16). % sctp_sndall, NOT YET IMPLEMENTED.
+
+%% SCTP Flags for "sctp_paddrparams", and the corresp Atoms:
+-define(SCTP_FLAG_HB_ENABLE, 1). % sctp_hb_enable
+-define(SCTP_FLAG_HB_DISABLE, 2). % sctp_hb_disable
+-define(SCTP_FLAG_HB_DEMAND, 4). % sctp_hb_demand
+-define(SCTP_FLAG_PMTUD_ENABLE, 8). % sctp_pmtud_enable
+-define(SCTP_FLAG_PMTUD_DISABLE, 16). % sctp_pmtud_disable
+-define(SCTP_FLAG_SACKDELAY_ENABLE, 32). % sctp_sackdelay_enable
+-define(SCTP_FLAG_SACKDELAY_DISABLE, 64). % sctp_sackdelay_disable
+
+%%
+%% End of interface constants.
+%%----------------------------------------------------------------------------
+
+-define(LISTEN_BACKLOG, 5). %% default backlog
+
+%% 5 seconds; might need more?
+-define(INET_CLOSE_TIMEOUT, 5000).
+
+%%
+%% Port/socket numbers: network standard functions
+%%
+-define(IPPORT_ECHO, 7).
+-define(IPPORT_DISCARD, 9).
+-define(IPPORT_SYSTAT, 11).
+-define(IPPORT_DAYTIME, 13).
+-define(IPPORT_NETSTAT, 15).
+-define(IPPORT_FTP, 21).
+-define(IPPORT_TELNET, 23).
+-define(IPPORT_SMTP, 25).
+-define(IPPORT_TIMESERVER, 37).
+-define(IPPORT_NAMESERVER, 42).
+-define(IPPORT_WHOIS, 43).
+-define(IPPORT_MTP, 57).
+
+%%
+%% Port/socket numbers: host specific functions
+%%
+-define(IPPORT_TFTP, 69).
+-define(IPPORT_RJE, 77).
+-define(IPPORT_FINGER, 79).
+-define(IPPORT_TTYLINK, 87).
+-define(IPPORT_SUPDUP, 95).
+
+%%
+%% UNIX TCP sockets
+%%
+-define(IPPORT_EXECSERVER, 512).
+-define(IPPORT_LOGINSERVER, 513).
+-define(IPPORT_CMDSERVER, 514).
+-define(IPPORT_EFSSERVER, 520).
+
+%%
+%% UNIX UDP sockets
+%%
+-define(IPPORT_BIFFUDP, 512).
+-define(IPPORT_WHOSERVER, 513).
+-define(IPPORT_ROUTESERVER, 520). %% 520+1 also used
+
+
+%%
+%% Ports < IPPORT_RESERVED are reserved for
+%% privileged processes (e.g. root).
+%% Ports > IPPORT_USERRESERVED are reserved
+%% for servers, not necessarily privileged.
+%%
+-define(IPPORT_RESERVED, 1024).
+-define(IPPORT_USERRESERVED, 5000).
+
+%% standard port for socks
+-define(IPPORT_SOCKS, 1080).
+
+%%
+%% Int to bytes
+%%
+-define(int8(X), [(X) band 16#ff]).
+
+-define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
+-define(int24(X), [((X) bsr 16) band 16#ff,
+ ((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
+-define(int32(X),
+ [((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff,
+ ((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
+-define(intAID(X), % For SCTP AssocID
+ ?int32(X)).
+
+%% Bytes to unsigned
+-define(u64(X7,X6,X5,X4,X3,X2,X1,X0),
+ ( ((X7) bsl 56) bor ((X6) bsl 48) bor ((X5) bsl 40) bor
+ ((X4) bsl 32) bor ((X3) bsl 24) bor ((X2) bsl 16) bor
+ ((X1) bsl 8) bor (X0) )).
+
+-define(u32(X3,X2,X1,X0),
+ (((X3) bsl 24) bor ((X2) bsl 16) bor ((X1) bsl 8) bor (X0))).
+
+-define(u24(X2,X1,X0),
+ (((X2) bsl 16) bor ((X1) bsl 8) bor (X0))).
+
+-define(u16(X1,X0),
+ (((X1) bsl 8) bor (X0))).
+
+-define(u8(X0), (X0)).
+
+%% Bytes to signed
+-define(i32(X3,X2,X1,X0),
+ (?u32(X3,X2,X1,X0) -
+ (if (X3) > 127 -> 16#100000000; true -> 0 end))).
+
+-define(i24(X2,X1,X0),
+ (?u24(X2,X1,X0) -
+ (if (X2) > 127 -> 16#1000000; true -> 0 end))).
+
+-define(i16(X1,X0),
+ (?u16(X1,X0) -
+ (if (X1) > 127 -> 16#10000; true -> 0 end))).
+
+-define(i8(X0),
+ (?u8(X0) -
+ (if (X0) > 127 -> 16#100; true -> 0 end))).
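+
+%% Worked examples (illustrative only):
+%%   ?int16(16#0203) evaluates to the byte list [2,3],
+%%   ?u16(2,3) rebuilds 16#0203 (= 515), and
+%%   ?i8(16#FF) interprets the byte as signed two's complement, giving -1.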
+
+%% macro for use in guard for checking ip address {A,B,C,D}
+-define(ip(A,B,C,D),
+ (((A) bor (B) bor (C) bor (D)) band (bnot 16#ff)) =:= 0).
+
+-define(ip6(A,B,C,D,E,F,G,H),
+ (((A) bor (B) bor (C) bor (D) bor (E) bor (F) bor (G) bor (H))
+ band (bnot 16#ffff)) =:= 0).
+
+-define(ether(A,B,C,D,E,F),
+ (((A) bor (B) bor (C) bor (D) bor (E) bor (F))
+ band (bnot 16#ff)) =:= 0).
+
+-define(port(P), (((P) band bnot 16#ffff) =:= 0)).
+
+%% default options (when inet_drv port is started)
+%%
+%% bufsz = INET_MIN_BUFFER (8K)
+%% header = 0
+%% packet = 0 (raw)
+%% mode = list
+%% deliver = term
+%% active = false
+%%
+-record(connect_opts,
+ {
+ ifaddr = any, %% bind to interface address
+ port = 0, %% bind to port (default is dynamic port)
+ fd = -1, %% fd >= 0 => already bound
+ opts = [] %% [{active,true}] added in inet:connect_options
+ }).
+
+-record(listen_opts,
+ {
+ ifaddr = any, %% bind to interface address
+ port = 0, %% bind to port (default is dynamic port)
+ backlog = ?LISTEN_BACKLOG, %% backlog
+ fd = -1, %% fd >= 0 => already bound
+ opts = [] %% [{active,true}] added in
+ %% inet:listen_options
+ }).
+
+-record(udp_opts,
+ {
+ ifaddr = any,
+ port = 0,
+ fd = -1,
+ opts = [{active,true}]
+ }).
+
+-define(SCTP_DEF_BUFSZ, 65536).
+-define(SCTP_DEF_IFADDR, any).
+-record(sctp_opts,
+ {
+ ifaddr,
+ port = 0,
+ fd = -1,
+ opts = [{mode, binary},
+ {buffer, ?SCTP_DEF_BUFSZ},
+ {sndbuf, ?SCTP_DEF_BUFSZ},
+ {recbuf, 1024},
+ {sctp_events, undefined}%,
+ %%{active, true}
+ ]
+ }).
+
+%% The following Tags are purely internal, used for marking items in the
+%% send buffer:
+-define(SCTP_TAG_SEND_ANC_INITMSG, 0).
+-define(SCTP_TAG_SEND_ANC_PARAMS, 1).
+-define(SCTP_TAG_SEND_DATA, 2).
diff --git a/lib/kernel/src/inet_parse.erl b/lib/kernel/src/inet_parse.erl
new file mode 100644
index 0000000000..62d44fb723
--- /dev/null
+++ b/lib/kernel/src/inet_parse.erl
@@ -0,0 +1,755 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_parse).
+
+%% Parser for all kinds of internet configuration files
+
+-export([hosts/1, hosts/2]).
+-export([hosts_vxworks/1]).
+-export([protocols/1, protocols/2]).
+-export([netmasks/1, netmasks/2]).
+-export([networks/1, networks/2]).
+-export([services/1, services/2]).
+-export([rpc/1, rpc/2]).
+-export([resolv/1, resolv/2]).
+-export([host_conf_linux/1, host_conf_linux/2]).
+-export([host_conf_freebsd/1, host_conf_freebsd/2]).
+-export([host_conf_bsdos/1, host_conf_bsdos/2]).
+-export([nsswitch_conf/1, nsswitch_conf/2]).
+
+-export([ipv4_address/1, ipv6_address/1]).
+-export([address/1]).
+-export([visible_string/1, domain/1]).
+-export([ntoa/1, dots/1]).
+-export([split_line/1]).
+
+-import(lists, [reverse/1]).
+
+-include_lib("kernel/include/file.hrl").
+
+%% --------------------------------------------------------------------------
+%% Parse services internet style
+%% Syntax:
+%% Name Port/Protocol [Aliases] \n
+%% # comment
+%% --------------------------------------------------------------------------
+
+services(File) ->
+ services(noname, File).
+
+services(Fname, File) ->
+ Fn = fun([Name, PortProto | Aliases]) ->
+ {Proto,Port} = port_proto(PortProto, 0),
+ {Name,Proto,Port,Aliases}
+ end,
+ parse_file(Fname, File, Fn).
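+
+%% Illustrative only, using the {chars,String} input form accepted by
+%% parse_file/3 below:
+%%   services({chars, "ftp 21/tcp\nhttp 80/tcp www\n"})
+%% returns {ok,[{"ftp",tcp,21,[]},{"http",tcp,80,["www"]}]}.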
+
+%% --------------------------------------------------------------------------
+%% Parse rpc program names
+%% Syntax:
+%% Name Program [Aliases] \n |
+%% # comment
+%% --------------------------------------------------------------------------
+
+rpc(File) ->
+ rpc(noname, File).
+
+rpc(Fname, File) ->
+ Fn = fun([Name,Program | Aliases]) ->
+ Prog = list_to_integer(Program),
+ {Name,Prog,Aliases}
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%% Parse hosts file unix style
+%% Syntax:
+%% IP Name [Aliases] \n |
+%% # comment
+%% --------------------------------------------------------------------------
+hosts(File) ->
+ hosts(noname,File).
+
+hosts(Fname,File) ->
+ Fn = fun([Address, Name | Aliases]) ->
+ %% XXX Fix for link-local IPv6 addresses that specify the
+ %% interface with a %if suffix. These kinds of addresses
+ %% may need to be handled gracefully throughout
+ %% inet* and inet_drv.
+ case string:tokens(Address, "%") of
+ [Addr,_] ->
+ {ok,_} = address(Addr),
+ skip;
+ _ ->
+ {ok,IP} = address(Address),
+ {IP, Name, Aliases}
+ end
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%% Parse hostShow vxworks style
+%% Syntax:
+%% Name IP [Aliases] \n
+%% --------------------------------------------------------------------------
+hosts_vxworks(Hosts) ->
+ Fn = fun([Name, Address | Aliases]) ->
+ {ok,IP} = address(Address),
+ {IP, Name, Aliases}
+ end,
+ parse_file(Hosts, Fn).
+
+%% --------------------------------------------------------------------------
+%% Parse resolv file unix style
+%% Syntax:
+%% domain Domain \n
+%% nameserver IP \n
+%% search Dom1 Dom2 ... \n
+%% lookup Method1 Method2 Method3 \n
+%% # comment
+%% --------------------------------------------------------------------------
+
+resolv(File) ->
+ resolv(noname,File).
+
+resolv(Fname, File) ->
+ Fn = fun(["domain", Domain]) ->
+ {domain, Domain};
+ (["nameserver", Address]) ->
+ {ok,IP} = address(Address),
+ {nameserver,IP};
+ (["search" | List]) ->
+ {search, List};
+ (["lookup" | Types]) ->
+ {lookup, Types};
+ (_) ->
+ skip %% there are too many local options, we MUST skip
+ end,
+ parse_file(Fname, File, Fn).
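+
+%% Illustrative only:
+%%   resolv({chars, "nameserver 8.8.8.8\nsearch example.com\n"})
+%% returns {ok,[{nameserver,{8,8,8,8}},{search,["example.com"]}]}.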
+
+%% --------------------------------------------------------------------------
+%%
+%% Parse Linux host.conf file
+%% find "order" only.
+%%
+%% --------------------------------------------------------------------------
+host_conf_linux(File) ->
+ host_conf_linux(noname,File).
+
+host_conf_linux(Fname, File) ->
+ Fn = fun(["order" | Order]) ->
+ %% XXX remove ',' between entries
+ {lookup, split_comma(Order)};
+ (_) ->
+ skip
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%%
+%% Parse Freebsd/Netbsd host.conf file
+%% find "order" only.
+%%
+%% --------------------------------------------------------------------------
+host_conf_freebsd(File) ->
+ host_conf_freebsd(noname,File).
+
+host_conf_freebsd(Fname, File) ->
+ Fn = fun([Type]) -> Type end,
+ case parse_file(Fname, File, Fn) of
+ {ok, Ls} -> {ok, [{lookup, Ls}]};
+ Error -> Error
+ end.
+
+
+
+%% --------------------------------------------------------------------------
+%%
+%% Parse BSD/OS irs.conf file
+%% find "hosts" only and ignore options.
+%%
+%% Syntax:
+%% Map AccessMethod [,AccessMethod] [continue|merge [,merge|,continue]] \n
+%% # comment
+
+%% --------------------------------------------------------------------------
+host_conf_bsdos(File) ->
+ host_conf_bsdos(noname,File).
+
+host_conf_bsdos(Fname, File) ->
+ Fn = fun(["hosts" | List]) ->
+ delete_options(split_comma(List));
+ (_) ->
+ skip
+ end,
+ case parse_file(Fname, File, Fn) of
+ {ok, Ls} ->
+ {ok, [{lookup, lists:append(Ls)}]};
+ Error -> Error
+ end.
+
+delete_options(["continue"|T]) ->
+ delete_options(T);
+delete_options(["merge"|T]) ->
+ delete_options(T);
+delete_options([H|T]) ->
+ [H|delete_options(T)];
+delete_options([]) ->
+ [].
+
+
+%% --------------------------------------------------------------------------
+%%
+%% Parse Solaris nsswitch.conf
+%% find "hosts:" only
+%%
+%% --------------------------------------------------------------------------
+
+nsswitch_conf(File) ->
+ nsswitch_conf(noname,File).
+
+nsswitch_conf(Fname, File) ->
+ Fn = fun(["hosts:" | Types]) ->
+ {lookup, Types};
+ (_) -> skip
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%% Parse protocol file unix style
+%% Syntax:
+%% name protocol number name \n
+%% # comment
+%% --------------------------------------------------------------------------
+
+protocols(File) ->
+ protocols(noname,File).
+
+protocols(Fname, File) ->
+ Fn = fun([Name, Number, DName]) ->
+ {list_to_atom(Name), list_to_integer(Number), DName}
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%% Parse netmasks file unix style
+%% Syntax:
+%% Network Subnetmask
+%% # comment
+%% --------------------------------------------------------------------------
+
+netmasks(File) ->
+ netmasks(noname, File).
+
+netmasks(Fname, File) ->
+ Fn = fun([Net, Subnetmask]) ->
+ {ok, NetIP} = address(Net),
+ {ok, Mask} = address(Subnetmask),
+ {NetIP, Mask}
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%% Parse networks file unix style
+%% Syntax:
+%% network-name network-number aliases ...
+%% # comment
+%% --------------------------------------------------------------------------
+
+networks(File) ->
+ networks(noname, File).
+
+networks(Fname, File) ->
+ Fn = fun([NetName, NetNumber]) ->
+ Number = list_to_integer(NetNumber),
+ {NetName, Number}
+ end,
+ parse_file(Fname, File, Fn).
+
+%% --------------------------------------------------------------------------
+%%
+%% Simple Line by Line parser
+%%
+%% --------------------------------------------------------------------------
+
+parse_file(File, Fn) ->
+ parse_file(noname, File, Fn).
+
+parse_file(Fname, {fd,Fd}, Fn) ->
+ parse_fd(Fname,Fd, 1, Fn, []);
+parse_file(Fname, {chars,Cs}, Fn) when is_list(Cs) ->
+ parse_cs(Fname, Cs, 1, Fn, []);
+parse_file(Fname, {chars,Cs}, Fn) when is_binary(Cs) ->
+ parse_cs(Fname, binary_to_list(Cs), 1, Fn, []);
+parse_file(_, File, Fn) ->
+ case file:open(File, [read]) of
+ {ok, Fd} ->
+ Result = parse_fd(File,Fd, 1, Fn, []),
+ file:close(Fd),
+ Result;
+ Error -> Error
+ end.
+
+parse_fd(Fname,Fd, Line, Fun, Ls) ->
+ case read_line(Fd) of
+ eof -> {ok, reverse(Ls)};
+ Cs ->
+ case split_line(Cs) of
+ [] -> parse_fd(Fname, Fd, Line+1, Fun, Ls);
+ Toks ->
+ case catch Fun(Toks) of
+ {'EXIT',_} ->
+ error("~p:~p: erroneous line, SKIPPED~n",[Fname,Line]),
+ parse_fd(Fname, Fd,Line+1,Fun,Ls);
+ {warning,Wlist,Val} ->
+ warning("~p:~p: warning! strange domain name(s) ~p ~n",[Fname,Line,Wlist]),
+ parse_fd(Fname, Fd,Line+1,Fun,[Val|Ls]);
+
+ skip ->
+ parse_fd(Fname, Fd, Line+1, Fun, Ls);
+ Val -> parse_fd(Fname, Fd, Line+1, Fun, [Val|Ls])
+ end
+ end
+ end.
+
+parse_cs(Fname, Chars, Line, Fun, Ls) ->
+ case get_line(Chars) of
+ eof -> {ok, reverse(Ls)};
+ {Cs,Chars1} ->
+ case split_line(Cs) of
+ [] -> parse_cs(Fname, Chars1, Line+1, Fun, Ls);
+ Toks ->
+ case catch Fun(Toks) of
+ {'EXIT',_} ->
+ error("~p:~p: erroneous line, SKIPPED~n",[Fname,Line]),
+ parse_cs(Fname, Chars1, Line+1, Fun, Ls);
+ {warning,Wlist,Val} ->
+ warning("~p:~p: warning! strange domain name(s) ~p ~n",[Fname,Line,Wlist]),
+ parse_cs(Fname, Chars1, Line+1, Fun, [Val|Ls]);
+
+ skip -> parse_cs(Fname, Chars1, Line+1, Fun, Ls);
+ Val -> parse_cs(Fname, Chars1, Line+1, Fun, [Val|Ls])
+ end
+ end
+ end.
+
+get_line([]) -> eof;
+get_line(Chars) -> get_line(Chars,[]).
+
+get_line([], Acc) -> {reverse(Acc), []};
+get_line([$\r, $\n | Cs], Acc) -> {reverse([$\n|Acc]), Cs};
+get_line([$\n | Cs], Acc) -> {reverse([$\n|Acc]), Cs};
+get_line([C | Cs], Acc) -> get_line(Cs, [C|Acc]).
+
+%%
+%% Read a line
+%%
+read_line(Fd) when is_pid(Fd) -> io:get_line(Fd, '');
+read_line(Fd = #file_descriptor{}) ->
+ collect_line(Fd, []).
+
+collect_line(Fd, Cs) ->
+ case file:read(Fd, 80) of
+ {ok, Line} when is_binary(Line) ->
+ collect_line(Fd, byte_size(Line), binary_to_list(Line), Cs);
+ {ok, Line} ->
+ collect_line(Fd, length(Line), Line, Cs);
+ eof when Cs =:= [] ->
+ eof;
+ eof -> reverse(Cs)
+ end.
+
+collect_line(Fd, N, [$\r, $\n|_], Cs) ->
+ {ok, _} = file:position(Fd, {cur,-(N-2)}),
+ reverse([$\n|Cs]);
+collect_line(Fd, N, [$\n|_], Cs) ->
+ {ok, _} = file:position(Fd, {cur,-(N-1)}),
+ reverse([$\n|Cs]);
+collect_line(Fd, _, [], Cs) ->
+ collect_line(Fd, Cs);
+collect_line(Fd, N, [X|Xs], Cs) ->
+ collect_line(Fd, N-1, Xs, [X|Cs]).
+
+
+%% split Port/Proto -> {Port, Proto}
+port_proto([X|Xs], N) when X >= $0, X =< $9 ->
+ port_proto(Xs, N*10 + (X - $0));
+port_proto([$/ | Proto], Port) when Port =/= 0 ->
+ {list_to_atom(Proto), Port}.
+
+%%
+%% Check if a string consists of visible characters, i.e. 16#21..16#7E
+%% visible_string(String) -> Bool
+%%
+visible_string([H|T]) ->
+ is_vis1([H|T]);
+visible_string(_) ->
+ false.
+
+is_vis1([C | Cs]) when C >= 16#21, C =< 16#7e -> is_vis1(Cs);
+is_vis1([]) -> true;
+is_vis1(_) -> false.
+
+%%
+%% Check if a String is a domain name according to RFC XXX.
+%% domain(String) -> Bool
+%%
+domain([H|T]) ->
+ is_dom1([H|T]);
+domain(_) ->
+ false.
+
+is_dom1([C | Cs]) when C >= $a, C =< $z -> is_dom_ldh(Cs);
+is_dom1([C | Cs]) when C >= $A, C =< $Z -> is_dom_ldh(Cs);
+is_dom1([C | Cs]) when C >= $0, C =< $9 ->
+ case is_dom_ldh(Cs) of
+ true -> is_dom2(string:tokens([C | Cs],"."));
+ false -> false
+ end;
+is_dom1(_) -> false.
+
+is_dom_ldh([C | Cs]) when C >= $a, C =< $z -> is_dom_ldh(Cs);
+is_dom_ldh([C | Cs]) when C >= $A, C =< $Z -> is_dom_ldh(Cs);
+is_dom_ldh([C | Cs]) when C >= $0, C =< $9 -> is_dom_ldh(Cs);
+is_dom_ldh([$-,$. | _]) -> false;
+is_dom_ldh([$_,$. | _]) -> false;
+is_dom_ldh([$_ | Cs]) -> is_dom_ldh(Cs);
+is_dom_ldh([$- | Cs]) -> is_dom_ldh(Cs);
+is_dom_ldh([$. | Cs]) -> is_dom1(Cs);
+is_dom_ldh([]) -> true;
+is_dom_ldh(_) -> false.
+
+%%% Check that we don't get an IP address as a domain name.
+
+-define(L2I(L), (catch list_to_integer(L))).
+
+is_dom2([A,B,C,D]) ->
+ case ?L2I(D) of
+ Di when is_integer(Di) ->
+ case {?L2I(A),?L2I(B),?L2I(C)} of
+ {Ai,Bi,Ci} when is_integer(Ai),
+ is_integer(Bi),
+ is_integer(Ci) -> false;
+ _ -> true
+ end;
+ _ -> true
+ end;
+is_dom2(_) ->
+ true.
+
+
+
+%%
+%% Test ipv4 address or ipv6 address
+%% Return {ok, Address} | {error, Reason}
+%%
+address(Cs) when is_list(Cs) ->
+ case ipv4_address(Cs) of
+ {ok,IP} -> {ok,IP};
+ _ ->
+ case ipv6_address(Cs) of
+ {ok, IP} -> {ok, IP};
+ Error -> Error
+ end
+ end;
+address(_) ->
+ {error, einval}.
+
+%%
+%% Parse IPv4 address:
+%% d1.d2.d3.d4
+%% d1.d2.d4
+%% d1.d4
+%% d4
+%%
+%% Return {ok, IP} | {error, einval}
+%%
+ipv4_address(Cs) ->
+ case catch ipv4_addr(Cs) of
+ {'EXIT',_} -> {error,einval};
+ Addr -> {ok,Addr}
+ end.
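+
+%% Examples of the accepted forms (illustrative only):
+%%   ipv4_address("10.2.3.4") -> {ok,{10,2,3,4}}
+%%   ipv4_address("10.2.4")   -> {ok,{10,2,0,4}}
+%%   ipv4_address("10.4")     -> {ok,{10,0,0,4}}
+%%   ipv4_address("4")        -> {ok,{0,0,0,4}}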
+
+ipv4_addr(Cs) ->
+ ipv4_addr(d3(Cs), []).
+
+ipv4_addr({Cs0,[]}, A) when length(A) =< 3 ->
+ case [tod(Cs0)|A] of
+ [D4,D3,D2,D1] ->
+ {D1,D2,D3,D4};
+ [D4,D2,D1] ->
+ {D1,D2,0,D4};
+ [D4,D1] ->
+ {D1,0,0,D4};
+ [D4] ->
+ {0,0,0,D4}
+ end;
+ipv4_addr({Cs0,"."++Cs1}, A) when length(A) =< 2 ->
+ ipv4_addr(d3(Cs1), [tod(Cs0)|A]).
+
+d3(Cs) -> d3(Cs, []).
+
+d3([C|Cs], R) when C >= $0, C =< $9, length(R) =< 2 ->
+ d3(Cs, [C|R]);
+d3(Cs, [_|_]=R) ->
+ {lists:reverse(R),Cs}.
+
+tod(Cs) ->
+ case erlang:list_to_integer(Cs) of
+ D when D >= 0, D =< 255 ->
+ D;
+ _ ->
+ erlang:error(badarg, [Cs])
+ end.
+
+%%
+%% Parse IPv6 address:
+%% x1:x2:x3:x4:x5:x6:x7:x8
+%% x1:x2::x7:x8
+%% ::x7:x8
+%% x1:x2::
+%% ::
+%% x1:x2:x3:x4:x5:x6:d7a.d7b.d8a.d8b
+%% x1:x2::x5:x6:d7a.d7b.d8a.d8b
+%% ::x5:x6:d7a.d7b.d8a.d8b
+%% x1:x2::d7a.d7b.d8a.d8b
+%% ::d7a.d7b.d8a.d8b
+%%
+%% Return {ok, IP} | {error, einval}
+%%
+ipv6_address(Cs) ->
+ case catch ipv6_addr(Cs) of
+ {'EXIT',_} -> {error,einval};
+ Addr -> {ok,Addr}
+ end.
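+
+%% Examples (illustrative only):
+%%   ipv6_address("::")        -> {ok,{0,0,0,0,0,0,0,0}}
+%%   ipv6_address("::1")       -> {ok,{0,0,0,0,0,0,0,1}}
+%%   ipv6_address("fe80::204") -> {ok,{16#fe80,0,0,0,0,0,0,16#204}}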
+
+ipv6_addr("::") ->
+ ipv6_addr_done([], []);
+ipv6_addr("::"++Cs) ->
+ ipv6_addr(x4(Cs), [], []);
+ipv6_addr(Cs) ->
+ ipv6_addr(x4(Cs), []).
+
+%% Before "::"
+ipv6_addr({Cs0,[]}, A) when length(A) =:= 7 ->
+ ipv6_addr_done([tox(Cs0)|A]);
+ipv6_addr({Cs0,"::"}, A) when length(A) =< 6 ->
+ ipv6_addr_done([tox(Cs0)|A], []);
+ipv6_addr({Cs0,"::"++Cs1}, A) when length(A) =< 5 ->
+ ipv6_addr(x4(Cs1), [tox(Cs0)|A], []);
+ipv6_addr({Cs0,":"++Cs1}, A) when length(A) =< 6 ->
+ ipv6_addr(x4(Cs1), [tox(Cs0)|A]);
+ipv6_addr({Cs0,"."++Cs1}, A) when length(A) =:= 6 ->
+ ipv6_addr(d3(Cs1), A, [], [tod(Cs0)]).
+
+%% After "::"
+ipv6_addr({Cs0,[]}, A, B) when length(A)+length(B) =< 6 ->
+ ipv6_addr_done(A, [tox(Cs0)|B]);
+ipv6_addr({Cs0,":"++Cs1}, A, B) when length(A)+length(B) =< 5 ->
+ ipv6_addr(x4(Cs1), A, [tox(Cs0)|B]);
+ipv6_addr({Cs0,"."++Cs1}, A, B) when length(A)+length(B) =< 5 ->
+ ipv6_addr(x4(Cs1), A, B, [tod(Cs0)]).
+
+%% After "."
+ipv6_addr({Cs0,[]}, A, B, C) when length(C) =:= 3 ->
+ ipv6_addr_done(A, B, [tod(Cs0)|C]);
+ipv6_addr({Cs0,"."++Cs1}, A, B, C) when length(C) =< 2 ->
+ ipv6_addr(d3(Cs1), A, B, [tod(Cs0)|C]).
+
+ipv6_addr_done(Ar, Br, [D4,D3,D2,D1]) ->
+ ipv6_addr_done(Ar, [((D3 bsl 8) bor D4),((D1 bsl 8) bor D2)|Br]).
+
+ipv6_addr_done(Ar, Br) ->
+ ipv6_addr_done(Br++dup(8-length(Ar)-length(Br), 0, Ar)).
+
+ipv6_addr_done(Ar) ->
+ list_to_tuple(lists:reverse(Ar)).
+
+x4(Cs) -> x4(Cs, []).
+
+x4([C|Cs], R) when C >= $0, C =< $9, length(R) =< 3 ->
+ x4(Cs, [C|R]);
+x4([C|Cs], R) when C >= $a, C =< $f, length(R) =< 3 ->
+ x4(Cs, [C|R]);
+x4([C|Cs], R) when C >= $A, C =< $F, length(R) =< 3 ->
+ x4(Cs, [C|R]);
+x4(Cs, [_|_]=R) ->
+ {lists:reverse(R),Cs}.
+
+tox(Cs) ->
+ erlang:list_to_integer(Cs, 16).
+
+dup(0, _, L) ->
+ L;
+dup(N, E, L) when is_integer(N), N >= 1 ->
+ dup(N-1, E, [E|L]);
+dup(N, E, L) ->
+ erlang:error(badarg, [N,E,L]).
+
+%% Convert an IPv4 address to ASCII
+%% Convert an IPv6 / IPv4 address to ASCII (plain format)
+ntoa({A,B,C,D}) ->
+ integer_to_list(A) ++ "." ++ integer_to_list(B) ++ "." ++
+ integer_to_list(C) ++ "." ++ integer_to_list(D);
+%% ANY
+ntoa({0,0,0,0,0,0,0,0}) -> "::";
+%% LOOPBACK
+ntoa({0,0,0,0,0,0,0,1}) -> "::1";
+%% IPv4-compatible IPv6 host address
+ntoa({0,0,0,0,0,0,A,B}) -> "::" ++ dig_to_dec(A) ++ "." ++ dig_to_dec(B);
+%% IPv4-mapped IPv6 host address
+ntoa({0,0,0,0,0,16#ffff,A,B}) ->
+ "::FFFF:" ++ dig_to_dec(A) ++ "." ++ dig_to_dec(B);
+ntoa({_,_,_,_,_,_,_,_}=T) ->
+ %% Find longest sequence of zeros, at least 2, to replace with "::"
+ ntoa(tuple_to_list(T), []).
+
+%% Find first double zero
+ntoa([], R) ->
+ ntoa_done(R);
+ntoa([0,0|T], R) ->
+ ntoa(T, R, 2);
+ntoa([D|T], R) ->
+ ntoa(T, [D|R]).
+
+%% Count consecutive zeros
+ntoa([], R, _) ->
+ ntoa_done(R, []);
+ntoa([0|T], R, N) ->
+ ntoa(T, R, N+1);
+ntoa([D|T], R, N) ->
+ ntoa(T, R, N, [D]).
+
+%% Find alternate double zero
+ntoa([], R1, _N1, R2) ->
+ ntoa_done(R1, R2);
+ntoa([0,0|T], R1, N1, R2) ->
+ ntoa(T, R1, N1, R2, 2);
+ntoa([D|T], R1, N1, R2) ->
+ ntoa(T, R1, N1, [D|R2]).
+
+%% Count consecutive alternate zeros
+ntoa(T, R1, N1, R2, N2) when N2 > N1 ->
+ %% Alternate zero sequence is longer - use it instead
+ ntoa(T, R2++dup(N1, 0, R1), N2);
+ntoa([], R1, _N1, R2, N2) ->
+ ntoa_done(R1, dup(N2, 0, R2));
+ntoa([0|T], R1, N1, R2, N2) ->
+ ntoa(T, R1, N1, R2, N2+1);
+ntoa([D|T], R1, N1, R2, N2) ->
+ ntoa(T, R1, N1, [D|dup(N2, 0, R2)]).
+
+ntoa_done(R1, R2) ->
+ lists:append(
+ separate(":", lists:map(fun dig_to_hex/1, lists:reverse(R1)))++
+ ["::"|separate(":", lists:map(fun dig_to_hex/1, lists:reverse(R2)))]).
+
+ntoa_done(R) ->
+ lists:append(separate(":", lists:map(fun dig_to_hex/1, lists:reverse(R)))).
+
+separate(_E, []) ->
+ [];
+separate(E, [_|_]=L) ->
+ separate(E, L, []).
+
+separate(E, [H|[_|_]=T], R) ->
+ separate(E, T, [E,H|R]);
+separate(_E, [H], R) ->
+ lists:reverse(R, [H]).
+
+%% convert to A.B decimal form
+dig_to_dec(0) -> [$0,$.,$0];
+dig_to_dec(X) ->
+ integer_to_list((X bsr 8) band 16#ff) ++ "." ++
+ integer_to_list(X band 16#ff).
+
+%% Convert an integer to a hex string
+dig_to_hex(X) ->
+ erlang:integer_to_list(X, 16).
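+
+%% Examples (illustrative only; dig_to_hex/1 yields uppercase hex digits):
+%%   ntoa({10,2,3,4})                   -> "10.2.3.4"
+%%   ntoa({0,0,0,0,0,0,0,1})            -> "::1"
+%%   ntoa({16#fe80,0,0,0,0,0,0,16#204}) -> "FE80::204"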
+
+%%
+%% Count number of '.' in a name
+%% return {Number of non-terminating dots, has-terminating dot?}
+%% {integer, bool}
+%%
+dots(Name) -> dots(Name, 0).
+
+dots([$.], N) -> {N, true};
+dots([$. | T], N) -> dots(T, N+1);
+dots([_C | T], N) -> dots(T, N);
+dots([], N) -> {N, false}.
+
+
+split_line(Line) ->
+ split_line(Line, []).
+
+split_line([$# | _], Tokens) -> reverse(Tokens);
+split_line([$\s| L], Tokens) -> split_line(L, Tokens);
+split_line([$\t | L], Tokens) -> split_line(L, Tokens);
+split_line([$\n | L], Tokens) -> split_line(L, Tokens);
+split_line([], Tokens) -> reverse(Tokens);
+split_line([C|Cs], Tokens) -> split_mid(Cs, [C], Tokens).
+
+split_mid([$# | _Cs], Acc, Tokens) -> split_end(Acc, Tokens);
+split_mid([$\s | Cs], Acc, Tokens) -> split_line(Cs, [reverse(Acc) | Tokens]);
+split_mid([$\t | Cs], Acc, Tokens) -> split_line(Cs, [reverse(Acc) | Tokens]);
+split_mid([$\r, $\n | Cs], Acc, Tokens) -> split_line(Cs, [reverse(Acc) | Tokens]);
+split_mid([$\n | Cs], Acc, Tokens) -> split_line(Cs, [reverse(Acc) | Tokens]);
+split_mid([], Acc, Tokens) -> split_end(Acc, Tokens);
+split_mid([C|Cs], Acc, Tokens) -> split_mid(Cs, [C|Acc], Tokens).
+
+split_end(Acc, Tokens) -> reverse([reverse(Acc) | Tokens]).
+
+
+%% Split comma-separated tokens. Because we have already split on
+%% spaces, we may encounter the cases
+%%
+%% ",foo"
+%% "foo,"
+%% "foo,bar..."
+
+split_comma([]) ->
+ [];
+split_comma([Token | Tokens]) ->
+ split_comma(Token, []) ++ split_comma(Tokens).
+
+split_comma([], Tokens) -> reverse(Tokens);
+split_comma([$, | L], Tokens) -> split_comma(L, Tokens);
+split_comma([C|Cs], Tokens) -> split_mid_comma(Cs, [C], Tokens).
+
+split_mid_comma([$, | Cs], Acc, Tokens) ->
+ split_comma(Cs, [reverse(Acc) | Tokens]);
+split_mid_comma([], Acc, Tokens) ->
+ split_end(Acc, Tokens);
+split_mid_comma([C|Cs], Acc, Tokens) ->
+ split_mid_comma(Cs, [C|Acc], Tokens).
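+
+%% For example (illustrative tokens):
+%%   split_comma([",foo", "bar,baz"]) -> ["foo", "bar", "baz"]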
+
+%%
+
+warning(Fmt, Args) ->
+ case application:get_env(kernel,inet_warnings) of
+ {ok,on} ->
+ error_logger:info_msg("inet_parse:" ++ Fmt, Args);
+ _ ->
+ ok
+ end.
+
+error(Fmt, Args) ->
+ error_logger:info_msg("inet_parse:" ++ Fmt, Args).
+
diff --git a/lib/kernel/src/inet_res.erl b/lib/kernel/src/inet_res.erl
new file mode 100644
index 0000000000..9b9e078898
--- /dev/null
+++ b/lib/kernel/src/inet_res.erl
@@ -0,0 +1,846 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% RFC 1035, 2671, 2782, 2915.
+%%
+-module(inet_res).
+
+%-compile(export_all).
+
+-export([gethostbyname/1, gethostbyname/2, gethostbyname/3,
+ gethostbyname_tm/3]).
+-export([gethostbyaddr/1, gethostbyaddr/2,
+ gethostbyaddr_tm/2]).
+-export([getbyname/2, getbyname/3,
+ getbyname_tm/3]).
+
+-export([resolve/3, resolve/4, resolve/5]).
+-export([lookup/3, lookup/4, lookup/5]).
+-export([dns_msg/1]).
+
+-export([nslookup/3, nslookup/4]).
+-export([nnslookup/4, nnslookup/5]).
+
+-include_lib("kernel/include/inet.hrl").
+-include("inet_res.hrl").
+-include("inet_dns.hrl").
+-include("inet_int.hrl").
+
+-define(verbose(Cond, Format, Args),
+ case begin Cond end of
+ true -> io:format(begin Format end, begin Args end);
+ false -> ok
+ end).
+
+%% --------------------------------------------------------------------------
+%% resolve:
+%%
+%% Nameserver query
+%%
+
+resolve(Name, Class, Type) ->
+ resolve(Name, Class, Type, [], infinity).
+
+resolve(Name, Class, Type, Opts) ->
+ resolve(Name, Class, Type, Opts, infinity).
+
+resolve(Name, Class, Type, Opts, Timeout) ->
+ case nsdname(Name) of
+ {ok, Nm} ->
+ Timer = inet:start_timer(Timeout),
+ Res = res_query(Nm, Class, Type, Opts, Timer),
+ inet:stop_timer(Timer),
+ Res;
+ Error ->
+ Error
+ end.
+
+%% --------------------------------------------------------------------------
+%% lookup:
+%%
+%% Convenience wrapper to resolve/3,4,5 that filters out all answer data
+%% fields of the class and type asked for.
+
+lookup(Name, Class, Type) ->
+ lookup(Name, Class, Type, []).
+
+lookup(Name, Class, Type, Opts) ->
+ lookup(Name, Class, Type, Opts, infinity).
+
+lookup(Name, Class, Type, Opts, Timeout) ->
+ lookup_filter(resolve(Name, Class, Type, Opts, Timeout),
+ Class, Type).
+
+lookup_filter({ok,#dns_rec{anlist=Answers}}, Class, Type) ->
+ [A#dns_rr.data || A <- Answers,
+ A#dns_rr.class =:= Class,
+ A#dns_rr.type =:= Type];
+lookup_filter({error,_}, _, _) -> [].
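+
+%% A minimal usage sketch. The answers depend on the actual zone data;
+%% the name, address and nameserver below are illustrative placeholders:
+%%   1> inet_res:lookup("www.example.com", in, a).
+%%   [{192,0,2,1}]
+%%   2> inet_res:lookup("www.example.com", in, a,
+%%                      [{nameservers,[{{192,0,2,53},53}]}]).
+%%   [{192,0,2,1}]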
+
+%% --------------------------------------------------------------------------
+%% nslookup:
+%%
+%% Do a general nameserver lookup
+%%
+%% Performs nslookup using the standard configuration.
+%%
+%% To be deprecated
+
+nslookup(Name, Class, Type) ->
+ do_nslookup(Name, Class, Type, [], infinity).
+
+nslookup(Name, Class, Type, Timeout) when is_integer(Timeout), Timeout >= 0 ->
+ do_nslookup(Name, Class, Type, [], Timeout);
+nslookup(Name, Class, Type, NSs) -> % For backwards compatibility
+ nnslookup(Name, Class, Type, NSs). % with OTP R6B only
+
+nnslookup(Name, Class, Type, NSs) ->
+ nnslookup(Name, Class, Type, NSs, infinity).
+
+nnslookup(Name, Class, Type, NSs, Timeout) ->
+ do_nslookup(Name, Class, Type, [{nameservers,NSs}], Timeout).
+
+do_nslookup(Name, Class, Type, Opts, Timeout) ->
+ case resolve(Name, Class, Type, Opts, Timeout) of
+ {error,{qfmterror,_}} -> {error,einval};
+ {error,{Reason,_}} -> {error,Reason};
+ Result -> Result
+ end.
+
+%% --------------------------------------------------------------------------
+%% options record
+%%
+-record(options, { % These must be sorted!
+ alt_nameservers,edns,inet6,nameservers,recurse,
+ retry,timeout,udp_payload_size,usevc,
+ verbose}). % this is a local option, not in inet_db
+%%
+%% Opts when is_list(Opts) -> #options{}
+make_options(Opts0) ->
+ Opts = [if is_atom(Opt) ->
+ case atom_to_list(Opt) of
+ "no"++X -> {list_to_atom(X),false};
+ _ -> {Opt,true}
+ end;
+ true -> Opt
+ end || Opt <- Opts0],
+ %% If the caller gives the nameservers option, the inet_db
+%% alt_nameservers option should be regarded as empty, i.e.
+ %% use only the nameservers the caller supplies.
+ SortedOpts =
+ lists:ukeysort(1,
+ case lists:keymember(nameservers, 1, Opts) of
+ true ->
+ case lists:keymember(alt_nameservers, 1, Opts) of
+ false ->
+ [{alt_nameservers,[]}|Opts];
+ true ->
+ Opts
+ end;
+ false ->
+ Opts
+ end),
+ SortedNames = record_info(fields, options),
+ inet_db:res_update_conf(),
+ list_to_tuple([options|make_options(SortedOpts, SortedNames)]).
+
+make_options([_|_]=Opts0, []=Names0) ->
+ erlang:error(badarg, [Opts0,Names0]);
+make_options([], []) -> [];
+make_options([{verbose,Val}|Opts]=Opts0, [verbose|Names]=Names0) ->
+ if is_boolean(Val) ->
+ [Val|make_options(Opts, Names)];
+ true ->
+ erlang:error(badarg, [Opts0,Names0])
+ end;
+make_options([{Opt,Val}|Opts]=Opts0, [Opt|Names]=Names0) ->
+ case inet_db:res_check_option(Opt, Val) of
+ true ->
+ [Val|make_options(Opts, Names)];
+ false ->
+ erlang:error(badarg, [Opts0,Names0])
+ end;
+make_options(Opts, [verbose|Names]) ->
+ [false|make_options(Opts, Names)];
+make_options(Opts, [Name|Names]) ->
+ [inet_db:res_option(Name)|make_options(Opts, Names)].
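+
+%% For example (a sketch; fields not given fall back to inet_db):
+%%   make_options([verbose, nousevc, {timeout,5000}])
+%% yields an #options{} with verbose=true, usevc=false, timeout=5000,
+%% and all remaining fields taken from inet_db:res_option/1.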
+
+
+%% --------------------------------------------------------------------------
+%%
+%% gethostbyaddr(ip_address()) => {ok, hostent()} | {error, Reason}
+%%
+%% where ip_address() is {A,B,C,D} ipv4 address
+%% | {A,B,C,D,E,F,G,H} ipv6 address
+%% | string versions of the above
+%% | atom version
+%%
+%% --------------------------------------------------------------------------
+
+gethostbyaddr(IP) -> gethostbyaddr_tm(IP,false).
+
+gethostbyaddr(IP,Timeout) ->
+ Timer = inet:start_timer(Timeout),
+ Res = gethostbyaddr_tm(IP,Timer),
+ inet:stop_timer(Timer),
+ Res.
+
+gethostbyaddr_tm({A,B,C,D} = IP, Timer) when ?ip(A,B,C,D) ->
+ inet_db:res_update_conf(),
+ case inet_db:gethostbyaddr(IP) of
+ {ok, HEnt} -> {ok, HEnt};
+ _ -> res_gethostbyaddr(dn_in_addr_arpa(A,B,C,D), IP, Timer)
+ end;
+%% IPv4-mapped IPv6 address
+gethostbyaddr_tm({0,0,0,0,0,16#ffff,G,H},Timer) when is_integer(G+H) ->
+ gethostbyaddr_tm({G div 256, G rem 256, H div 256, H rem 256},Timer);
+gethostbyaddr_tm({A,B,C,D,E,F,G,H} = IP, Timer) when ?ip6(A,B,C,D,E,F,G,H) ->
+ inet_db:res_update_conf(),
+ case inet_db:gethostbyaddr(IP) of
+ {ok, HEnt} -> {ok, HEnt};
+ _ -> res_gethostbyaddr(dn_ip6_int(A,B,C,D,E,F,G,H), IP, Timer)
+ end;
+gethostbyaddr_tm(Addr,Timer) when is_list(Addr) ->
+ case inet_parse:address(Addr) of
+ {ok, IP} -> gethostbyaddr_tm(IP,Timer);
+ _Error -> {error, formerr}
+ end;
+gethostbyaddr_tm(Addr,Timer) when is_atom(Addr) ->
+ gethostbyaddr_tm(atom_to_list(Addr),Timer);
+gethostbyaddr_tm(_,_) -> {error, formerr}.
+
+%%
+%% Send the gethostbyaddr query to:
+%% 1. the list of normal name servers
+%% 2. the list of alternative name servers
+%%
+res_gethostbyaddr(Addr, IP, Timer) ->
+ case res_query(Addr, in, ptr, [], Timer) of
+ {ok, Rec} ->
+ inet_db:res_gethostbyaddr(IP, Rec);
+ {error,{qfmterror,_}} -> {error,einval};
+ {error,{Reason,_}} -> {error,Reason};
+ Error ->
+ Error
+ end.
+
+%% --------------------------------------------------------------------------
+%%
+%% gethostbyname(domain_name() [, Family [, Timer]])
+%% => {ok, hostent()} | {error, Reason}
+%%
+%% where domain_name() is a domain string or atom
+%%
+%% Caches the answer.
+%% --------------------------------------------------------------------------
+
+gethostbyname(Name) ->
+ case inet_db:res_option(inet6) of
+ true ->
+ gethostbyname_tm(Name, inet6, false);
+ false ->
+ gethostbyname_tm(Name, inet, false)
+ end.
+
+gethostbyname(Name,Family) ->
+ gethostbyname_tm(Name,Family,false).
+
+gethostbyname(Name,Family,Timeout) ->
+ Timer = inet:start_timer(Timeout),
+ Res = gethostbyname_tm(Name,Family,Timer),
+ inet:stop_timer(Timer),
+ Res.
+
+gethostbyname_tm(Name,inet,Timer) ->
+ getbyname_tm(Name,?S_A,Timer);
+gethostbyname_tm(Name,inet6,Timer) ->
+ case getbyname_tm(Name,?S_AAAA,Timer) of
+ {ok,HEnt} -> {ok,HEnt};
+ {error,nxdomain} ->
+ case getbyname_tm(Name, ?S_A,Timer) of
+ {ok, HEnt} ->
+ %% rewrite to an IPv4-mapped IPv6 address
+ {ok,
+ HEnt#hostent {
+ h_addrtype = inet6,
+ h_length = 16,
+ h_addr_list =
+ lists:map(
+ fun({A,B,C,D}) ->
+ {0,0,0,0,0,16#ffff,A*256+B,C*256+D}
+ end, HEnt#hostent.h_addr_list)
+ }};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+gethostbyname_tm(_Name, _Family, _Timer) ->
+ {error, einval}.
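+
+%% The inet6 fallback above rewrites each A record to an IPv4-mapped
+%% IPv6 address; for example {127,0,0,1} becomes
+%% {0,0,0,0,0,16#ffff,16#7f00,1} (127*256+0 = 16#7f00, 0*256+1 = 1).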
+
+%% --------------------------------------------------------------------------
+%%
+%% getbyname(domain_name(), Type) => {ok, hostent()} | {error, Reason}
+%%
+%% where domain_name() is a domain string or atom and Type is ?S_A, ?S_MX ...
+%%
+%% Caches the answer.
+%% --------------------------------------------------------------------------
+
+getbyname(Name, Type) ->
+ getbyname_tm(Name,Type,false).
+
+getbyname(Name, Type, Timeout) ->
+ Timer = inet:start_timer(Timeout),
+ Res = getbyname_tm(Name, Type, Timer),
+ inet:stop_timer(Timer),
+ Res.
+
+getbyname_tm(Name, Type, Timer) when is_list(Name) ->
+ case type_p(Type) of
+ true ->
+ case inet_parse:visible_string(Name) of
+ false -> {error, formerr};
+ true ->
+ inet_db:res_update_conf(),
+ case inet_db:getbyname(Name, Type) of
+ {ok, HEnt} -> {ok, HEnt};
+ _ -> res_getbyname(Name, Type, Timer)
+ end
+ end;
+ false ->
+ {error, formerr}
+ end;
+getbyname_tm(Name,Type,Timer) when is_atom(Name) ->
+ getbyname_tm(atom_to_list(Name), Type,Timer);
+getbyname_tm(_, _, _) -> {error, formerr}.
+
+type_p(Type) ->
+ lists:member(Type, [?S_A, ?S_AAAA, ?S_MX, ?S_NS,
+ ?S_MD, ?S_MF, ?S_CNAME, ?S_SOA,
+ ?S_MB, ?S_MG, ?S_MR, ?S_NULL,
+ ?S_WKS, ?S_HINFO, ?S_TXT, ?S_SRV, ?S_NAPTR, ?S_SPF,
+ ?S_UINFO, ?S_UID, ?S_GID]).
+
+
+
+%% This function and inet_db:getbyname/2 must look up names
+%% in the same manner, but not from the same places.
+%%
+%% Assuming the search path, i.e. the return value from
+%% inet_db:get_searchlist(), to be ["dom1", "dom2"]:
+%%
+%% Old behaviour (not this code but the previous version):
+%% * For Name = "foo"
+%% Name = "foo." try "foo.dom1", "foo.dom2" at normal nameservers
+%% * For Name = "foo.bar"
+%% Name = "foo.bar." try "foo.bar" at normal then alt. nameservers
+%% then try "foo.bar.dom1", "foo.bar.dom2"
+%% at normal nameservers
+%%
+%% New behaviour (this code), honoring the old behaviour but
+%% doing better for absolute names:
+%% * For Name = "foo" try "foo.dom1", "foo.dom2" at normal nameservers
+%% * For Name = "foo.bar" try "foo.bar" at normal then alt. nameservers
+%% then try "foo.bar.dom1", "foo.bar.dom2"
+%% at normal nameservers
+%% * For Name = "foo." try "foo" at normal then alt. nameservers
+%% * For Name = "foo.bar." try "foo.bar" at normal then alt. nameservers
+%%
+%%
+%% FIXME This is probably how it should be done:
+%% Common behaviour (Solaris resolver) is:
+%% * For Name = "foo." try "foo"
+%% * For Name = "foo.bar." try "foo.bar"
+%% * For Name = "foo" try "foo.dom1", "foo.dom2", "foo"
+%% * For Name = "foo.bar" try "foo.bar.dom1", "foo.bar.dom2", "foo.bar"
+%% That is to try Name as it is as a last resort if it is not absolute.
+%%
+res_getbyname(Name, Type, Timer) ->
+ {EmbeddedDots, TrailingDot} = inet_parse:dots(Name),
+ Dot = if TrailingDot -> ""; true -> "." end,
+ if TrailingDot ->
+ res_getby_query(Name, Type, Timer);
+ EmbeddedDots =:= 0 ->
+ res_getby_search(Name, Dot,
+ inet_db:get_searchlist(),
+ nxdomain, Type, Timer);
+ true ->
+ case res_getby_query(Name, Type, Timer) of
+ {error,_Reason}=Error ->
+ res_getby_search(Name, Dot,
+ inet_db:get_searchlist(),
+ Error, Type, Timer);
+ Other -> Other
+ end
+ end.
+
+res_getby_search(Name, Dot, [Dom | Ds], _Reason, Type, Timer) ->
+ case res_getby_query(Name++Dot++Dom, Type, Timer,
+ inet_db:res_option(nameservers)) of
+ {ok, HEnt} -> {ok, HEnt};
+ {error, NewReason} ->
+ res_getby_search(Name, Dot, Ds, NewReason, Type, Timer)
+ end;
+res_getby_search(_Name, _, [], Reason,_,_) ->
+ {error, Reason}.
+
+res_getby_query(Name, Type, Timer) ->
+ case res_query(Name, in, Type, [], Timer) of
+ {ok, Rec} ->
+ inet_db:res_hostent_by_domain(Name, Type, Rec);
+ {error,{qfmterror,_}} -> {error,einval};
+ {error,{Reason,_}} -> {error,Reason};
+ Error -> Error
+ end.
+
+res_getby_query(Name, Type, Timer, NSs) ->
+ case res_query(Name, in, Type, [], Timer, NSs) of
+ {ok, Rec} ->
+ inet_db:res_hostent_by_domain(Name, Type, Rec);
+ {error,{qfmterror,_}} -> {error,einval};
+ {error,{Reason,_}} -> {error,Reason};
+ Error -> Error
+ end.
+
+
+
+%% --------------------------------------------------------------------------
+%% query record
+%%
+-record(q, {options,edns,dns}).
+
+
+
+%% Query the nameservers list first, then the alt_nameservers list
+res_query(Name, Class, Type, Opts, Timer) ->
+ #q{options=#options{nameservers=NSs}}=Q =
+ make_query(Name, Class, Type, Opts),
+ case do_query(Q, NSs, Timer) of
+ {error,nxdomain}=Error ->
+ res_query_alt(Q, Error, Timer);
+ {error,{nxdomain,_}}=Error ->
+ res_query_alt(Q, Error, Timer);
+ {ok,#dns_rec{anlist=[]}}=Reply ->
+ res_query_alt(Q, Reply, Timer);
+ Reply -> Reply
+ end.
+
+%% Query just the argument nameservers list
+res_query(Name, Class, Type, Opts, Timer, NSs) ->
+ Q = make_query(Name, Class, Type, Opts),
+ do_query(Q, NSs, Timer).
+
+res_query_alt(#q{options=#options{alt_nameservers=NSs}}=Q, Reply, Timer) ->
+ case NSs of
+ [] -> Reply;
+ _ ->
+ do_query(Q, NSs, Timer)
+ end.
+
+make_query(Dname, Class, Type, Opts) ->
+ Options = make_options(Opts),
+ case Options#options.edns of
+ false ->
+ #q{options=Options,
+ edns=undefined,
+ dns=make_query(Dname, Class, Type, Options, false)};
+ Edns ->
+ #q{options=Options,
+ edns=make_query(Dname, Class, Type, Options, Edns),
+ dns=fun () ->
+ make_query(Dname, Class, Type, Options, false)
+ end}
+ end.
+
+%% XXX It would be smarter to always construct both queries,
+%% but make the EDNS query point into the DNS query binary.
+%% It is only the header ARList length that needs to be changed,
+%% and the OPT record appended.
+make_query(Dname, Class, Type, Options, Edns) ->
+ Id = inet_db:res_option(next_id),
+ Recurse = Options#options.recurse,
+ ARList = case Edns of
+ false -> [];
+ _ ->
+ PSz = Options#options.udp_payload_size,
+ [#dns_rr_opt{udp_payload_size=PSz,
+ version=Edns}]
+ end,
+ Msg = #dns_rec{header=#dns_header{id=Id,
+ opcode='query',
+ rd=Recurse,
+ rcode=?NOERROR},
+ qdlist=[#dns_query{domain=Dname,
+ type=Type,
+ class=Class}],
+ arlist=ARList},
+ ?verbose(Options#options.verbose, "Query: ~p~n", [dns_msg(Msg)]),
+ Buffer = inet_dns:encode(Msg),
+ {Id, Buffer}.
+
+%% --------------------------------------------------------------------------
+%% socket helpers
+%%
+-record(sock, {inet=undefined, inet6=undefined}).
+
+udp_open(#sock{inet6=I}=S, {A,B,C,D,E,F,G,H}) when ?ip6(A,B,C,D,E,F,G,H) ->
+ case I of
+ undefined ->
+ case gen_udp:open(0, [{active,false},binary,inet6]) of
+ {ok,J} ->
+ {ok,S#sock{inet6=J}};
+ Error ->
+ Error
+ end;
+ _ ->
+ {ok,S}
+ end;
+udp_open(#sock{inet=I}=S, {A,B,C,D}) when ?ip(A,B,C,D) ->
+ case I of
+ undefined ->
+ case gen_udp:open(0, [{active,false},binary,inet]) of
+ {ok,J} ->
+ {ok,S#sock{inet=J}};
+ Error ->
+ Error
+ end;
+ _ ->
+ {ok,S}
+ end.
+
+udp_connect(#sock{inet6=I}, {A,B,C,D,E,F,G,H}=IP, Port)
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(Port) ->
+ gen_udp:connect(I, IP, Port);
+udp_connect(#sock{inet=I}, {A,B,C,D}=IP, Port)
+ when ?ip(A,B,C,D) ->
+ gen_udp:connect(I, IP, Port).
+
+udp_send(#sock{inet6=I}, {A,B,C,D,E,F,G,H}=IP, Port, Buffer)
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(Port) ->
+ gen_udp:send(I, IP, Port, Buffer);
+udp_send(#sock{inet=I}, {A,B,C,D}=IP, Port, Buffer)
+ when ?ip(A,B,C,D), ?port(Port) ->
+ gen_udp:send(I, IP, Port, Buffer).
+
+udp_recv(#sock{inet6=I}, {A,B,C,D,E,F,G,H}=IP, Port, Timeout)
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(Port) ->
+ do_udp_recv(fun(T) -> gen_udp:recv(I, 0, T) end, IP, Port, Timeout);
+udp_recv(#sock{inet=I}, {A,B,C,D}=IP, Port, Timeout)
+ when ?ip(A,B,C,D), ?port(Port) ->
+ do_udp_recv(fun(T) -> gen_udp:recv(I, 0, T) end, IP, Port, Timeout).
+
+do_udp_recv(Recv, IP, Port, Timeout) ->
+ do_udp_recv(Recv, IP, Port, Timeout,
+ if Timeout =/= 0 -> erlang:now(); true -> undefined end).
+
+do_udp_recv(Recv, IP, Port, Timeout, Then) ->
+ case Recv(Timeout) of
+ {ok,{IP,Port,Answer}} ->
+ {ok,Answer,erlang:max(0, Timeout - now_ms(erlang:now(), Then))};
+ {ok,_} when Timeout =:= 0 ->
+ {error,timeout};
+ {ok,_} ->
+ Now = erlang:now(),
+ T = erlang:max(0, Timeout - now_ms(Now, Then)),
+ do_udp_recv(Recv, IP, Port, T, Now);
+ Error -> Error
+ end.
+
+udp_close(#sock{inet=I,inet6=I6}) ->
+ if I =/= undefined -> gen_udp:close(I); true -> ok end,
+ if I6 =/= undefined -> gen_udp:close(I6); true -> ok end,
+ ok.
+
+%%
+%% Send a query to the nameserver and return the reply.
+%% UDP is tried first; TCP is used when usevc is set, when the query is
+%% larger than the UDP payload size, or when the UDP answer is truncated.
+%%
+%% Algorithm: (from manual page for dig)
+%% for i = 0 to retry - 1
+%% for j = 1 to num_servers
+%% send_query
+%% wait((time * (2**i)) / num_servers)
+%% end
+%% end
+%%
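+%% As a worked example with the defaults Tm = ?RES_TIMEOUT = 2000 ms
+%% and Retry = ?RES_RETRY = 3, the per-try UDP timeout computed in
+%% query_udp/10 below, (Tm * (1 bsl I)) div Retry, becomes
+%% 666 ms (I=0), 1333 ms (I=1) and 2666 ms (I=2), further limited by
+%% the overall timer via inet:timeout/2.
+%%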
+
+do_query(_Q, [], _Timer) ->
+ {error,nxdomain};
+do_query(#q{options=#options{retry=Retry}}=Q, NSs, Timer) ->
+ query_retries(Q, NSs, Timer, Retry, 0, #sock{}).
+
+query_retries(_Q, _NSs, _Timer, Retry, Retry, S) ->
+ udp_close(S),
+ {error,timeout};
+query_retries(Q, NSs, Timer, Retry, I, S0) ->
+ Num = length(NSs),
+ if Num =:= 0 ->
+ {error,timeout};
+ true ->
+ case query_nss(Q, NSs, Timer, Retry, I, S0, []) of
+ {S,{noanswer,ErrNSs}} -> %% remove unreachable nameservers
+ query_retries(Q, NSs--ErrNSs, Timer, Retry, I+1, S);
+ {S,Result} ->
+ udp_close(S),
+ Result
+ end
+ end.
+
+query_nss(_Q, [], _Timer, _Retry, _I, S, ErrNSs) ->
+ {S,{noanswer,ErrNSs}};
+query_nss(#q{edns=undefined}=Q, NSs, Timer, Retry, I, S, ErrNSs) ->
+ query_nss_dns(Q, NSs, Timer, Retry, I, S, ErrNSs);
+query_nss(Q, NSs, Timer, Retry, I, S, ErrNSs) ->
+ query_nss_edns(Q, NSs, Timer, Retry, I, S, ErrNSs).
+
+query_nss_edns(#q{options=#options{udp_payload_size=PSz}=Options,
+ edns={Id,Buffer}}=Q,
+ [{IP,Port}=NS|NSs]=NSs0, Timer, Retry, I, S0, ErrNSs) ->
+ {S,Res}=Reply = query_ns(S0, Id, Buffer, IP, Port, Timer,
+ Retry, I, Options, PSz),
+ case Res of
+ timeout -> {S,{error,timeout}};
+ {ok,_} -> Reply;
+ {error,{nxdomain,_}} -> Reply;
+ {error,{E,_}} when E =:= qfmterror; E =:= notimp; E =:= servfail;
+ E =:= badvers ->
+ query_nss_dns(Q, NSs0, Timer, Retry, I, S, ErrNSs);
+ {error,E} when E =:= fmt; E =:= enetunreach; E =:= econnrefused ->
+ query_nss(Q, NSs, Timer, Retry, I, S, [NS|ErrNSs]);
+ _Error ->
+ query_nss(Q, NSs, Timer, Retry, I, S, ErrNSs)
+ end.
+
+query_nss_dns(#q{dns=Qdns}=Q0, [{IP,Port}=NS|NSs],
+ Timer, Retry, I, S0, ErrNSs) ->
+ #q{options=Options,dns={Id,Buffer}}=Q =
+ if
+ is_function(Qdns, 0) -> Q0#q{dns=Qdns()};
+ true -> Q0
+ end,
+ {S,Res}=Reply = query_ns(S0, Id, Buffer, IP, Port, Timer,
+ Retry, I, Options, ?PACKETSZ),
+ case Res of
+ timeout -> {S,{error,timeout}};
+ {ok,_} -> Reply;
+ {error,{E,_}} when E =:= nxdomain; E =:= qfmterror -> Reply;
+ {error,E} when E =:= fmt; E =:= enetunreach; E =:= econnrefused ->
+ query_nss(Q, NSs, Timer, Retry, I, S, [NS|ErrNSs]);
+ _Error ->
+ query_nss(Q, NSs, Timer, Retry, I, S, ErrNSs)
+ end.
+
+query_ns(S0, Id, Buffer, IP, Port, Timer, Retry, I,
+ #options{timeout=Tm,usevc=UseVC,verbose=Verbose},
+ PSz) ->
+ case UseVC orelse iolist_size(Buffer) > PSz of
+ true ->
+ {S0,query_tcp(Tm, Id, Buffer, IP, Port, Timer, Verbose)};
+ false ->
+ case udp_open(S0, IP) of
+ {ok,S} ->
+ {S,case query_udp(S, Id, Buffer, IP, Port, Timer,
+ Retry, I, Tm, Verbose) of
+ {ok,#dns_rec{header=H}} when H#dns_header.tc ->
+ query_tcp(Tm, Id, Buffer,
+ IP, Port, Timer, Verbose);
+ Reply -> Reply
+ end};
+ Error ->
+ {S0,Error}
+ end
+ end.
+
+query_udp(S, Id, Buffer, IP, Port, Timer, Retry, I, Tm, Verbose) ->
+ Timeout = inet:timeout( (Tm * (1 bsl I)) div Retry, Timer),
+ ?verbose(Verbose, "Try UDP server : ~p:~p (timeout=~w)\n",
+ [IP, Port, Timeout]),
+ udp_connect(S, IP, Port),
+ udp_send(S, IP, Port, Buffer),
+ query_udp_recv(S, IP, Port, Id, Timeout, Verbose).
+
+query_udp_recv(S, IP, Port, Id, Timeout, Verbose) ->
+ case udp_recv(S, IP, Port, Timeout) of
+ {ok,Answer,T} ->
+ case decode_answer(Answer, Id, Verbose) of
+ {error, badid} ->
+ query_udp_recv(S, IP, Port, Id, T, Verbose);
+ Reply -> Reply
+ end;
+ {error, timeout} when Timeout =:= 0 ->
+ ?verbose(Verbose, "UDP server timeout\n", []),
+ timeout;
+ Error ->
+ ?verbose(Verbose, "UDP server error: ~p\n", [Error]),
+ Error
+ end.
+
+query_tcp(Tm, Id, Buffer, IP, Port, Timer, Verbose) ->
+ Timeout = inet:timeout(Tm*5, Timer),
+ ?verbose(Verbose, "Try TCP server : ~p:~p (timeout=~w)\n",
+ [IP, Port, Timeout]),
+ Family = case IP of
+ {A,B,C,D} when ?ip(A,B,C,D) -> inet;
+ {A,B,C,D,E,F,G,H} when ?ip6(A,B,C,D,E,F,G,H) -> inet6
+ end,
+ try gen_tcp:connect(IP, Port,
+ [{active,false},{packet,2},binary,Family],
+ Timeout) of
+ {ok, S} ->
+ gen_tcp:send(S, Buffer),
+ case gen_tcp:recv(S, 0, Timeout) of
+ {ok, Answer} ->
+ gen_tcp:close(S),
+ case decode_answer(Answer, Id, Verbose) of
+ {ok, _} = OK -> OK;
+ {error, badid} -> {error, servfail};
+ Error -> Error
+ end;
+ Error ->
+ gen_tcp:close(S),
+ case Error of
+ {error, timeout} when Timeout =:= 0 ->
+ ?verbose(Verbose, "TCP server recv timeout\n", []),
+ timeout;
+ _ ->
+ ?verbose(Verbose, "TCP server recv error: ~p\n",
+ [Error]),
+ Error
+ end
+ end;
+ {error, timeout} when Timeout =:= 0 ->
+ ?verbose(Verbose, "TCP server connect timeout\n", []),
+ timeout;
+ Error ->
+ ?verbose(Verbose, "TCP server error: ~p\n", [Error]),
+ Error
+ catch
+ _:_ -> {error, einval}
+ end.
+
+decode_answer(Answer, Id, Verbose) ->
+ case inet_dns:decode(Answer) of
+ {ok, Msg} ->
+ ?verbose(Verbose, "Got reply: ~p~n", [dns_msg(Msg)]),
+ E = case lists:keyfind(dns_rr_opt, 1, Msg#dns_rec.arlist) of
+ false -> 0;
+ #dns_rr_opt{ext_rcode=ExtRCode} -> ExtRCode
+ end,
+ H = Msg#dns_rec.header,
+ RCode = (E bsl 4) bor H#dns_header.rcode,
+ case RCode of
+ ?NOERROR ->
+ if H#dns_header.id =/= Id ->
+ {error,badid};
+ length(Msg#dns_rec.qdlist) =/= 1 ->
+ {error,{noquery,Msg}};
+ true ->
+ {ok, Msg}
+ end;
+ ?FORMERR -> {error,{qfmterror,Msg}};
+ ?SERVFAIL -> {error,{servfail,Msg}};
+ ?NXDOMAIN -> {error,{nxdomain,Msg}};
+ ?NOTIMP -> {error,{notimp,Msg}};
+ ?REFUSED -> {error,{refused,Msg}};
+ ?BADVERS -> {error,{badvers,Msg}};
+ _ -> {error,{unknown,Msg}}
+ end;
+ Error ->
+ ?verbose(Verbose, "Got reply: ~p~n", [Error]),
+ Error
+ end.
+
+%%
+%% Transform domain name or address
+%% 1. "a.b.c" =>
+%% "a.b.c"
+%% 2. "1.2.3.4" =>
+%% "4.3.2.1.IN-ADDR.ARPA"
+%% 3. "4321:0:1:2:3:4:567:89ab" =>
+%% "b.a.9.8.7.6.5.0.4.0.0.0.3.0.0.0.2.0.0.0.1.0.0.0.0.0.0.1.2.3.4.IP6.ARPA"
+%% 4. {1,2,3,4} => as 2.
+%% 5. {1,2,3,4,5,6,7,8} => as 3.
+%%
+nsdname({A,B,C,D}) ->
+ {ok, dn_in_addr_arpa(A,B,C,D)};
+nsdname({A,B,C,D,E,F,G,H}) ->
+ {ok, dn_ip6_int(A,B,C,D,E,F,G,H)};
+nsdname(Name) when is_list(Name) ->
+ case inet_parse:visible_string(Name) of
+ true ->
+ case inet_parse:address(Name) of
+ {ok, Addr} ->
+ nsdname(Addr);
+ _ ->
+ {ok, Name}
+ end;
+ _ -> {error, formerr}
+ end;
+nsdname(Name) when is_atom(Name) ->
+ nsdname(atom_to_list(Name));
+nsdname(_) -> {error, formerr}.
+
+dn_in_addr_arpa(A,B,C,D) ->
+ integer_to_list(D) ++
+ ("." ++ integer_to_list(C) ++
+ ("." ++ integer_to_list(B) ++
+ ("." ++ integer_to_list(A) ++ ".IN-ADDR.ARPA"))).
+
+dn_ip6_int(A,B,C,D,E,F,G,H) ->
+ dnib(H) ++
+ (dnib(G) ++
+ (dnib(F) ++
+ (dnib(E) ++
+ (dnib(D) ++
+ (dnib(C) ++
+ (dnib(B) ++
+ (dnib(A) ++ "IP6.ARPA"))))))).
+
+
+
+-compile({inline, [dnib/1, dnib/3]}).
+dnib(X) ->
+ L = erlang:integer_to_list(X, 16),
+ dnib(4-length(L), L, []).
+%%
+dnib(0, [], Acc) -> Acc;
+dnib(0, [C|Cs], Acc) ->
+ dnib(0, Cs, [C,$.|Acc]);
+dnib(N, Cs, Acc) ->
+ dnib(N-1, Cs, [$0,$.|Acc]).
+
+
+
+dns_msg([]) -> [];
+dns_msg([{Field,Msg}|Fields]) ->
+ [{Field,dns_msg(Msg)}|dns_msg(Fields)];
+dns_msg([Msg|Msgs]) ->
+ [dns_msg(Msg)|dns_msg(Msgs)];
+dns_msg(Msg) ->
+ case inet_dns:record_type(Msg) of
+ undefined -> Msg;
+ Type ->
+ Fields = inet_dns:Type(Msg),
+ {Type,dns_msg(Fields)}
+ end.
+
+-compile({inline, [now_ms/2]}).
+now_ms({Meg1,Sec1,Mic1}, {Meg0,Sec0,Mic0}) ->
+ ((Meg1-Meg0)*1000000 + (Sec1-Sec0))*1000 + ((Mic1-Mic0) div 1000).
diff --git a/lib/kernel/src/inet_res.hrl b/lib/kernel/src/inet_res.hrl
new file mode 100644
index 0000000000..bfaf32a1ba
--- /dev/null
+++ b/lib/kernel/src/inet_res.hrl
@@ -0,0 +1,42 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%% DNS & resolver definitions
+%%
+
+-define(RES_TIMEOUT, 2000). %% milliseconds between retries
+-define(RES_RETRY, 3). %% number of retries
+-define(RES_FILE_UPDATE_TM, 5). %% seconds between file_info checks
+
+-define(CACHE_LIMIT, 100). %% number of cached dns_rr
+-define(CACHE_REFRESH, 60*60*1000). %% refresh interval
+
+-define(PACKETSZ, 512). %% maximum UDP packet size (octets)
+-define(MAXDNAME, 256). %% maximum domain name length
+-define(MAXCDNAME, 255). %% maximum compressed domain name length
+-define(MAXLABEL, 63). %% maximum length of a domain label
+%% Number of bytes of fixed size data in query structure
+-define(QFIXEDSZ, 4).
+%% number of bytes of fixed size data in resource record
+-define(RRFIXEDSZ, 10).
+
+%%
+%% Internet nameserver port number
+%%
+-define(NAMESERVER_PORT, 53).
diff --git a/lib/kernel/src/inet_sctp.erl b/lib/kernel/src/inet_sctp.erl
new file mode 100644
index 0000000000..30c0e85dd9
--- /dev/null
+++ b/lib/kernel/src/inet_sctp.erl
@@ -0,0 +1,139 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% SCTP protocol contribution by Leonid Timochouk and Serge Aleynikov.
+%% See also: $ERL_TOP/lib/kernel/AUTHORS
+%%
+-module(inet_sctp).
+
+%% This module provides functions for communicating with
+%% sockets using the SCTP protocol. The implementation assumes that
+%% the OS kernel supports SCTP and provides the user-level SCTP Socket API:
+%% http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13
+
+-include("inet_sctp.hrl").
+-include("inet_int.hrl").
+
+-define(FAMILY, inet).
+-export([getserv/1,getaddr/1,getaddr/2,translate_ip/1]).
+-export([open/1,close/1,listen/2,connect/5,sendmsg/3,recv/2]).
+
+
+
+getserv(Port) when is_integer(Port) -> {ok, Port};
+getserv(Name) when is_atom(Name) ->
+ inet:getservbyname(Name, sctp);
+getserv(_) ->
+ {error,einval}.
+
+getaddr(Address) ->
+ inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) ->
+ inet:getaddr_tm(Address, ?FAMILY, Timer).
+
+translate_ip(IP) ->
+ inet:translate_ip(IP, ?FAMILY).
+
+
+
+open(Opts) ->
+ case inet:sctp_options(Opts, ?MODULE) of
+ {ok,#sctp_opts{fd=Fd,ifaddr=Addr,port=Port,opts=SOs}} ->
+ inet:open(Fd, Addr, Port, SOs, sctp, ?FAMILY, ?MODULE);
+ Error -> Error
+ end.
+
+close(S) ->
+ prim_inet:close(S).
+
+listen(S, Flag) ->
+ prim_inet:listen(S, Flag).
+
+connect(S, Addr, Port, Opts, Timer) ->
+ case prim_inet:chgopts(S, Opts) of
+ ok ->
+ case prim_inet:getopt(S, active) of
+ {ok,Active} ->
+ Timeout = inet:timeout(Timer),
+ case prim_inet:connect(S, Addr, Port, Timeout) of
+ ok ->
+ connect_get_assoc(S, Addr, Port, Active, Timer);
+ Err1 -> Err1
+ end;
+ Err2 -> Err2
+ end;
+ Err3 -> Err3
+ end.
+
+%% XXX race condition problem
+%%
+%% If an incoming #sctp_assoc_change{} arrives after
+%% prim_inet:getopt(S, active) above but before the
+%% #sctp_assoc_change{state=comm_up} originating from
+%% prim_inet:connect(S, Addr, Port, Timeout) above,
+%% connect_get_assoc/5 below mistakes it for an invalid response
+%% for a socket in {active,false} or {active,once} modes.
+%%
+%% In {active,true} mode it probably gets it right, but it is
+%% a blocking connect that is implemented even for {active,true},
+%% and that may be a shortcoming. A non-blocking connect
+%% would be nice to have.
+
+connect_get_assoc(S, Addr, Port, false, Timer) ->
+ case recv(S, inet:timeout(Timer)) of
+ {ok, {Addr, Port, [], #sctp_assoc_change{state=St}=Ev}} ->
+ if St =:= comm_up ->
+ %% Yes, successfully connected, return the whole
+ %% sctp_assoc_change event (containing, in particular,
+ %% the AssocID).
+ %% NB: we consider the connection to be successful
+ %% even if the number of OutStreams is not the same
+ %% as requested by the user:
+ {ok,Ev};
+ true ->
+ {error,Ev}
+ end;
+ %% Any other event: Error:
+ {ok, Msg} ->
+ {error, Msg};
+ {error,_}=Error ->
+ Error
+ end;
+connect_get_assoc(S, Addr, Port, Active, Timer) ->
+ Timeout = inet:timeout(Timer),
+ receive
+ {sctp,S,Addr,Port,{[],#sctp_assoc_change{state=St}=Ev}} ->
+ case Active of
+ once ->
+ prim_inet:setopt(S, active, once);
+ _ -> ok
+ end,
+ if St =:= comm_up ->
+ {ok,Ev};
+ true ->
+ {error,Ev}
+ end
+ after Timeout ->
+ {error,timeout}
+ end.
+
+sendmsg(S, SRI, Data) ->
+ prim_inet:sendmsg(S, SRI, Data).
+
+recv(S, Timeout) ->
+ prim_inet:recvfrom(S, 0, Timeout).
diff --git a/lib/kernel/src/inet_tcp.erl b/lib/kernel/src/inet_tcp.erl
new file mode 100644
index 0000000000..6dadccd6a9
--- /dev/null
+++ b/lib/kernel/src/inet_tcp.erl
@@ -0,0 +1,153 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_tcp).
+
+%% Socket server for TCP/IP
+
+-export([connect/3, connect/4, listen/2, accept/1, accept/2, close/1]).
+-export([send/2, send/3, recv/2, recv/3, unrecv/2]).
+-export([shutdown/2]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-export([getserv/1, getaddr/1, getaddr/2, getaddrs/1, getaddrs/2]).
+
+
+-include("inet_int.hrl").
+
+%% inet_tcp port lookup
+getserv(Port) when is_integer(Port) -> {ok, Port};
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,tcp).
+
+%% inet_tcp address lookup
+getaddr(Address) -> inet:getaddr(Address, inet).
+getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet, Timer).
+
+%% inet_tcp address lookup
+getaddrs(Address) -> inet:getaddrs(Address, inet).
+getaddrs(Address,Timer) -> inet:getaddrs_tm(Address,inet,Timer).
+
+%%
+%% Send data on a socket
+%%
+send(Socket, Packet, Opts) -> prim_inet:send(Socket, Packet, Opts).
+send(Socket, Packet) -> prim_inet:send(Socket, Packet, []).
+
+%%
+%% Receive data from a socket (inactive only)
+%%
+recv(Socket, Length) -> prim_inet:recv(Socket, Length).
+recv(Socket, Length, Timeout) -> prim_inet:recv(Socket, Length, Timeout).
+
+unrecv(Socket, Data) -> prim_inet:unrecv(Socket, Data).
+
+%%
+%% Shutdown one end of a socket
+%%
+shutdown(Socket, How) ->
+ prim_inet:shutdown(Socket, How).
+
+%%
+%% Close a socket (async)
+%%
+close(Socket) ->
+ inet:tcp_close(Socket).
+
+%%
+%% Set controlling process
+%%
+controlling_process(Socket, NewOwner) ->
+ inet:tcp_controlling_process(Socket, NewOwner).
+
+%%
+%% Connect
+%%
+connect(Address, Port, Opts) ->
+ do_connect(Address, Port, Opts, infinity).
+
+connect(Address, Port, Opts, infinity) ->
+ do_connect(Address, Port, Opts, infinity);
+connect(Address, Port, Opts, Timeout) when is_integer(Timeout),
+ Timeout >= 0 ->
+ do_connect(Address, Port, Opts, Timeout).
+
+do_connect({A,B,C,D}, Port, Opts, Time) when ?ip(A,B,C,D), ?port(Port) ->
+ case inet:connect_options(Opts, inet) of
+ {error, Reason} -> exit(Reason);
+ {ok, #connect_opts{fd=Fd,
+ ifaddr=BAddr={Ab,Bb,Cb,Db},
+ port=BPort,
+ opts=SockOpts}}
+ when ?ip(Ab,Bb,Cb,Db), ?port(BPort) ->
+ case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet,?MODULE) of
+ {ok, S} ->
+ case prim_inet:connect(S, {A,B,C,D}, Port, Time) of
+ ok -> {ok,S};
+ Error -> prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end;
+ {ok, _} -> exit(badarg)
+ end.
+
+%%
+%% Listen
+%%
+listen(Port, Opts) ->
+ case inet:listen_options([{port,Port} | Opts], inet) of
+ {error,Reason} -> exit(Reason);
+ {ok, #listen_opts{fd=Fd,
+ ifaddr=BAddr={A,B,C,D},
+ port=BPort,
+ opts=SockOpts}=R}
+ when ?ip(A,B,C,D), ?port(BPort) ->
+ case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet,?MODULE) of
+ {ok, S} ->
+ case prim_inet:listen(S, R#listen_opts.backlog) of
+ ok -> {ok, S};
+ Error -> prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end;
+ {ok, _} -> exit(badarg)
+ end.
+
+%%
+%% Accept
+%%
+accept(L) ->
+ case prim_inet:accept(L) of
+ {ok, S} ->
+ inet_db:register_socket(S, ?MODULE),
+ {ok,S};
+ Error -> Error
+ end.
+
+accept(L,Timeout) ->
+ case prim_inet:accept(L,Timeout) of
+ {ok, S} ->
+ inet_db:register_socket(S, ?MODULE),
+ {ok,S};
+ Error -> Error
+ end.
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ inet:fdopen(Fd, Opts, tcp, inet, ?MODULE).
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
new file mode 100644
index 0000000000..7f935c2b36
--- /dev/null
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -0,0 +1,448 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_tcp_dist).
+
+%% Handles the connection setup phase with other Erlang nodes.
+
+-export([listen/1, accept/1, accept_connection/5,
+ setup/5, close/1, select/1, is_node_name/1]).
+
+%% internal exports
+
+-export([accept_loop/2,do_accept/6,do_setup/6,getstat/1,tick/1]).
+
+-import(error_logger,[error_msg/2]).
+
+-include("net_address.hrl").
+
+
+
+-define(to_port(Socket, Data, Opts),
+ case inet_tcp:send(Socket, Data, Opts) of
+ {error, closed} ->
+ self() ! {tcp_closed, Socket},
+ {error, closed};
+ R ->
+ R
+ end).
+
+
+-include("dist.hrl").
+-include("dist_util.hrl").
+
+%% ------------------------------------------------------------
+%% Select this protocol based on node name
+%% select(Node) => Bool
+%% ------------------------------------------------------------
+
+select(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_,_Host] -> true;
+ _ -> false
+ end.
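+
+%% For example (illustrative node names):
+%%   select('foo@somehost') -> true
+%%   select(foo)            -> false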
+
+%% ------------------------------------------------------------
+%% Create the listen socket, i.e. the port that this erlang
+%% node is accessible through.
+%% ------------------------------------------------------------
+
+listen(Name) ->
+ case do_listen([{active, false}, {packet,2}, {reuseaddr, true}]) of
+ {ok, Socket} ->
+ TcpAddress = get_tcp_address(Socket),
+ {_,Port} = TcpAddress#net_address.address,
+ {ok, Creation} = erl_epmd:register_node(Name, Port),
+ {ok, {Socket, TcpAddress, Creation}};
+ Error ->
+ Error
+ end.
+
+do_listen(Options0) ->
+ {First,Last} = case application:get_env(kernel,inet_dist_listen_min) of
+ {ok,N} when is_integer(N) ->
+ case application:get_env(kernel,
+ inet_dist_listen_max) of
+ {ok,M} when is_integer(M) ->
+ {N,M};
+ _ ->
+ {N,N}
+ end;
+ _ ->
+ {0,0}
+ end,
+ Options = case application:get_env(kernel, inet_dist_use_interface) of
+ {ok, Ip} ->
+ [{ip, Ip} | Options0];
+ _ ->
+ Options0
+ end,
+ do_listen(First, Last, [{backlog,128}|Options]).
+
+do_listen(First,Last,_) when First > Last ->
+ {error,eaddrinuse};
+do_listen(First,Last,Options) ->
+ case inet_tcp:listen(First, Options) of
+ {error, eaddrinuse} ->
+ do_listen(First+1,Last,Options);
+ Other ->
+ Other
+ end.
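+
+%% A minimal configuration sketch: the listen port range above comes
+%% from the kernel application environment and can be set on the
+%% command line (the node name and port numbers are illustrative):
+%%   erl -name n1 -kernel inet_dist_listen_min 4370 -kernel inet_dist_listen_max 4400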
+
+%% ------------------------------------------------------------
+%% Accepts new connection attempts from other Erlang nodes.
+%% ------------------------------------------------------------
+
+accept(Listen) ->
+ spawn_opt(?MODULE, accept_loop, [self(), Listen], [link, {priority, max}]).
+
+accept_loop(Kernel, Listen) ->
+ case inet_tcp:accept(Listen) of
+ {ok, Socket} ->
+ Kernel ! {accept,self(),Socket,inet,tcp},
+ controller(Kernel, Socket),
+ accept_loop(Kernel, Listen);
+ Error ->
+ exit(Error)
+ end.
+
+controller(Kernel, Socket) ->
+ receive
+ {Kernel, controller, Pid} ->
+ flush_controller(Pid, Socket),
+ inet_tcp:controlling_process(Socket, Pid),
+ flush_controller(Pid, Socket),
+ Pid ! {self(), controller};
+ {Kernel, unsupported_protocol} ->
+ exit(unsupported_protocol)
+ end.
+
+flush_controller(Pid, Socket) ->
+ receive
+ {tcp, Socket, Data} ->
+ Pid ! {tcp, Socket, Data},
+ flush_controller(Pid, Socket);
+ {tcp_closed, Socket} ->
+ Pid ! {tcp_closed, Socket},
+ flush_controller(Pid, Socket)
+ after 0 ->
+ ok
+ end.
+
+%% ------------------------------------------------------------
+%% Accepts a new connection attempt from another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
+ spawn_opt(?MODULE, do_accept,
+ [self(), AcceptPid, Socket, MyNode, Allowed, SetupTime],
+ [link, {priority, max}]).
+
+do_accept(Kernel, AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
+ receive
+ {AcceptPid, controller} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case check_ip(Socket) of
+ true ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ allowed = Allowed,
+ f_send = fun(S,D) -> inet_tcp:send(S,D) end,
+ f_recv = fun(S,N,T) -> inet_tcp:recv(S,N,T)
+ end,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts(S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts(S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_getll = fun(S) ->
+ inet:getll(S)
+ end,
+ f_address = fun get_remote_id/2,
+ mf_tick = fun ?MODULE:tick/1,
+ mf_getstat = fun ?MODULE:getstat/1
+ },
+ dist_util:handshake_other_started(HSData);
+ {false,IP} ->
+ error_msg("** Connection attempt from "
+ "disallowed IP ~w ** ~n", [IP]),
+ ?shutdown(no_node)
+ end
+ end.
+
+
+%% we may not always want the nodelay behaviour
+%% for performance reasons
+
+nodelay() ->
+ case application:get_env(kernel, dist_nodelay) of
+ undefined ->
+ {nodelay, true};
+ {ok, true} ->
+ {nodelay, true};
+ {ok, false} ->
+ {nodelay, false};
+ _ ->
+ {nodelay, true}
+ end.
+
+
+%% ------------------------------------------------------------
+%% Get remote information about a Socket.
+%% ------------------------------------------------------------
+get_remote_id(Socket, Node) ->
+ case inet:peername(Socket) of
+ {ok,Address} ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_,Host] ->
+ #net_address{address=Address,host=Host,
+ protocol=tcp,family=inet};
+ _ ->
+ %% No '@' or more than one '@' in node name.
+ ?shutdown(no_node)
+ end;
+ {error, _Reason} ->
+ ?shutdown(no_node)
+ end.
+
+%% ------------------------------------------------------------
+%% Setup a new connection to another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ spawn_opt(?MODULE, do_setup,
+ [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
+ [link, {priority, max}]).
+
+do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
+ [Name, Address] = splitnode(Node, LongOrShortNames),
+ case inet:getaddr(Address, inet) of
+ {ok, Ip} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case erl_epmd:port_please(Name, Ip) of
+ {port, TcpPort, Version} ->
+ ?trace("port_please(~p) -> version ~p~n",
+ [Node,Version]),
+ dist_util:reset_timer(Timer),
+ case inet_tcp:connect(Ip, TcpPort,
+ [{active, false},
+ {packet,2}]) of
+ {ok, Socket} ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun inet_tcp:send/2,
+ f_recv = fun inet_tcp:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address{
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = inet}
+ end,
+ mf_tick = fun ?MODULE:tick/1,
+ mf_getstat = fun ?MODULE:getstat/1,
+ request_type = Type
+ },
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% port_please !
+ ?trace("other node (~p) "
+ "closed since port_please.~n",
+ [Node]),
+ ?shutdown(Node)
+ end;
+ _ ->
+ ?trace("port_please (~p) "
+ "failed.~n", [Node]),
+ ?shutdown(Node)
+ end;
+ _Other ->
+ ?trace("inet_getaddr(~p) "
+ "failed (~p).~n", [Node,_Other]),
+ ?shutdown(Node)
+ end.
+
+%%
+%% Close a socket.
+%%
+close(Socket) ->
+ inet_tcp:close(Socket).
+
+
+%% If Node is illegal, terminate the connection setup.
+splitnode(Node, LongOrShortNames) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [Name|Tail] when Tail =/= [] ->
+ Host = lists:append(Tail),
+ case split_node(Host, $., []) of
+ [_] when LongOrShortNames =:= longnames ->
+ error_msg("** System running to use "
+ "fully qualified "
+ "hostnames **~n"
+ "** Hostname ~s is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ L when length(L) > 1, LongOrShortNames =:= shortnames ->
+ error_msg("** System NOT running to use fully qualified "
+ "hostnames **~n"
+ "** Hostname ~s is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ _ ->
+ [Name, Host]
+ end;
+ [_] ->
+ error_msg("** Nodename ~p illegal, no '@' character **~n",
+ [Node]),
+ ?shutdown(Node);
+ _ ->
+ error_msg("** Nodename ~p illegal **~n", [Node]),
+ ?shutdown(Node)
+ end.
+
+split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
+split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
+split_node([], _, Ack) -> [lists:reverse(Ack)].
+
+%% ------------------------------------------------------------
+%% Fetch local information about a Socket.
+%% ------------------------------------------------------------
+get_tcp_address(Socket) ->
+ {ok, Address} = inet:sockname(Socket),
+ {ok, Host} = inet:gethostname(),
+ #net_address {
+ address = Address,
+ host = Host,
+ protocol = tcp,
+ family = inet
+ }.
+
+%% ------------------------------------------------------------
+%% Only accept new connection attempts from nodes on our own
+%% LAN, if the check_ip environment parameter is true.
+%% ------------------------------------------------------------
+check_ip(Socket) ->
+ case application:get_env(check_ip) of
+ {ok, true} ->
+ case get_ifs(Socket) of
+ {ok, IFs, IP} ->
+ check_ip(IFs, IP);
+ _ ->
+ ?shutdown(no_node)
+ end;
+ _ ->
+ true
+ end.
+
+get_ifs(Socket) ->
+ case inet:peername(Socket) of
+ {ok, {IP, _}} ->
+ case inet:getif(Socket) of
+ {ok, IFs} -> {ok, IFs, IP};
+ Error -> Error
+ end;
+ Error ->
+ Error
+ end.
+
+check_ip([{OwnIP, _, Netmask}|IFs], PeerIP) ->
+ case {mask(Netmask, PeerIP), mask(Netmask, OwnIP)} of
+ {M, M} -> true;
+ _ -> check_ip(IFs, PeerIP)
+ end;
+check_ip([], PeerIP) ->
+ {false, PeerIP}.
+
+mask({M1,M2,M3,M4}, {IP1,IP2,IP3,IP4}) ->
+ {M1 band IP1,
+ M2 band IP2,
+ M3 band IP3,
+ M4 band IP4}.
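+
+%% For example (illustrative addresses):
+%%   mask({255,255,255,0}, {192,0,2,17}) -> {192,0,2,0}
+%% so a peer is accepted only if it masks to the same value as one of
+%% our own interface addresses.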
+
+is_node_name(Node) when is_atom(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_, _Host] -> true;
+ _ -> false
+ end;
+is_node_name(_Node) ->
+ false.
+
+tick(Sock) ->
+ ?to_port(Sock,[],[force]).
+
+getstat(Socket) ->
+ case inet:getstat(Socket, [recv_cnt, send_cnt, send_pend]) of
+ {ok, Stat} ->
+ split_stat(Stat,0,0,0);
+ Error ->
+ Error
+ end.
+
+split_stat([{recv_cnt, R}|Stat], _, W, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_cnt, W}|Stat], R, _, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_pend, P}|Stat], R, W, _) ->
+ split_stat(Stat, R, W, P);
+split_stat([], R, W, P) ->
+ {ok, R, W, P}.
+
+
diff --git a/lib/kernel/src/inet_udp.erl b/lib/kernel/src/inet_udp.erl
new file mode 100644
index 0000000000..9a4089ab19
--- /dev/null
+++ b/lib/kernel/src/inet_udp.erl
@@ -0,0 +1,132 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(inet_udp).
+
+-export([open/1, open/2, close/1]).
+-export([send/2, send/4, recv/2, recv/3, connect/3]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-export([getserv/1, getaddr/1, getaddr/2]).
+
+-include("inet_int.hrl").
+
+-define(RECBUF, (8*1024)).
+
+
+
+%% inet_udp port lookup
+getserv(Port) when is_integer(Port) -> {ok, Port};
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,udp).
+
+%% inet_udp address lookup
+getaddr(Address) -> inet:getaddr(Address, inet).
+getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet, Timer).
+
+open(Port) -> open(Port, []).
+
+open(Port, Opts) ->
+ case inet:udp_options(
+ [{port,Port}, {recbuf, ?RECBUF} | Opts],
+ inet) of
+ {error, Reason} -> exit(Reason);
+ {ok, #udp_opts{fd=Fd,
+ ifaddr=BAddr={A,B,C,D},
+ port=BPort,
+ opts=SockOpts}} when ?ip(A,B,C,D), ?port(BPort) ->
+ inet:open(Fd,BAddr,BPort,SockOpts,udp,inet,?MODULE);
+ {ok, _} -> exit(badarg)
+ end.
+
+send(S,{A,B,C,D},P,Data) when ?ip(A,B,C,D), ?port(P) ->
+ prim_inet:sendto(S, {A,B,C,D}, P, Data).
+
+send(S, Data) ->
+ prim_inet:sendto(S, {0,0,0,0}, 0, Data).
+
+connect(S, {A,B,C,D}, P) when ?ip(A,B,C,D), ?port(P) ->
+ prim_inet:connect(S, {A,B,C,D}, P).
+
+recv(S,Len) ->
+ prim_inet:recvfrom(S, Len).
+
+recv(S,Len,Time) ->
+ prim_inet:recvfrom(S, Len, Time).
+
+close(S) ->
+ inet:udp_close(S).
+
+%%
+%% Set controlling process:
+%% 1) First sync the socket into a known state
+%% 2) Move all messages onto the new owner's message queue
+%% 3) Commit the owner
+%% 4) Wait for an ack from the new owner (since the socket does some link and unlink)
+%%
+
+controlling_process(Socket, NewOwner) ->
+ inet:udp_controlling_process(Socket, NewOwner).
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ inet:fdopen(Fd,
+ optuniquify([{recbuf, ?RECBUF} | Opts]),
+ udp, inet, ?MODULE).
+
+
+%% Remove all duplicate options from an option list.
+%% The last occurring duplicate is used, and the order is preserved.
+%%
+%% Here's how:
+%% Reverse the list.
+%% For each head option go through the tail and remove
+%% all occurrences of the same option from the tail.
+%% Store that head option and iterate using the new tail.
+%% Return the list of stored head options.
+optuniquify(List) ->
+ optuniquify(lists:reverse(List), []).
+
+optuniquify([], Result) ->
+ Result;
+optuniquify([Opt | Tail], Result) ->
+ %% Remove all occurrences of Opt in Tail,
+ %% prepend Opt to Result,
+ %% then iterate back here.
+ optuniquify(Opt, Tail, [], Result).
+
+%% All duplicates of current option are now removed
+optuniquify(Opt, [], Rest, Result) ->
+ %% Store unique option
+ optuniquify(lists:reverse(Rest), [Opt | Result]);
+%% Duplicate option tuple
+optuniquify(Opt0, [Opt1 | Tail], Rest, Result)
+ when tuple_size(Opt0) =:= tuple_size(Opt1),
+ element(1, Opt0) =:= element(1, Opt1) ->
+ %% Waste duplicate
+ optuniquify(Opt0, Tail, Rest, Result);
+%% Duplicate option atom or other term
+optuniquify(Opt, [Opt | Tail], Rest, Result) ->
+ %% Waste duplicate
+ optuniquify(Opt, Tail, Rest, Result);
+%% Non-duplicate option
+optuniquify(Opt, [X | Tail], Rest, Result) ->
+ %% Keep non-duplicate
+ optuniquify(Opt, Tail, [X | Rest], Result).
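+
+%% For example (illustrative options):
+%%   optuniquify([{recbuf,8192}, {active,true}, {recbuf,16384}])
+%%     -> [{active,true}, {recbuf,16384}]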
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
new file mode 100644
index 0000000000..17ab84c177
--- /dev/null
+++ b/lib/kernel/src/kernel.app.src
@@ -0,0 +1,120 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% This is an -*- erlang -*- file.
+%%
+{application, kernel,
+ [
+ {description, "ERTS CXC 138 10"},
+ {vsn, "%VSN%"},
+ {modules, [application,
+ application_controller,
+ application_master,
+ application_starter,
+ auth,
+ code,
+ packages,
+ code_server,
+ dist_util,
+ erl_boot_server,
+ erl_distribution,
+ erl_reply,
+ error_handler,
+ error_logger,
+ file,
+ file_server,
+ file_io_server,
+ global,
+ global_group,
+ global_search,
+ group,
+ heart,
+ hipe_unified_loader,
+ inet6_tcp,
+ inet6_tcp_dist,
+ inet6_udp,
+ inet6_sctp,
+ inet_config,
+ inet_hosts,
+ inet_gethost_native,
+ inet_tcp_dist,
+ kernel,
+ kernel_config,
+ net,
+ net_adm,
+ net_kernel,
+ os,
+ ram_file,
+ rpc,
+ user,
+ user_drv,
+ user_sup,
+ disk_log,
+ disk_log_1,
+ disk_log_server,
+ disk_log_sup,
+ dist_ac,
+ erl_ddll,
+ erl_epmd,
+ erts_debug,
+ gen_tcp,
+ gen_udp,
+ gen_sctp,
+ inet,
+ inet_db,
+ inet_dns,
+ inet_parse,
+ inet_res,
+ inet_tcp,
+ inet_udp,
+ inet_sctp,
+ pg2,
+ seq_trace,
+ standard_error,
+ wrap_log_reader]},
+ {registered, [application_controller,
+ erl_reply,
+ auth,
+ boot_server,
+ code_server,
+ disk_log_server,
+ disk_log_sup,
+ erl_prim_loader,
+ error_logger,
+ file_server_2,
+ fixtable_server,
+ global_group,
+ global_name_server,
+ heart,
+ init,
+ kernel_config,
+ kernel_sup,
+ net_kernel,
+ net_sup,
+ rex,
+ user,
+ os_server,
+ ddll_server,
+ erl_epmd,
+ inet_db,
+ pg2]},
+ {applications, []},
+ {env, [{error_logger, tty}]},
+ {mod, {kernel, []}}
+ ]
+}.
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
new file mode 100644
index 0000000000..54a63833e6
--- /dev/null
+++ b/lib/kernel/src/kernel.appup.src
@@ -0,0 +1 @@
+{"%VSN%",[],[]}.
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
new file mode 100644
index 0000000000..92ee7b441a
--- /dev/null
+++ b/lib/kernel/src/kernel.erl
@@ -0,0 +1,292 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(kernel).
+
+-behaviour(supervisor).
+
+%% External exports
+-export([start/2, init/1, stop/1]).
+-export([config_change/3]).
+
+%%%-----------------------------------------------------------------
+%%% The kernel is the first application started.
+%%% Callback functions for the kernel application.
+%%%-----------------------------------------------------------------
+start(_, []) ->
+ case supervisor:start_link({local, kernel_sup}, kernel, []) of
+ {ok, Pid} ->
+ Type = get_error_logger_type(),
+ error_logger:swap_handler(Type),
+ {ok, Pid, []};
+ Error -> Error
+ end.
+
+stop(_State) ->
+ ok.
+
+%%-------------------------------------------------------------------
+%% Some configuration parameters for kernel are changed
+%%-------------------------------------------------------------------
+config_change(Changed, New, Removed) ->
+ do_distribution_change(Changed, New, Removed),
+ do_global_groups_change(Changed, New, Removed),
+ ok.
+
+get_error_logger_type() ->
+ case application:get_env(kernel, error_logger) of
+ {ok, tty} -> tty;
+ {ok, {file, File}} when is_list(File) -> {logfile, File};
+ {ok, false} -> false;
+ {ok, silent} -> silent;
+ undefined -> tty; % default value
+ {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}})
+ end.
+
+%%%-----------------------------------------------------------------
+%%% The process structure in kernel is as shown in the figure.
+%%%
+%%% ---------------
+%%% | kernel_sup (A)|
+%%% ---------------
+%%% |
+%%% -------------------------------
+%%% | | |
+%%% <std services> ------------- -------------
+%%% (file,code, | erl_dist (A)| | safe_sup (1)|
+%%% rpc, ...) ------------- -------------
+%%% | |
+%%% (net_kernel, (disk_log, pg2,
+%%% auth, ...) ...)
+%%%
+%%% The rectangular boxes are supervisors. All supervisors except
+%%% for kernel_safe_sup terminate the entire Erlang node if any of
+%%% their children dies. Any child that can't be restarted in case
+%%% of failure must be placed under one of these supervisors. Any
+%%% other child must be placed under safe_sup. These children may
+%%% be restarted. Be aware that if a child is restarted the old state
+%%% and all data will be lost.
+%%%-----------------------------------------------------------------
+%%% Callback functions for the kernel_sup supervisor.
+%%%-----------------------------------------------------------------
+
+init([]) ->
+ SupFlags = {one_for_all, 0, 1},
+
+ Config = {kernel_config,
+ {kernel_config, start_link, []},
+ permanent, 2000, worker, [kernel_config]},
+ Code = {code_server,
+ {code, start_link, get_code_args()},
+ permanent, 2000, worker, [code]},
+ File = {file_server_2,
+ {file_server, start_link, []},
+ permanent, 2000, worker,
+ [file, file_server, file_io_server, prim_file]},
+ StdError = {standard_error,
+ {standard_error, start_link, []},
+ temporary, 2000, supervisor, [user_sup]},
+ User = {user,
+ {user_sup, start, []},
+ temporary, 2000, supervisor, [user_sup]},
+
+ case init:get_argument(mode) of
+ {ok, [["minimal"]]} ->
+ SafeSupervisor = {kernel_safe_sup,
+ {supervisor, start_link,
+ [{local, kernel_safe_sup}, ?MODULE, safe]},
+ permanent, infinity, supervisor, [?MODULE]},
+ {ok, {SupFlags,
+ [File, Code, StdError, User,
+ Config, SafeSupervisor]}};
+ _ ->
+ Rpc = {rex, {rpc, start_link, []},
+ permanent, 2000, worker, [rpc]},
+ Global = {global_name_server, {global, start_link, []},
+ permanent, 2000, worker, [global]},
+ Glo_grp = {global_group, {global_group,start_link,[]},
+ permanent, 2000, worker, [global_group]},
+ InetDb = {inet_db, {inet_db, start_link, []},
+ permanent, 2000, worker, [inet_db]},
+ NetSup = {net_sup, {erl_distribution, start_link, []},
+ permanent, infinity, supervisor,[erl_distribution]},
+ DistAC = start_dist_ac(),
+
+ Timer = start_timer(),
+
+ SafeSupervisor = {kernel_safe_sup,
+ {supervisor, start_link,
+ [{local, kernel_safe_sup}, ?MODULE, safe]},
+ permanent, infinity, supervisor, [?MODULE]},
+ {ok, {SupFlags,
+ [Rpc, Global, InetDb | DistAC] ++
+ [NetSup, Glo_grp, File, Code,
+ StdError, User, Config, SafeSupervisor] ++ Timer}}
+ end;
+init(safe) ->
+ SupFlags = {one_for_one, 4, 3600},
+ Boot = start_boot_server(),
+ DiskLog = start_disk_log(),
+ Pg2 = start_pg2(),
+ {ok, {SupFlags, Boot ++ DiskLog ++ Pg2}}.
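+
+%% For reference: each child specification above uses the six-element
+%% supervisor tuple format
+%%   {Id, {M,F,A}, Restart, Shutdown, Type, Modules}
+%% e.g. {rex, {rpc, start_link, []}, permanent, 2000, worker, [rpc]}.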
+
+get_code_args() ->
+ case init:get_argument(nostick) of
+ {ok, [[]]} -> [[nostick]];
+ _ -> []
+ end.
+
+start_dist_ac() ->
+ Spec = [{dist_ac,{dist_ac,start_link,[]},permanent,2000,worker,[dist_ac]}],
+ case application:get_env(kernel, start_dist_ac) of
+ {ok, true} -> Spec;
+ {ok, false} -> [];
+ undefined ->
+ case application:get_env(kernel, distributed) of
+ {ok, _} -> Spec;
+ _ -> []
+ end
+ end.
+
+start_boot_server() ->
+ case application:get_env(kernel, start_boot_server) of
+ {ok, true} ->
+ Args = get_boot_args(),
+ [{boot_server, {erl_boot_server, start_link, [Args]}, permanent,
+ 1000, worker, [erl_boot_server]}];
+ _ ->
+ []
+ end.
+
+get_boot_args() ->
+ case application:get_env(kernel, boot_server_slaves) of
+ {ok, Slaves} -> Slaves;
+ _ -> []
+ end.
+
+start_disk_log() ->
+ case application:get_env(kernel, start_disk_log) of
+ {ok, true} ->
+ [{disk_log_server,
+ {disk_log_server, start_link, []},
+ permanent, 2000, worker, [disk_log_server]},
+ {disk_log_sup, {disk_log_sup, start_link, []}, permanent,
+ 1000, supervisor, [disk_log_sup]}];
+ _ ->
+ []
+ end.
+
+start_pg2() ->
+ case application:get_env(kernel, start_pg2) of
+ {ok, true} ->
+ [{pg2, {pg2, start_link, []}, permanent, 1000, worker, [pg2]}];
+ _ ->
+ []
+ end.
+
+start_timer() ->
+ case application:get_env(kernel, start_timer) of
+ {ok, true} ->
+ [{timer_server, {timer, start_link, []}, permanent, 1000, worker,
+ [timer]}];
+ _ ->
+ []
+ end.
+
+%%-----------------------------------------------------------------
+%% The change of the distributed parameter is taken care of here
+%%-----------------------------------------------------------------
+do_distribution_change(Changed, New, Removed) ->
+ %% Check whether the 'distributed' parameter has changed. It is not
+ %% allowed to turn a local application into a distributed one, or
+ %% vice versa.
+ case is_dist_changed(Changed, New, Removed) of
+ %%{changed, new, removed}
+ {false, false, false} ->
+ ok;
+ {C, false, false} ->
+ %% The parameter was changed; update it.
+ gen_server:call(dist_ac, {distribution_changed, C}, infinity);
+ {false, _, false} ->
+ error_logger:error_report("Distribution not changed: "
+ "Not allowed to add the 'distributed' "
+ "parameter."),
+ {error, {distribution_not_changed, "Not allowed to add the "
+ "'distributed' parameter"}};
+ {false, false, _} ->
+ error_logger:error_report("Distribution not changed: "
+ "Not allowed to remove the "
+ "distribution parameter."),
+ {error, {distribution_not_changed, "Not allowed to remove the "
+ "'distributed' parameter"}}
+ end.
+
+%%-----------------------------------------------------------------
+%% Check whether the distribution parameter has changed in some way.
+%%-----------------------------------------------------------------
+is_dist_changed(Changed, New, Removed) ->
+ C = case lists:keyfind(distributed, 1, Changed) of
+ false ->
+ false;
+ {distributed, NewDistC} ->
+ NewDistC
+ end,
+ N = case lists:keyfind(distributed, 1, New) of
+ false ->
+ false;
+ {distributed, NewDistN} ->
+ NewDistN
+ end,
+ R = lists:member(distributed, Removed),
+ {C, N, R}.
+
+%%-----------------------------------------------------------------
+%% The change of the global_groups parameter is taken care of here
+%%-----------------------------------------------------------------
+do_global_groups_change(Changed, New, Removed) ->
+ %% check if the global_groups parameter is changed.
+ case is_gg_changed(Changed, New, Removed) of
+ %%{changed, new, removed}
+ {false, false, false} ->
+ ok;
+ {C, false, false} ->
+ %% The parameter was changed; update it.
+ global_group:global_groups_changed(C);
+ {false, N, false} ->
+ global_group:global_groups_added(N);
+ {false, false, R} ->
+ global_group:global_groups_removed(R)
+ end.
+
+%%-----------------------------------------------------------------
+%% Check whether the global_groups parameter has changed in some way.
+%%-----------------------------------------------------------------
+is_gg_changed(Changed, New, Removed) ->
+ C = case lists:keyfind(global_groups, 1, Changed) of
+ false ->
+ false;
+ {global_groups, NewDistC} ->
+ NewDistC
+ end,
+ N = case lists:keyfind(global_groups, 1, New) of
+ false ->
+ false;
+ {global_groups, NewDistN} ->
+ NewDistN
+ end,
+ R = lists:member(global_groups, Removed),
+ {C, N, R}.
diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl
new file mode 100644
index 0000000000..e5e9a0498d
--- /dev/null
+++ b/lib/kernel/src/kernel_config.erl
@@ -0,0 +1,173 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(kernel_config).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/0]).
+%% Internal exports
+-export([init/1, handle_info/2, terminate/2, send_timeout/2]).
+-export([handle_call/3, handle_cast/2, code_change/3]).
+
+%%%-----------------------------------------------------------------
+%%% This module implements a process that configures the kernel
+%%% application.
+%%% Its purpose is to add an error_logger during the init phase and,
+%%% when it dies (i.e. when the kernel application dies), to delete the
+%%% previously installed error_logger.
+%%% This process also waits for other nodes at startup, if so
+%%% specified.
+%%%-----------------------------------------------------------------
+start_link() -> gen_server:start_link(kernel_config, [], []).
+
+%%-----------------------------------------------------------------
+%% Callback functions from gen_server
+%%-----------------------------------------------------------------
+init([]) ->
+ process_flag(trap_exit, true),
+ case sync_nodes() of
+ ok ->
+ case whereis(dist_ac) of
+ DAC when is_pid(DAC) ->
+ DAC ! {go, self()},
+ receive
+ dist_ac_took_control ->
+ ok
+ end;
+ _ ->
+ ok
+ end,
+ {ok, []};
+ {error, Error} ->
+ {stop, Error}
+ end.
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+handle_call('__not_used', _From, State) ->
+ {reply, ok, State}.
+
+handle_cast('__not_used', State) ->
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%-----------------------------------------------------------------
+%% Internal functions
+%%-----------------------------------------------------------------
+sync_nodes() ->
+ case catch get_sync_data() of
+ {error, Reason} ->
+ error_logger:format("~p", [Reason]),
+ {error, Reason};
+ {infinity, MandatoryNodes, OptionalNodes} ->
+ case wait_nodes(MandatoryNodes, OptionalNodes) of
+ ok ->
+% sync(),
+ ok;
+ Error ->
+ Error
+ end;
+ {Timeout, MandatoryNodes, OptionalNodes} ->
+ spawn_link(kernel_config, send_timeout, [Timeout, self()]),
+ case wait_nodes(MandatoryNodes, OptionalNodes) of
+ ok ->
+% sync(),
+ ok;
+ Error ->
+ Error
+ end;
+ undefined -> ok
+ end.
+
+send_timeout(Timeout, Pid) ->
+ receive
+ after Timeout -> Pid ! timeout
+ end.
+
+wait_nodes(Mandatory, Optional) ->
+ net_kernel:monitor_nodes(true),
+ lists:foreach(fun(Node) ->
+ case net_adm:ping(Node) of
+ pong -> self() ! {nodeup, Node};
+ _ -> ok
+ end
+ end,
+ Mandatory ++ Optional),
+ rec_nodes(Mandatory, Optional).
+
+rec_nodes([], []) -> ok;
+rec_nodes(Mandatory, Optional) ->
+ receive
+ {nodeup, Node} -> check_up(Node, Mandatory, Optional);
+ timeout when Mandatory =:= [] -> ok;
+ timeout -> {error, {mandatory_nodes_down, Mandatory}}
+ end.
+
+check_up(Node, Mandatory, Optional) ->
+ case lists:member(Node, Mandatory) of
+ true ->
+ rec_nodes(lists:delete(Node, Mandatory), Optional);
+ false ->
+ case lists:member(Node, Optional) of
+ true ->
+ rec_nodes(Mandatory, lists:delete(Node, Optional));
+ false ->
+ rec_nodes(Mandatory, Optional)
+ end
+ end.
+
+%% Syncs standard servers
+%sync() ->
+% global:sync().
+
+get_sync_data() ->
+ Timeout = get_sync_timeout(),
+ MandatoryNodes = get_sync_mandatory_nodes(),
+ OptionalNodes = get_sync_optional_nodes(),
+ {Timeout, MandatoryNodes, OptionalNodes}.
+
+get_sync_timeout() ->
+ case application:get_env(sync_nodes_timeout) of
+ {ok, Timeout} when is_integer(Timeout), Timeout > 0 -> Timeout;
+ {ok, infinity} -> infinity;
+ undefined -> throw(undefined);
+ {ok, Else} -> throw({error, {badopt, {sync_nodes_timeout, Else}}})
+ end.
+
+get_sync_mandatory_nodes() ->
+ case application:get_env(sync_nodes_mandatory) of
+ {ok, Nodes} when is_list(Nodes) -> Nodes;
+ undefined -> [];
+ {ok, Else} -> throw({error, {badopt, {sync_nodes_mandatory, Else}}})
+ end.
+
+get_sync_optional_nodes() ->
+ case application:get_env(sync_nodes_optional) of
+ {ok, Nodes} when is_list(Nodes) -> Nodes;
+ undefined -> [];
+ {ok, Else} -> throw({error, {badopt, {sync_nodes_optional, Else}}})
+ end.
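+
+%% Illustrative only: the node synchronisation parameters read above are
+%% normally given as kernel environment variables, e.g.
+%%   [{kernel, [{sync_nodes_mandatory, ['a@host1']},
+%%              {sync_nodes_optional,  ['b@host2']},
+%%              {sync_nodes_timeout,   30000}]}].
+%% (node names are hypothetical)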
+
diff --git a/lib/kernel/src/net.erl b/lib/kernel/src/net.erl
new file mode 100644
index 0000000000..e8f4b6ba26
--- /dev/null
+++ b/lib/kernel/src/net.erl
@@ -0,0 +1,39 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(net).
+
+%% Various network functions, kept here for compatibility
+
+-export([call/4,
+ cast/4,
+ broadcast/3,
+ ping/1,
+ relay/1,
+ sleep/1]).
+
+-deprecated(module).
+
+call(N,M,F,A) -> rpc:call(N,M,F,A).
+cast(N,M,F,A) -> rpc:cast(N,M,F,A).
+broadcast(M,F,A) -> rpc:eval_everywhere(M,F,A).
+ping(Node) -> net_adm:ping(Node).
+sleep(T) -> receive after T -> ok end.
+relay(X) -> slave:relay(X).
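+
+%% These are thin compatibility wrappers; for example, net:ping(Node) is
+%% equivalent to net_adm:ping(Node). New code should call the rpc,
+%% net_adm and slave modules directly.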
+
+
diff --git a/lib/kernel/src/net_address.hrl b/lib/kernel/src/net_address.hrl
new file mode 100644
index 0000000000..5342076507
--- /dev/null
+++ b/lib/kernel/src/net_address.hrl
@@ -0,0 +1,28 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Generic address format
+
+-record(net_address,
+ {
+ address, %% opaque address
+ host, %% host name
+ protocol, %% protocol
+ family %% address family
+ }).
diff --git a/lib/kernel/src/net_adm.erl b/lib/kernel/src/net_adm.erl
new file mode 100644
index 0000000000..737b1ecee9
--- /dev/null
+++ b/lib/kernel/src/net_adm.erl
@@ -0,0 +1,239 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(net_adm).
+-export([host_file/0,
+ localhost/0,
+ names/0, names/1,
+ ping_list/1,
+ world/0,world/1,
+ world_list/1, world_list/2,
+ dns_hostname/1,
+ ping/1]).
+
+%%------------------------------------------------------------------------
+
+-type verbosity() :: 'silent' | 'verbose'.
+
+%%------------------------------------------------------------------------
+
+%% Try to read the .hosts.erlang file from
+%% 1. cwd, 2. $HOME, 3. code:root_dir()
+
+-spec host_file() -> [atom()] | {'error',atom() | {integer(),atom(),_}}.
+
+host_file() ->
+ Home = case init:get_argument(home) of
+ {ok, [[H]]} -> [H];
+ _ -> []
+ end,
+ case file:path_consult(["."] ++ Home ++ [code:root_dir()], ".hosts.erlang") of
+ {ok, Hosts, _} -> Hosts;
+ Error -> Error
+ end.
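+
+%% Illustrative .hosts.erlang contents (one quoted hostname per line,
+%% each line terminated by a full stop; hostnames are hypothetical):
+%%   'host1.example.com'.
+%%   'host2.example.com'.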
+
+%% Check whether a node is up or down.
+%% Side effect: sets up a connection to Node if one does not yet exist.
+
+-spec ping(atom()) -> 'pang' | 'pong'.
+
+ping(Node) when is_atom(Node) ->
+ case catch gen:call({net_kernel, Node},
+ '$gen_call',
+ {is_auth, node()},
+ infinity) of
+ {ok, yes} -> pong;
+ _ ->
+ erlang:disconnect_node(Node),
+ pang
+ end.
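+
+%% Example (Erlang shell; node name is hypothetical):
+%%   1> net_adm:ping('foo@otherhost').
+%%   pong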
+
+-spec localhost() -> string().
+
+localhost() ->
+ {ok, Host} = inet:gethostname(),
+ case inet_db:res_option(domain) of
+ "" -> Host;
+ Domain -> Host ++ "." ++ Domain
+ end.
+
+
+-spec names() -> {'ok', [{string(), integer()}]} | {'error', _}.
+
+names() ->
+ names(localhost()).
+
+-spec names(atom() | string()) -> {'ok', [{string(), integer()}]} | {'error', _}.
+
+names(Hostname) ->
+ case inet:gethostbyname(Hostname) of
+ {ok, {hostent, _Name, _ , _Af, _Size, [Addr | _]}} ->
+ erl_epmd:names(Addr);
+ Else ->
+ Else
+ end.
+
+-spec dns_hostname(atom() | string()) ->
+ {'ok', string()} | {'error', atom() | string()}.
+
+dns_hostname(Hostname) ->
+ case inet:gethostbyname(Hostname) of
+ {ok,{hostent, Name, _ , _Af, _Size, _Addr}} ->
+ {ok, Name};
+ _ ->
+ {error, Hostname}
+ end.
+
+%% A common situation in "life" is to have a configuration file with a list
+%% of nodes which are all pinged at startup. This can lead to no end of
+%% trouble if two disconnected nodes simultaneously ping each other.
+%% Use this function to do it safely.
+%% It assumes a working global.erl, which ensures a fully
+%% connected network.
+%% Had the Erlang runtime system been able to fully cope with
+%% two simultaneous (Unix) connects, this function would
+%% merely be lists:map({net_adm, ping}, [], Nodelist).
+%% It is also assumed that the same (identical) Nodelist is given to all
+%% nodes that are to perform this call (possibly simultaneously).
+%% Even this code has a flaw, namely the case where two nodes execute it
+%% simultaneously without *any* other node already running. :-(
+
+-spec ping_list([atom()]) -> [atom()].
+
+ping_list(Nodelist) ->
+ net_kernel:monitor_nodes(true),
+ Sofar = ping_first(Nodelist, nodes()),
+ collect_new(Sofar, Nodelist).
+
+ping_first([], _S) ->
+ [];
+ping_first([Node|Nodes], S) ->
+ case lists:member(Node, S) of
+ true -> [Node | ping_first(Nodes, S)];
+ false ->
+ case ping(Node) of
+ pong -> [Node];
+ pang -> ping_first(Nodes, S)
+ end
+ end.
+
+collect_new(Sofar, Nodelist) ->
+ receive
+ {nodeup, Node} ->
+ case lists:member(Node, Nodelist) of
+ true ->
+ collect_new(Sofar, Nodelist);
+ false ->
+ collect_new([Node | Sofar], Nodelist)
+ end
+ after 3000 ->
+ net_kernel:monitor_nodes(false),
+ Sofar
+ end.
+
+%% This function polls a set of hosts according to a file called
+%% .hosts.erlang that needs to reside either in the current directory
+%% or in your home directory. (The current directory is tried first.)
+%% world() returns a list of all nodes on the network that can be
+%% found (including ourselves). Note: the $HOME variable is inspected.
+%%
+%% Added possibility to supply a list of hosts instead of reading
+%% the .hosts.erlang file. 971016 [email protected]
+%% e.g.
+%% net_adm:world_list(['elrond.du.etx.ericsson.se', 'thorin.du.etx.ericsson.se']).
+
+-spec world() -> [node()].
+
+world() ->
+ world(silent).
+
+-spec world(verbosity()) -> [node()].
+
+world(Verbose) ->
+ case net_adm:host_file() of
+ {error,R} -> exit({error, R});
+ Hosts -> expand_hosts(Hosts, Verbose)
+ end.
+
+-spec world_list([atom()]) -> [node()].
+
+world_list(Hosts) when is_list(Hosts) ->
+ expand_hosts(Hosts, silent).
+
+-spec world_list([atom()], verbosity()) -> [node()].
+
+world_list(Hosts, Verbose) when is_list(Hosts) ->
+ expand_hosts(Hosts, Verbose).
+
+expand_hosts(Hosts, Verbose) ->
+ lists:flatten(collect_nodes(Hosts, Verbose)).
+
+collect_nodes([], _) -> [];
+collect_nodes([Host|Tail], Verbose) ->
+ case collect_host_nodes(Host, Verbose) of
+ nil ->
+ collect_nodes(Tail, Verbose);
+ L ->
+ [L|collect_nodes(Tail, Verbose)]
+ end.
+
+collect_host_nodes(Host, Verbose) ->
+ case names(Host) of
+ {ok, Namelist} ->
+ do_ping(Namelist, atom_to_list(Host), Verbose);
+ _ ->
+ nil
+ end.
+
+do_ping(Names, Host0, Verbose) ->
+ case longshort(Host0) of
+ ignored -> [];
+ Host -> do_ping_1(Names, Host, Verbose)
+ end.
+
+do_ping_1([], _Host, _Verbose) ->
+ [];
+do_ping_1([{Name, _} | Rest], Host, Verbose) ->
+ Node = list_to_atom(Name ++ "@" ++ longshort(Host)),
+ verbose(Verbose, "Pinging ~w -> ", [Node]),
+ Result = ping(Node),
+ verbose(Verbose, "~p\n", [Result]),
+ case Result of
+ pong ->
+ [Node | do_ping_1(Rest, Host, Verbose)];
+ pang ->
+ do_ping_1(Rest, Host, Verbose)
+ end.
+
+verbose(verbose, Format, Args) ->
+ io:format(Format, Args);
+verbose(_, _, _) ->
+ ok.
+
+longshort(Host) ->
+ case net_kernel:longnames() of
+ false -> uptodot(Host);
+ true -> Host;
+ ignored -> ignored
+ end.
+
+uptodot([$.|_]) -> [];
+uptodot([])-> [];
+uptodot([H|T]) -> [H|uptodot(T)].
diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl
new file mode 100644
index 0000000000..3afaedf274
--- /dev/null
+++ b/lib/kernel/src/net_kernel.erl
@@ -0,0 +1,1513 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(net_kernel).
+
+-behaviour(gen_server).
+
+-define(nodedown(N, State), verbose({?MODULE, ?LINE, nodedown, N}, 1, State)).
+-define(nodeup(N, State), verbose({?MODULE, ?LINE, nodeup, N}, 1, State)).
+
+%%-define(dist_debug, true).
+
+%-define(DBG,erlang:display([?MODULE,?LINE])).
+
+-ifdef(dist_debug).
+-define(debug(Term), erlang:display(Term)).
+-else.
+-define(debug(Term), ok).
+-endif.
+
+-ifdef(DEBUG).
+-define(connect_failure(Node,Term),
+ io:format("Net Kernel 2: Failed connection to node ~p, reason ~p~n",
+ [Node,Term])).
+-else.
+-define(connect_failure(Node,Term),noop).
+-endif.
+
+%% Default ticktime change transition period in seconds
+-define(DEFAULT_TRANSITION_PERIOD, 60).
+
+%-define(TCKR_DBG, 1).
+
+-ifdef(TCKR_DBG).
+-define(tckr_dbg(X), erlang:display({?LINE, X})).
+-else.
+-define(tckr_dbg(X), ok).
+-endif.
+
+%% User Interface Exports
+-export([start/1, start_link/1, stop/0,
+ kernel_apply/3,
+ monitor_nodes/1,
+ monitor_nodes/2,
+ longnames/0,
+ allow/1,
+ protocol_childspecs/0,
+ epmd_module/0]).
+
+-export([connect/1, disconnect/1, hidden_connect/1, passive_cnct/1]).
+-export([connect_node/1, hidden_connect_node/1]). %% explicit connect
+-export([set_net_ticktime/1, set_net_ticktime/2, get_net_ticktime/0]).
+
+-export([node_info/1, node_info/2, nodes_info/0,
+ connecttime/0,
+ i/0, i/1, verbose/1]).
+
+-export([publish_on_node/1, update_publish_nodes/1]).
+
+%% Internal Exports
+-export([do_spawn/3,
+ spawn_func/6,
+ ticker/2,
+ ticker_loop/2,
+ aux_ticker/4]).
+
+-export([init/1,handle_call/3,handle_cast/2,handle_info/2,
+ terminate/2,code_change/3]).
+
+-export([passive_connect_monitor/2]).
+
+-import(error_logger,[error_msg/2]).
+
+-record(state, {
+ name, %% The node name
+ node, %% The node name including hostname
+ type, %% long or short names
+ tick, %% tick information
+ connecttime, %% the connection setuptime.
+ connections, %% table of connections
+ conn_owners = [], %% List of connection owner pids,
+ pend_owners = [], %% List of potential owners
+ listen, %% list of #listen
+ allowed, %% list of allowed nodes in a restricted system
+ verbose = 0, %% level of verboseness
+ publish_on_nodes = undefined
+ }).
+
+-record(listen, {
+ listen, %% listen pid
+ accept, %% accepting pid
+ address, %% #net_address
+ module %% proto module
+ }).
+
+-define(LISTEN_ID, #listen.listen).
+-define(ACCEPT_ID, #listen.accept).
+
+-record(connection, {
+ node, %% remote node name
+ state, %% pending | up | up_pending
+ owner, %% owner pid
+ pending_owner, %% possible new owner
+ address, %% #net_address
+ waiting = [], %% queued processes
+ type %% normal | hidden
+ }).
+
+-record(barred_connection, {
+ node %% remote node name
+ }).
+
+
+-record(tick, {ticker, %% ticker : pid()
+ time %% Ticktime in milli seconds : integer()
+ }).
+
+-record(tick_change, {ticker, %% Ticker : pid()
+ time, %% Ticktime in milli seconds : integer()
+ how %% What type of change : atom()
+ }).
+
+%% Default connection setup timeout in milliseconds.
+%% This timeout is set for every distributed action during
+%% the connection setup.
+-define(SETUPTIME, 7000).
+
+-include("net_address.hrl").
+
+%% Interface functions
+
+kernel_apply(M,F,A) -> request({apply,M,F,A}).
+allow(Nodes) -> request({allow, Nodes}).
+longnames() -> request(longnames).
+stop() -> erl_distribution:stop().
+
+node_info(Node) -> get_node_info(Node).
+node_info(Node, Key) -> get_node_info(Node, Key).
+nodes_info() -> get_nodes_info().
+i() -> print_info().
+i(Node) -> print_info(Node).
+
+verbose(Level) when is_integer(Level) ->
+ request({verbose, Level}).
+
+set_net_ticktime(T, TP) when is_integer(T), T > 0, is_integer(TP), TP >= 0 ->
+ ticktime_res(request({new_ticktime, T*250, TP*1000})).
+set_net_ticktime(T) when is_integer(T) ->
+ set_net_ticktime(T, ?DEFAULT_TRANSITION_PERIOD).
+get_net_ticktime() ->
+ ticktime_res(request(ticktime)).
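+
+%% Example: net_kernel:set_net_ticktime(60) asks for a net tick time of
+%% 60 seconds, applied over the default 60 s transition period; once the
+%% change is done, net_kernel:get_net_ticktime() returns 60.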
+
+
+%% The monitor_nodes() feature has been moved into the emulator.
+%% The feature is reached via (intentionally) undocumented process
+%% flags (we may want to move it elsewhere later). To remain easily
+%% backward compatible, errors are created here when process_flag()
+%% fails.
+monitor_nodes(Flag) ->
+ case catch process_flag(monitor_nodes, Flag) of
+ true -> ok;
+ false -> ok;
+ _ -> mk_monitor_nodes_error(Flag, [])
+ end.
+
+monitor_nodes(Flag, Opts) ->
+ case catch process_flag({monitor_nodes, Opts}, Flag) of
+ true -> ok;
+ false -> ok;
+ _ -> mk_monitor_nodes_error(Flag, Opts)
+ end.
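+
+%% Example: a process that wants nodeup/nodedown messages for all node
+%% types, including the down reason, can call
+%%   net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]).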
+
+%% ...
+ticktime_res({A, I}) when is_atom(A), is_integer(I) -> {A, I div 250};
+ticktime_res(I) when is_integer(I) -> I div 250;
+ticktime_res(A) when is_atom(A) -> A.
+
+%% Called through BIFs
+
+connect(Node) -> do_connect(Node, normal, false).
+%%% Long timeout if blocked (== barred), only affects nodes with
+%%% {dist_auto_connect, once} set.
+passive_cnct(Node) -> do_connect(Node, normal, true).
+disconnect(Node) -> request({disconnect, Node}).
+
+%% connect but not seen
+hidden_connect(Node) -> do_connect(Node, hidden, false).
+
+%% Should this node publish itself on Node?
+publish_on_node(Node) when is_atom(Node) ->
+ request({publish_on_node, Node}).
+
+%% Update publication list
+update_publish_nodes(Ns) ->
+ request({update_publish_nodes, Ns}).
+
+%% explicit connects
+connect_node(Node) when is_atom(Node) ->
+ request({connect, normal, Node}).
+hidden_connect_node(Node) when is_atom(Node) ->
+ request({connect, hidden, Node}).
+
+do_connect(Node, Type, WaitForBarred) -> %% Type = normal | hidden
+ case catch ets:lookup(sys_dist, Node) of
+ {'EXIT', _} ->
+ ?connect_failure(Node,{table_missing, sys_dist}),
+ false;
+ [#barred_connection{}] ->
+ case WaitForBarred of
+ false ->
+ false;
+ true ->
+ Pid = spawn(?MODULE,passive_connect_monitor,[self(),Node]),
+ receive
+ {Pid, true} ->
+ %%io:format("Net Kernel: barred connection (~p) "
+ %% "connected from other end.~n",[Node]),
+ true;
+ {Pid, false} ->
+ ?connect_failure(Node,{barred_connection,
+ ets:lookup(sys_dist, Node)}),
+ %%io:format("Net Kernel: barred connection (~p) "
+ %% "- failure.~n",[Node]),
+ false
+ end
+ end;
+ Else ->
+ case application:get_env(kernel, dist_auto_connect) of
+ {ok, never} ->
+ ?connect_failure(Node,{dist_auto_connect,never}),
+ false;
+ % This might happen due to a connection close
+ % not being propagated to user space yet.
+ % Save the day by just not connecting...
+ {ok, once} when Else =/= [],
+ (hd(Else))#connection.state =:= up ->
+ ?connect_failure(Node,{barred_connection,
+ ets:lookup(sys_dist, Node)}),
+ false;
+ _ ->
+ request({connect, Type, Node})
+ end
+ end.
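+
+%% Example: with {kernel, [{dist_auto_connect, never}]} set, automatic
+%% connection attempts above return false, and only explicit calls such
+%% as net_kernel:connect_node(Node) set up new connections.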
+
+passive_connect_monitor(Parent, Node) ->
+ monitor_nodes(true,[{node_type,all}]),
+ case lists:member(Node,nodes([connected])) of
+ true ->
+ monitor_nodes(false,[{node_type,all}]),
+ Parent ! {self(),true};
+ _ ->
+ Ref = make_ref(),
+ Tref = erlang:send_after(connecttime(),self(),Ref),
+ receive
+ Ref ->
+ monitor_nodes(false,[{node_type,all}]),
+ Parent ! {self(), false};
+ {nodeup,Node,_} ->
+ monitor_nodes(false,[{node_type,all}]),
+ erlang:cancel_timer(Tref),
+ Parent ! {self(),true}
+ end
+ end.
+
+%% If the net_kernel isn't running, we ignore all requests to the
+%% kernel, thus basically accepting them :-)
+request(Req) ->
+ case whereis(net_kernel) of
+ P when is_pid(P) ->
+ gen_server:call(net_kernel,Req,infinity);
+ _ -> ignored
+ end.
+
+%% This function is used to dynamically start the
+%% distribution.
+
+start(Args) ->
+ erl_distribution:start(Args).
+
+%% This is the main startup routine for net_kernel.
+%% The defaults are longnames and a ticktime of 15 seconds for the tcp_drv.
+
+start_link([Name]) ->
+ start_link([Name, longnames]);
+
+start_link([Name, LongOrShortNames]) ->
+ start_link([Name, LongOrShortNames, 15000]);
+
+start_link([Name, LongOrShortNames, Ticktime]) ->
+ case gen_server:start_link({local, net_kernel}, net_kernel,
+ {Name, LongOrShortNames, Ticktime}, []) of
+ {ok, Pid} ->
+ {ok, Pid};
+ {error, {already_started, Pid}} ->
+ {ok, Pid};
+ _Error ->
+ exit(nodistribution)
+ end.
+
+%% auth:get_cookie should only be able to return an atom;
+%% tuple cookies are unknown.
+
+init({Name, LongOrShortNames, TickT}) ->
+ process_flag(trap_exit,true),
+ case init_node(Name, LongOrShortNames) of
+ {ok, Node, Listeners} ->
+ process_flag(priority, max),
+ Ticktime = to_integer(TickT),
+ Ticker = spawn_link(net_kernel, ticker, [self(), Ticktime]),
+ case auth:get_cookie(Node) of
+ Cookie when is_atom(Cookie) ->
+ {ok, #state{name = Name,
+ node = Node,
+ type = LongOrShortNames,
+ tick = #tick{ticker = Ticker, time = Ticktime},
+ connecttime = connecttime(),
+ connections =
+ ets:new(sys_dist,[named_table,
+ protected,
+ {keypos, 2}]),
+ listen = Listeners,
+ allowed = [],
+ verbose = 0
+ }};
+ _ELSE ->
+ {stop, {error,{bad_cookie, Node}}}
+ end;
+ Error ->
+ {stop, Error}
+ end.
+
+
+%% ------------------------------------------------------------
+%% handle_call.
+%% ------------------------------------------------------------
+
+%%
+%% Set up a connection to Node.
+%% The response is delayed until the connection is up and
+%% running.
+%%
+handle_call({connect, _, Node}, _From, State) when Node =:= node() ->
+ {reply, true, State};
+handle_call({connect, Type, Node}, From, State) ->
+ verbose({connect, Type, Node}, 1, State),
+ case ets:lookup(sys_dist, Node) of
+ [Conn] when Conn#connection.state =:= up ->
+ {reply, true, State};
+ [Conn] when Conn#connection.state =:= pending ->
+ Waiting = Conn#connection.waiting,
+ ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
+ {noreply, State};
+ [Conn] when Conn#connection.state =:= up_pending ->
+ Waiting = Conn#connection.waiting,
+ ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
+ {noreply, State};
+ _ ->
+ case setup(Node,Type,From,State) of
+ {ok, SetupPid} ->
+ Owners = [{SetupPid, Node} | State#state.conn_owners],
+ {noreply,State#state{conn_owners=Owners}};
+ _ ->
+ ?connect_failure(Node, {setup_call, failed}),
+ {reply, false, State}
+ end
+ end;
+
+%%
+%% Close the connection to Node.
+%%
+handle_call({disconnect, Node}, _From, State) when Node =:= node() ->
+ {reply, false, State};
+handle_call({disconnect, Node}, _From, State) ->
+ verbose({disconnect, Node}, 1, State),
+ {Reply, State1} = do_disconnect(Node, State),
+ {reply, Reply, State1};
+
+%%
+%% The spawn/4 BIF ends up here.
+%%
+handle_call({spawn,M,F,A,Gleader},{From,Tag},State) when is_pid(From) ->
+ do_spawn([no_link,{From,Tag},M,F,A,Gleader],[],State);
+
+%%
+%% The spawn_link/4 BIF ends up here.
+%%
+handle_call({spawn_link,M,F,A,Gleader},{From,Tag},State) when is_pid(From) ->
+ do_spawn([link,{From,Tag},M,F,A,Gleader],[],State);
+
+%%
+%% The spawn_opt/5 BIF ends up here.
+%%
+handle_call({spawn_opt,M,F,A,O,L,Gleader},{From,Tag},State) when is_pid(From) ->
+ do_spawn([L,{From,Tag},M,F,A,Gleader],O,State);
+
+%%
+%% Only allow certain nodes.
+%%
+handle_call({allow, Nodes}, _From, State) ->
+ case all_atoms(Nodes) of
+ true ->
+ Allowed = State#state.allowed,
+ {reply,ok,State#state{allowed = Allowed ++ Nodes}};
+ false ->
+ {reply,error,State}
+ end;
+
+%%
+%% authentication, used by auth. Simply works as this:
+%% if the message comes through, the other node IS authorized.
+%%
+handle_call({is_auth, _Node}, _From, State) ->
+ {reply,yes,State};
+
+%%
+%% Not applicable any longer !?
+%%
+handle_call({apply,_Mod,_Fun,_Args}, {From,Tag}, State)
+ when is_pid(From), node(From) =:= node() ->
+ gen_server:reply({From,Tag}, not_implemented),
+% Port = State#state.port,
+% catch apply(Mod,Fun,[Port|Args]),
+ {noreply,State};
+
+handle_call(longnames, _From, State) ->
+ {reply, get(longnames), State};
+
+handle_call({update_publish_nodes, Ns}, _From, State) ->
+ {reply, ok, State#state{publish_on_nodes = Ns}};
+
+handle_call({publish_on_node, Node}, _From, State) ->
+ NewState = case State#state.publish_on_nodes of
+ undefined ->
+ State#state{publish_on_nodes =
+ global_group:publish_on_nodes()};
+ _ ->
+ State
+ end,
+ Publish = case NewState#state.publish_on_nodes of
+ all ->
+ true;
+ Nodes ->
+ lists:member(Node, Nodes)
+ end,
+ {reply, Publish, NewState};
+
+
+handle_call({verbose, Level}, _From, State) ->
+ {reply, State#state.verbose, State#state{verbose = Level}};
+
+%%
+%% Set new ticktime
+%%
+
+%% The tick field of the state contains either a #tick{} or a
+%% #tick_change{} record if the ticker process has been upgraded;
+%% otherwise, an integer or an atom.
+
+handle_call(ticktime, _, #state{tick = #tick{time = T}} = State) ->
+ {reply, T, State};
+handle_call(ticktime, _, #state{tick = #tick_change{time = T}} = State) ->
+ {reply, {ongoing_change_to, T}, State};
+
+handle_call({new_ticktime,T,_TP}, _, #state{tick = #tick{time = T}} = State) ->
+ ?tckr_dbg(no_tick_change),
+ {reply, unchanged, State};
+
+handle_call({new_ticktime,T,TP}, _, #state{tick = #tick{ticker = Tckr,
+ time = OT}} = State) ->
+ ?tckr_dbg(initiating_tick_change),
+ start_aux_ticker(T, OT, TP),
+ How = case T > OT of
+ true ->
+ ?tckr_dbg(longer_ticktime),
+ Tckr ! {new_ticktime,T},
+ longer;
+ false ->
+ ?tckr_dbg(shorter_ticktime),
+ shorter
+ end,
+ {reply, change_initiated, State#state{tick = #tick_change{ticker = Tckr,
+ time = T,
+ how = How}}};
+
+handle_call({new_ticktime,_,_},
+ _,
+ #state{tick = #tick_change{time = T}} = State) ->
+ {reply, {ongoing_change_to, T}, State}.
+
+%% ------------------------------------------------------------
+%% handle_cast.
+%% ------------------------------------------------------------
+
+handle_cast(_, State) ->
+ {noreply,State}.
+
+%% ------------------------------------------------------------
+%% code_change.
+%% ------------------------------------------------------------
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok,State}.
+
+%% ------------------------------------------------------------
+%% terminate.
+%% ------------------------------------------------------------
+
+terminate(no_network, State) ->
+ lists:foreach(
+ fun({Node, Type}) ->
+ case Type of
+ normal -> ?nodedown(Node, State);
+ _ -> ok
+ end
+ end, get_up_nodes() ++ [{node(), normal}]);
+terminate(_Reason, State) ->
+ lists:foreach(
+ fun(#listen {listen = Listen,module = Mod}) ->
+ Mod:close(Listen)
+ end, State#state.listen),
+ lists:foreach(
+ fun({Node, Type}) ->
+ case Type of
+ normal -> ?nodedown(Node, State);
+ _ -> ok
+ end
+ end, get_up_nodes() ++ [{node(), normal}]).
+
+
+%% ------------------------------------------------------------
+%% handle_info.
+%% ------------------------------------------------------------
+
+%%
+%% accept a new connection.
+%%
+handle_info({accept,AcceptPid,Socket,Family,Proto}, State) ->
+ MyNode = State#state.node,
+ case get_proto_mod(Family,Proto,State#state.listen) of
+ {ok, Mod} ->
+ Pid = Mod:accept_connection(AcceptPid,
+ Socket,
+ MyNode,
+ State#state.allowed,
+ State#state.connecttime),
+ AcceptPid ! {self(), controller, Pid},
+ {noreply,State};
+ _ ->
+ AcceptPid ! {self(), unsupported_protocol},
+ {noreply, State}
+ end;
+
+%%
+%% A node has successfully been connected.
+%%
+handle_info({SetupPid, {nodeup,Node,Address,Type,Immediate}},
+ State) ->
+ case {Immediate, ets:lookup(sys_dist, Node)} of
+ {true, [Conn]} when Conn#connection.state =:= pending,
+ Conn#connection.owner =:= SetupPid ->
+ ets:insert(sys_dist, Conn#connection{state = up,
+ address = Address,
+ waiting = [],
+ type = Type}),
+ SetupPid ! {self(), inserted},
+ reply_waiting(Node,Conn#connection.waiting, true),
+ {noreply, State};
+ _ ->
+ SetupPid ! {self(), bad_request},
+ {noreply, State}
+ end;
+
+%%
+%% Mark a node as pending (accept) if not busy.
+%%
+handle_info({AcceptPid, {accept_pending,MyNode,Node,Address,Type}}, State) ->
+ case ets:lookup(sys_dist, Node) of
+ [#connection{state=pending}=Conn] ->
+ if
+ MyNode > Node ->
+ AcceptPid ! {self(),{accept_pending,nok_pending}},
+ {noreply,State};
+ true ->
+ %%
+ %% A simultaneous connect has been detected and we want to
+ %% change pending process.
+ %%
+ OldOwner = Conn#connection.owner,
+ ?debug({net_kernel, remark, old, OldOwner, new, AcceptPid}),
+ exit(OldOwner, remarked),
+ receive
+ {'EXIT', OldOwner, _} ->
+ true
+ end,
+ Owners = lists:keyreplace(OldOwner,
+ 1,
+ State#state.conn_owners,
+ {AcceptPid, Node}),
+ ets:insert(sys_dist, Conn#connection{owner = AcceptPid}),
+ AcceptPid ! {self(),{accept_pending,ok_pending}},
+ State1 = State#state{conn_owners=Owners},
+ {noreply,State1}
+ end;
+ [#connection{state=up}=Conn] ->
+ AcceptPid ! {self(), {accept_pending, up_pending}},
+ ets:insert(sys_dist, Conn#connection { pending_owner = AcceptPid,
+ state = up_pending }),
+ Pend = [{AcceptPid, Node} | State#state.pend_owners ],
+ {noreply, State#state { pend_owners = Pend }};
+ [#connection{state=up_pending}] ->
+ AcceptPid ! {self(), {accept_pending, already_pending}},
+ {noreply, State};
+ _ ->
+ ets:insert(sys_dist, #connection{node = Node,
+ state = pending,
+ owner = AcceptPid,
+ address = Address,
+ type = Type}),
+ AcceptPid ! {self(),{accept_pending,ok}},
+ Owners = [{AcceptPid,Node} | State#state.conn_owners],
+ {noreply, State#state{conn_owners = Owners}}
+ end;
+
+handle_info({SetupPid, {is_pending, Node}}, State) ->
+ Reply = lists:member({SetupPid,Node},State#state.conn_owners),
+ SetupPid ! {self(), {is_pending, Reply}},
+ {noreply, State};
+
+
+%%
+%% Handle different types of process terminations.
+%%
+handle_info({'EXIT', From, Reason}, State) when is_pid(From) ->
+ verbose({'EXIT', From, Reason}, 1, State),
+ handle_exit(From, Reason, State);
+
+%%
+%% Handle badcookie and badname messages !
+%%
+handle_info({From,registered_send,To,Mess},State) ->
+ send(From,To,Mess),
+ {noreply,State};
+
+%% badcookies SHOULD not be sent
+%% (if someone does erlang:set_cookie(node(),foo) this may be)
+handle_info({From,badcookie,_To,_Mess}, State) ->
+ error_logger:error_msg("~n** Got OLD cookie from ~w~n",
+ [getnode(From)]),
+ {_Reply, State1} = do_disconnect(getnode(From), State),
+ {noreply,State1};
+
+%%
+%% Tick all connections.
+%%
+handle_info(tick, State) ->
+ ?tckr_dbg(tick),
+ lists:foreach(fun({Pid,_Node}) -> Pid ! {self(), tick} end,
+ State#state.conn_owners),
+ {noreply,State};
+
+handle_info(aux_tick, State) ->
+ ?tckr_dbg(aux_tick),
+ lists:foreach(fun({Pid,_Node}) -> Pid ! {self(), aux_tick} end,
+ State#state.conn_owners),
+ {noreply,State};
+
+handle_info(transition_period_end,
+ #state{tick = #tick_change{ticker = Tckr,
+ time = T,
+ how = How}} = State) ->
+ ?tckr_dbg(transition_period_ended),
+ case How of
+ shorter -> Tckr ! {new_ticktime, T};
+ _ -> done
+ end,
+ {noreply,State#state{tick = #tick{ticker = Tckr, time = T}}};
+
+handle_info(X, State) ->
+ error_msg("Net kernel got ~w~n",[X]),
+ {noreply,State}.
+
+%% -----------------------------------------------------------
+%% Handle exit signals.
+%% We have 6 types of processes to handle:
+%%
+%% 1. The Listen process.
+%% 2. The Accept process.
+%% 3. Connection owning processes.
+%% 4. Pending connection owner processes.
+%% 5. The ticker process.
+%% (6. Garbage pids.)
+%%
+%% The process-type function that handles the process throws
+%% the handle_info return value.
+%% -----------------------------------------------------------
+
+handle_exit(Pid, Reason, State) ->
+ catch do_handle_exit(Pid, Reason, State).
+
+do_handle_exit(Pid, Reason, State) ->
+ listen_exit(Pid, State),
+ accept_exit(Pid, State),
+ conn_own_exit(Pid, Reason, State),
+ pending_own_exit(Pid, State),
+ ticker_exit(Pid, State),
+ {noreply,State}.
+
+listen_exit(Pid, State) ->
+ case lists:keymember(Pid, ?LISTEN_ID, State#state.listen) of
+ true ->
+ error_msg("** Netkernel terminating ... **\n", []),
+ throw({stop,no_network,State});
+ false ->
+ false
+ end.
+
+accept_exit(Pid, State) ->
+ Listen = State#state.listen,
+ case lists:keysearch(Pid, ?ACCEPT_ID, Listen) of
+ {value, ListenR} ->
+ ListenS = ListenR#listen.listen,
+ Mod = ListenR#listen.module,
+ AcceptPid = Mod:accept(ListenS),
+ L = lists:keyreplace(Pid, ?ACCEPT_ID, Listen,
+ ListenR#listen{accept = AcceptPid}),
+ throw({noreply, State#state{listen = L}});
+ _ ->
+ false
+ end.
+
+conn_own_exit(Pid, Reason, State) ->
+ Owners = State#state.conn_owners,
+ case lists:keysearch(Pid, 1, Owners) of
+ {value, {Pid, Node}} ->
+ throw({noreply, nodedown(Pid, Node, Reason, State)});
+ _ ->
+ false
+ end.
+
+pending_own_exit(Pid, State) ->
+ Pend = State#state.pend_owners,
+ case lists:keysearch(Pid, 1, Pend) of
+ {value, {Pid, Node}} ->
+ NewPend = lists:keydelete(Pid, 1, Pend),
+ State1 = State#state { pend_owners = NewPend },
+ case get_conn(Node) of
+ {ok, Conn} when Conn#connection.state =:= up_pending ->
+ reply_waiting(Node,Conn#connection.waiting, true),
+ Conn1 = Conn#connection { state = up,
+ waiting = [],
+ pending_owner = undefined },
+ ets:insert(sys_dist, Conn1);
+ _ ->
+ ok
+ end,
+ throw({noreply, State1});
+ _ ->
+ false
+ end.
+
+ticker_exit(Pid, #state{tick = #tick{ticker = Pid, time = T} = Tck} = State) ->
+ Tckr = restart_ticker(T),
+ throw({noreply, State#state{tick = Tck#tick{ticker = Tckr}}});
+ticker_exit(Pid, #state{tick = #tick_change{ticker = Pid,
+ time = T} = TckCng} = State) ->
+ Tckr = restart_ticker(T),
+ throw({noreply, State#state{tick = TckCng#tick_change{ticker = Tckr}}});
+ticker_exit(_, _) ->
+ false.
+
+%% -----------------------------------------------------------
+%% A node has gone down !!
+%% nodedown(Owner, Node, Reason, State) -> State'
+%% -----------------------------------------------------------
+
+nodedown(Owner, Node, Reason, State) ->
+ case get_conn(Node) of
+ {ok, Conn} ->
+ nodedown(Conn, Owner, Node, Reason, Conn#connection.type, State);
+ _ ->
+ State
+ end.
+
+get_conn(Node) ->
+ case ets:lookup(sys_dist, Node) of
+ [Conn = #connection{}] -> {ok, Conn};
+ _ -> error
+ end.
+
+nodedown(Conn, Owner, Node, Reason, Type, OldState) ->
+ Owners = lists:keydelete(Owner, 1, OldState#state.conn_owners),
+ State = OldState#state{conn_owners = Owners},
+ case Conn#connection.state of
+ pending when Conn#connection.owner =:= Owner ->
+ pending_nodedown(Conn, Node, Type, State);
+ up when Conn#connection.owner =:= Owner ->
+ up_nodedown(Conn, Node, Reason, Type, State);
+ up_pending when Conn#connection.owner =:= Owner ->
+ up_pending_nodedown(Conn, Node, Reason, Type, State);
+ _ ->
+ OldState
+ end.
+
+pending_nodedown(Conn, Node, Type, State) ->
+ % Don't bar connections that have never been alive
+ %mark_sys_dist_nodedown(Node),
+ % - instead just delete the node:
+ ets:delete(sys_dist, Node),
+ reply_waiting(Node,Conn#connection.waiting, false),
+ case Type of
+ normal ->
+ ?nodedown(Node, State);
+ _ ->
+ ok
+ end,
+ State.
+
+up_pending_nodedown(Conn, Node, _Reason, _Type, State) ->
+ AcceptPid = Conn#connection.pending_owner,
+ Owners = State#state.conn_owners,
+ Pend = lists:keydelete(AcceptPid, 1, State#state.pend_owners),
+ Conn1 = Conn#connection { owner = AcceptPid,
+ pending_owner = undefined,
+ state = pending },
+ ets:insert(sys_dist, Conn1),
+ AcceptPid ! {self(), pending},
+ State#state{conn_owners = [{AcceptPid,Node}|Owners], pend_owners = Pend}.
+
+
+up_nodedown(_Conn, Node, _Reason, Type, State) ->
+ mark_sys_dist_nodedown(Node),
+ case Type of
+ normal -> ?nodedown(Node, State);
+ _ -> ok
+ end,
+ State.
+
+mark_sys_dist_nodedown(Node) ->
+ case application:get_env(kernel, dist_auto_connect) of
+ {ok, once} ->
+ ets:insert(sys_dist, #barred_connection{node = Node});
+ _ ->
+ ets:delete(sys_dist, Node)
+ end.
+
+%% -----------------------------------------------------------
+%% End handle_exit/2 !!
+%% -----------------------------------------------------------
+
+
+%% -----------------------------------------------------------
+%% monitor_nodes/[1,2] errors
+%% -----------------------------------------------------------
+
+check_opt(Opt, Opts) ->
+ check_opt(Opt, Opts, false, []).
+
+check_opt(_Opt, [], false, _OtherOpts) ->
+ false;
+check_opt(_Opt, [], {true, ORes}, OtherOpts) ->
+ {true, ORes, OtherOpts};
+check_opt(Opt, [Opt|RestOpts], false, OtherOpts) ->
+ check_opt(Opt, RestOpts, {true, Opt}, OtherOpts);
+check_opt(Opt, [Opt|RestOpts], {true, Opt} = ORes, OtherOpts) ->
+ check_opt(Opt, RestOpts, ORes, OtherOpts);
+check_opt({Opt, value}=TOpt,
+ [{Opt, _Val}=ORes|RestOpts],
+ false,
+ OtherOpts) ->
+ check_opt(TOpt, RestOpts, {true, ORes}, OtherOpts);
+check_opt({Opt, value}=TOpt,
+ [{Opt, _Val}=ORes|RestOpts],
+ {true, ORes}=TORes,
+ OtherOpts) ->
+ check_opt(TOpt, RestOpts, TORes, OtherOpts);
+check_opt({Opt, value},
+ [{Opt, _Val} = ORes1| _RestOpts],
+ {true, {Opt, _OtherVal} = ORes2},
+ _OtherOpts) ->
+ throw({error, {option_value_mismatch, [ORes1, ORes2]}});
+check_opt(Opt, [OtherOpt | RestOpts], TORes, OtherOpts) ->
+ check_opt(Opt, RestOpts, TORes, [OtherOpt | OtherOpts]).
+
+check_options(Opts) when is_list(Opts) ->
+ RestOpts1 = case check_opt({node_type, value}, Opts) of
+ {true, {node_type,Type}, RO1} when Type =:= visible;
+ Type =:= hidden;
+ Type =:= all ->
+ RO1;
+ {true, {node_type, _Type} = Opt, _RO1} ->
+ throw({error, {bad_option_value, Opt}});
+ false ->
+ Opts
+ end,
+ RestOpts2 = case check_opt(nodedown_reason, RestOpts1) of
+ {true, nodedown_reason, RO2} ->
+ RO2;
+ false ->
+ RestOpts1
+ end,
+ case RestOpts2 of
+ [] ->
+ %% This should never happen since we only call this function
+ %% when we know there is an error in the option list
+ {error, internal_error};
+ _ ->
+ {error, {unknown_options, RestOpts2}}
+ end;
+check_options(Opts) ->
+ {error, {options_not_a_list, Opts}}.
+
+mk_monitor_nodes_error(Flag, _Opts) when Flag =/= true, Flag =/= false ->
+ error;
+mk_monitor_nodes_error(_Flag, Opts) ->
+ case catch check_options(Opts) of
+ {error, _} = Error ->
+ Error;
+ UnexpectedError ->
+ {error, {internal_error, UnexpectedError}}
+ end.
+
+% -------------------------------------------------------------
+
+do_disconnect(Node, State) ->
+ case ets:lookup(sys_dist, Node) of
+ [Conn] when Conn#connection.state =:= up ->
+ disconnect_pid(Conn#connection.owner, State);
+ [Conn] when Conn#connection.state =:= up_pending ->
+ disconnect_pid(Conn#connection.owner, State);
+ _ ->
+ {false, State}
+ end.
+
+
+disconnect_pid(Pid, State) ->
+ exit(Pid, disconnect),
+ %% Sync wait for connection to die!!!
+ receive
+ {'EXIT',Pid,Reason} ->
+ {_,State1} = handle_exit(Pid, Reason, State),
+ {true, State1}
+ end.
+
+%%
+%%
+%%
+get_nodes(Which) ->
+ get_nodes(ets:first(sys_dist), Which).
+
+get_nodes('$end_of_table', _) ->
+ [];
+get_nodes(Key, Which) ->
+ case ets:lookup(sys_dist, Key) of
+ [Conn = #connection{state = up}] ->
+ [Conn#connection.node | get_nodes(ets:next(sys_dist, Key),
+ Which)];
+ [Conn = #connection{}] when Which =:= all ->
+ [Conn#connection.node | get_nodes(ets:next(sys_dist, Key),
+ Which)];
+ _ ->
+ get_nodes(ets:next(sys_dist, Key), Which)
+ end.
+
+%% Return a list of all nodes that are 'up'.
+get_up_nodes() ->
+ get_up_nodes(ets:first(sys_dist)).
+
+get_up_nodes('$end_of_table') -> [];
+get_up_nodes(Key) ->
+ case ets:lookup(sys_dist, Key) of
+ [#connection{state=up,node=Node,type=Type}] ->
+ [{Node,Type}|get_up_nodes(ets:next(sys_dist, Key))];
+ _ ->
+ get_up_nodes(ets:next(sys_dist, Key))
+ end.
+
+ticker(Kernel, Tick) when is_integer(Tick) ->
+ process_flag(priority, max),
+ ?tckr_dbg(ticker_started),
+ ticker_loop(Kernel, Tick).
+
+to_integer(T) when is_integer(T) -> T;
+to_integer(T) when is_atom(T) ->
+ list_to_integer(atom_to_list(T));
+to_integer(T) when is_list(T) ->
+ list_to_integer(T).
+
+ticker_loop(Kernel, Tick) ->
+ receive
+ {new_ticktime, NewTick} ->
+ ?tckr_dbg({ticker_changed_time, Tick, NewTick}),
+ ?MODULE:ticker_loop(Kernel, NewTick)
+ after Tick ->
+ Kernel ! tick,
+ ?MODULE:ticker_loop(Kernel, Tick)
+ end.
+
+start_aux_ticker(NewTick, OldTick, TransitionPeriod) ->
+ spawn_link(?MODULE, aux_ticker,
+ [self(), NewTick, OldTick, TransitionPeriod]).
+
+aux_ticker(NetKernel, NewTick, OldTick, TransitionPeriod) ->
+ process_flag(priority, max),
+ ?tckr_dbg(aux_ticker_started),
+ TickInterval = case NewTick > OldTick of
+ true -> OldTick;
+ false -> NewTick
+ end,
+ NoOfTicks = case TransitionPeriod > 0 of
+ true ->
+ %% 1 tick to start
+ %% + ticks to cover the transition period
+ 1 + (((TransitionPeriod - 1) div TickInterval) + 1);
+ false ->
+ 1
+ end,
+ aux_ticker1(NetKernel, TickInterval, NoOfTicks).
+
+aux_ticker1(NetKernel, _, 1) ->
+ NetKernel ! transition_period_end,
+ NetKernel ! aux_tick,
+ bye;
+aux_ticker1(NetKernel, TickInterval, NoOfTicks) ->
+ NetKernel ! aux_tick,
+ receive
+ after TickInterval ->
+ aux_ticker1(NetKernel, TickInterval, NoOfTicks-1)
+ end.
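+
+%% Worked example of the calculation above: changing from a 15000 ms to
+%% a 60000 ms tick with a 60000 ms transition period gives
+%% TickInterval = 15000 and NoOfTicks = 1 + ((59999 div 15000) + 1) = 5,
+%% i.e. five aux_tick messages spread over the transition period.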
+
+send(_From,To,Mess) ->
+ case whereis(To) of
+ undefined ->
+ Mess;
+ P when is_pid(P) ->
+ P ! Mess
+ end.
+
+-ifdef(UNUSED).
+
+safesend(Name,Mess) when is_atom(Name) ->
+ case whereis(Name) of
+ undefined ->
+ Mess;
+ P when is_pid(P) ->
+ P ! Mess
+ end;
+safesend(Pid, Mess) -> Pid ! Mess.
+
+-endif.
+
+do_spawn(SpawnFuncArgs, SpawnOpts, State) ->
+ case catch spawn_opt(?MODULE, spawn_func, SpawnFuncArgs, SpawnOpts) of
+ {'EXIT', {Reason,_}} ->
+ {reply, {'EXIT', {Reason,[]}}, State};
+ {'EXIT', Reason} ->
+ {reply, {'EXIT', {Reason,[]}}, State};
+ _ ->
+ {noreply,State}
+ end.
+
+%% This code is really intricate. The link goes first and then comes
+%% the pid; this means that the client need not do a network link.
+%% If the link message does not arrive, the runtime system
+%% generates a nodedown message.
+
+spawn_func(link,{From,Tag},M,F,A,Gleader) ->
+ link(From),
+ gen_server:reply({From,Tag},self()), %% ahhh
+ group_leader(Gleader,self()),
+ apply(M,F,A);
+spawn_func(_,{From,Tag},M,F,A,Gleader) ->
+ gen_server:reply({From,Tag},self()), %% ahhh
+ group_leader(Gleader,self()),
+ apply(M,F,A).
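+
+%% For example, a remote spawn(Node, M, F, A) directed at this node ends
+%% up in the {spawn,M,F,A,Gleader} handle_call clause above, which runs
+%% spawn_func/6 here on behalf of the remote caller.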
+
+%% -----------------------------------------------------------
+%% Set up connection to a new node.
+%% -----------------------------------------------------------
+
+setup(Node,Type,From,State) ->
+ Allowed = State#state.allowed,
+ case lists:member(Node, Allowed) of
+ false when Allowed =/= [] ->
+ error_msg("** Connection attempt with "
+ "disallowed node ~w ** ~n", [Node]),
+ {error, bad_node};
+ _ ->
+ case select_mod(Node, State#state.listen) of
+ {ok, L} ->
+ Mod = L#listen.module,
+ LAddr = L#listen.address,
+ MyNode = State#state.node,
+ Pid = Mod:setup(Node,
+ Type,
+ MyNode,
+ State#state.type,
+ State#state.connecttime),
+ Addr = LAddr#net_address {
+ address = undefined,
+ host = undefined },
+ ets:insert(sys_dist, #connection{node = Node,
+ state = pending,
+ owner = Pid,
+ waiting = [From],
+ address = Addr,
+ type = normal}),
+ {ok, Pid};
+ Error ->
+ Error
+ end
+ end.
+
+%%
+%% Find a module that is willing to handle connection setup to Node
+%%
+select_mod(Node, [L|Ls]) ->
+ Mod = L#listen.module,
+ case Mod:select(Node) of
+ true -> {ok, L};
+ false -> select_mod(Node, Ls)
+ end;
+select_mod(Node, []) ->
+ {error, {unsupported_address_type, Node}}.
+
+
+get_proto_mod(Family,Protocol,[L|Ls]) ->
+ A = L#listen.address,
+ if A#net_address.family =:= Family,
+ A#net_address.protocol =:= Protocol ->
+ {ok, L#listen.module};
+ true ->
+ get_proto_mod(Family,Protocol,Ls)
+ end;
+get_proto_mod(_Family, _Protocol, []) ->
+ error.
+
+%% -------- Initialisation functions ------------------------
+
+init_node(Name, LongOrShortNames) ->
+ {NameWithoutHost,_Host} = lists:splitwith(fun($@)->false;(_)->true end,
+ atom_to_list(Name)),
+ case create_name(Name, LongOrShortNames, 1) of
+ {ok,Node} ->
+ case start_protos(list_to_atom(NameWithoutHost),Node) of
+ {ok, Ls} ->
+ {ok, Node, Ls};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+%% Create the node name
+create_name(Name, LongOrShortNames, Try) ->
+ put(longnames, case LongOrShortNames of
+ shortnames -> false;
+ longnames -> true
+ end),
+ {Head,Host1} = create_hostpart(Name, LongOrShortNames),
+ case Host1 of
+ {ok,HostPart} ->
+ {ok,list_to_atom(Head ++ HostPart)};
+ {error,long} when Try =:= 1 ->
+ %% It could be we haven't read domain name from resolv file yet
+ inet_config:do_load_resolv(os:type(), longnames),
+ create_name(Name, LongOrShortNames, 0);
+ {error,Type} ->
+ error_logger:info_msg(
+ lists:concat(["Can\'t set ",
+ Type,
+ " node name!\n"
+ "Please check your configuration\n"])),
+ {error,badarg}
+ end.
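+
+%% Example (host and domain names are hypothetical):
+%% create_name(foo, shortnames, 1) on host "durin" yields {ok, 'foo@durin'};
+%% with longnames and domain "example.com" it yields
+%% {ok, 'foo@durin.example.com'}.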
+
+create_hostpart(Name, LongOrShortNames) ->
+ {Head,Host} = lists:splitwith(fun($@)->false;(_)->true end,
+ atom_to_list(Name)),
+ Host1 = case {Host,LongOrShortNames} of
+ {[$@,_|_],longnames} ->
+ {ok,Host};
+ {[$@,_|_],shortnames} ->
+ case lists:member($.,Host) of
+ true -> {error,short};
+ _ -> {ok,Host}
+ end;
+ {_,shortnames} ->
+ case inet_db:gethostname() of
+ H when is_list(H), length(H)>0 ->
+ {ok,"@" ++ H};
+ _ ->
+ {error,short}
+ end;
+ {_,longnames} ->
+ case {inet_db:gethostname(),inet_db:res_option(domain)} of
+ {H,D} when is_list(D), is_list(H),
+ length(D)> 0, length(H)>0 ->
+ {ok,"@" ++ H ++ "." ++ D};
+ _ ->
+ {error,long}
+ end
+ end,
+ {Head,Host1}.
+
+%%
+%%
+%%
+protocol_childspecs() ->
+ case init:get_argument(proto_dist) of
+ {ok, [Protos]} ->
+ protocol_childspecs(Protos);
+ _ ->
+ protocol_childspecs(["inet_tcp"])
+ end.
+
+protocol_childspecs([]) ->
+ [];
+protocol_childspecs([H|T]) ->
+ Mod = list_to_atom(H ++ "_dist"),
+ case (catch Mod:childspecs()) of
+ {ok, Childspecs} when is_list(Childspecs) ->
+ Childspecs ++ protocol_childspecs(T);
+ _ ->
+ protocol_childspecs(T)
+ end.
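+
+%% Example: starting the node with "erl -proto_dist inet6_tcp" makes this
+%% function look up inet6_tcp_dist:childspecs/0 (and the start_protos
+%% functions below call inet6_tcp_dist:listen/1).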
+
+
+%%
+%% epmd_module() -> module_name of erl_epmd or similar gen_server_module.
+%%
+
+epmd_module() ->
+ case init:get_argument(epmd_module) of
+ {ok,[[Module]]} ->
+ Module;
+ _ ->
+ erl_epmd
+ end.
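+
+%% Example: "erl -epmd_module my_epmd" makes net_kernel use the module
+%% my_epmd instead of erl_epmd (my_epmd is a hypothetical module name).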
+
+%%
+%% Start all protocols
+%%
+
+start_protos(Name,Node) ->
+ case init:get_argument(proto_dist) of
+ {ok, [Protos]} ->
+ start_protos(Name,Protos, Node);
+ _ ->
+ start_protos(Name,["inet_tcp"], Node)
+ end.
+
+start_protos(Name,Ps, Node) ->
+ case start_protos(Name, Ps, Node, []) of
+ [] -> {error, badarg};
+ Ls -> {ok, Ls}
+ end.
+
+start_protos(Name, [Proto | Ps], Node, Ls) ->
+ Mod = list_to_atom(Proto ++ "_dist"),
+ case catch Mod:listen(Name) of
+ {ok, {Socket, Address, Creation}} ->
+ case set_node(Node, Creation) of
+ ok ->
+ AcceptPid = Mod:accept(Socket),
+ auth:sync_cookie(),
+ L = #listen {
+ listen = Socket,
+ address = Address,
+ accept = AcceptPid,
+ module = Mod },
+ start_protos(Name,Ps, Node, [L|Ls]);
+ _ ->
+ Mod:close(Socket),
+ error_logger:info_msg("Invalid node name: ~p~n", [Node]),
+ start_protos(Name, Ps, Node, Ls)
+ end;
+ {'EXIT', {undef,_}} ->
+ error_logger:info_msg("Protocol: ~p: not supported~n", [Proto]),
+ start_protos(Name,Ps, Node, Ls);
+ {'EXIT', Reason} ->
+ error_logger:info_msg("Protocol: ~p: register error: ~p~n",
+ [Proto, Reason]),
+ start_protos(Name,Ps, Node, Ls);
+ {error, duplicate_name} ->
+ error_logger:info_msg("Protocol: ~p: the name " ++
+ atom_to_list(Node) ++
+ " seems to be in use by another Erlang node",
+ [Proto]),
+ start_protos(Name,Ps, Node, Ls);
+ {error, Reason} ->
+ error_logger:info_msg("Protocol: ~p: register/listen error: ~p~n",
+ [Proto, Reason]),
+ start_protos(Name,Ps, Node, Ls)
+ end;
+start_protos(_,[], _Node, Ls) ->
+ Ls.
+
+set_node(Node, Creation) when node() =:= nonode@nohost ->
+ case catch erlang:setnode(Node, Creation) of
+ true ->
+ ok;
+ {'EXIT',Reason} ->
+ {error,Reason}
+ end;
+set_node(Node, _Creation) when node() =:= Node ->
+ ok.
+
+connecttime() ->
+ case application:get_env(kernel, net_setuptime) of
+ {ok,Time} when is_number(Time), Time >= 120 ->
+ 120 * 1000;
+ {ok,Time} when is_number(Time), Time > 0 ->
+ round(Time * 1000);
+ _ ->
+ ?SETUPTIME
+ end.
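+
+%% Example: {kernel, [{net_setuptime, 30}]} gives a 30 second connection
+%% setup timeout; values of 120 or more are capped at 120 seconds, and
+%% anything else falls back to the 7 second ?SETUPTIME default.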
+
+%% -------- End initialisation functions --------------------
+
+%% ------------------------------------------------------------
+%% Node information.
+%% ------------------------------------------------------------
+
+get_node_info(Node) ->
+ case ets:lookup(sys_dist, Node) of
+ [Conn = #connection{owner = Owner, state = State}] ->
+ case get_status(Owner, Node, State) of
+ {ok, In, Out} ->
+ {ok, [{owner, Owner},
+ {state, State},
+ {address, Conn#connection.address},
+ {type, Conn#connection.type},
+ {in, In},
+ {out, Out}]};
+ _ ->
+ {error, bad_node}
+ end;
+ _ ->
+ {error, bad_node}
+ end.
+
+%%
+%% We can't do monitor_node here in case the node is pending;
+%% the monitor_node/2 call hangs until the connection is ready.
+%% Nor do we ask for in/out information for pending connections,
+%% as that would also block this call for a while.
+%%
+get_status(Owner, Node, up) ->
+ monitor_node(Node, true),
+ Owner ! {self(), get_status},
+ receive
+ {Owner, get_status, Res} ->
+ monitor_node(Node, false),
+ Res;
+ {nodedown, Node} ->
+ error
+ end;
+get_status(_, _, _) ->
+ {ok, 0, 0}.
+
+get_node_info(Node, Key) ->
+ case get_node_info(Node) of
+ {ok, Info} ->
+ case lists:keysearch(Key, 1, Info) of
+ {value, {Key, Value}} -> {ok, Value};
+ _ -> {error, invalid_key}
+ end;
+ Error ->
+ Error
+ end.
+
+get_nodes_info() ->
+ get_nodes_info(get_nodes(all), []).
+
+get_nodes_info([Node|Nodes], InfoList) ->
+ case get_node_info(Node) of
+ {ok, Info} -> get_nodes_info(Nodes, [{Node, Info}|InfoList]);
+ _ -> get_nodes_info(Nodes, InfoList)
+ end;
+get_nodes_info([], InfoList) ->
+ {ok, InfoList}.
+
+%% ------------------------------------------------------------
+%% Misc. functions
+%% ------------------------------------------------------------
+
+reply_waiting(_Node, Waiting, Rep) ->
+ case Rep of
+ false ->
+ ?connect_failure(_Node, {setup_process, failure});
+ _ ->
+ ok
+ end,
+ reply_waiting1(lists:reverse(Waiting), Rep).
+
+reply_waiting1([From|W], Rep) ->
+ gen_server:reply(From, Rep),
+ reply_waiting1(W, Rep);
+reply_waiting1([], _) ->
+ ok.
+
+
+-ifdef(UNUSED).
+
+delete_all(From, [From |Tail]) -> delete_all(From, Tail);
+delete_all(From, [H|Tail]) -> [H|delete_all(From, Tail)];
+delete_all(_, []) -> [].
+
+-endif.
+
+all_atoms([]) -> true;
+all_atoms([N|Tail]) when is_atom(N) ->
+ all_atoms(Tail);
+all_atoms(_) -> false.
+
+%% It is assumed that only net_kernel uses restart_ticker()
+restart_ticker(Time) ->
+ ?tckr_dbg(restarting_ticker),
+ self() ! aux_tick,
+ spawn_link(?MODULE, ticker, [self(), Time]).
+
+%% ------------------------------------------------------------
+%% Print status information.
+%% ------------------------------------------------------------
+
+print_info() ->
+ nformat("Node", "State", "Type", "In", "Out", "Address"),
+ {ok, NodesInfo} = nodes_info(),
+ {In,Out} = lists:foldl(fun display_info/2, {0,0}, NodesInfo),
+ nformat("Total", "", "",
+ integer_to_list(In), integer_to_list(Out), "").
+
+display_info({Node, Info}, {I,O}) ->
+ State = atom_to_list(fetch(state, Info)),
+ In = fetch(in, Info),
+ Out = fetch(out, Info),
+ Type = atom_to_list(fetch(type, Info)),
+ Address = fmt_address(fetch(address, Info)),
+ nformat(atom_to_list(Node), State, Type,
+ integer_to_list(In), integer_to_list(Out), Address),
+ {I+In,O+Out}.
+
+fmt_address(undefined) ->
+ "-";
+fmt_address(A) ->
+ case A#net_address.family of
+ inet ->
+ case A#net_address.address of
+ {IP,Port} ->
+ inet_parse:ntoa(IP) ++ ":" ++ integer_to_list(Port);
+ _ -> "-"
+ end;
+ inet6 ->
+ case A#net_address.address of
+ {IP,Port} ->
+ inet_parse:ntoa(IP) ++ "/" ++ integer_to_list(Port);
+ _ -> "-"
+ end;
+ _ ->
+ lists:flatten(io_lib:format("~p", [A#net_address.address]))
+ end.
+
+
+fetch(Key, Info) ->
+ case lists:keysearch(Key, 1, Info) of
+ {value, {_, Val}} -> Val;
+ false -> 0
+ end.
+
+nformat(A1, A2, A3, A4, A5, A6) ->
+ io:format("~-20s ~-7s ~-6s ~8s ~8s ~s~n", [A1,A2,A3,A4,A5,A6]).
+
+print_info(Node) ->
+ case node_info(Node) of
+ {ok, Info} ->
+ State = fetch(state, Info),
+ In = fetch(in, Info),
+ Out = fetch(out, Info),
+ Type = fetch(type, Info),
+ Address = fmt_address(fetch(address, Info)),
+ io:format("Node = ~p~n"
+ "State = ~p~n"
+ "Type = ~p~n"
+ "In = ~p~n"
+ "Out = ~p~n"
+ "Address = ~s~n",
+ [Node, State, Type, In, Out, Address]);
+ Error ->
+ Error
+ end.
+
+verbose(Term, Level, #state{verbose = Verbose}) when Verbose >= Level ->
+ error_logger:info_report({net_kernel, Term});
+verbose(_, _, _) ->
+ ok.
+
+getnode(P) when is_pid(P) -> node(P);
+getnode(P) -> P.
diff --git a/lib/kernel/src/os.erl b/lib/kernel/src/os.erl
new file mode 100644
index 0000000000..196e6cdeb2
--- /dev/null
+++ b/lib/kernel/src/os.erl
@@ -0,0 +1,291 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(os).
+
+%% Provides a common operating system interface.
+
+-export([type/0, version/0, cmd/1, find_executable/1, find_executable/2]).
+
+-include("file.hrl").
+
+-spec type() -> 'vxworks' | {'unix',atom()} | {'win32',atom()} | {'ose',atom()}.
+type() ->
+ case erlang:system_info(os_type) of
+ {vxworks, _} ->
+ vxworks;
+ Else -> Else
+ end.
+
+-spec version() -> string() | {non_neg_integer(),non_neg_integer(),non_neg_integer()}.
+version() ->
+ erlang:system_info(os_version).
+
+-spec find_executable(string()) -> string() | 'false'.
+find_executable(Name) ->
+ case os:getenv("PATH") of
+ false -> find_executable(Name, []);
+ Path -> find_executable(Name, Path)
+ end.
+
+-spec find_executable(string(), string()) -> string() | 'false'.
+find_executable(Name, Path) ->
+ Extensions = extensions(),
+ case filename:pathtype(Name) of
+ relative ->
+ find_executable1(Name, split_path(Path), Extensions);
+ _ ->
+ case verify_executable(Name, Extensions) of
+ {ok, Complete} ->
+ Complete;
+ error ->
+ false
+ end
+ end.
+
+find_executable1(Name, [Base|Rest], Extensions) ->
+ Complete0 = filename:join(Base, Name),
+ case verify_executable(Complete0, Extensions) of
+ {ok, Complete} ->
+ Complete;
+ error ->
+ find_executable1(Name, Rest, Extensions)
+ end;
+find_executable1(_Name, [], _Extensions) ->
+ false.
+
+verify_executable(Name0, [Ext|Rest]) ->
+ Name1 = Name0 ++ Ext,
+ case os:type() of
+ vxworks ->
+ %% We consider all existing VxWorks files to be executable
+ case file:read_file_info(Name1) of
+ {ok, _} ->
+ {ok, Name1};
+ _ ->
+ verify_executable(Name0, Rest)
+ end;
+ _ ->
+ case file:read_file_info(Name1) of
+ {ok, #file_info{mode=Mode}} when Mode band 8#111 =/= 0 ->
+ %% XXX This test for execution permission is not foolproof on
+ %% Unix, since we only check whether any execution bit is set.
+ {ok, Name1};
+ _ ->
+ verify_executable(Name0, Rest)
+ end
+ end;
+verify_executable(_, []) ->
+ error.
+
+split_path(Path) ->
+ case type() of
+ {win32, _} ->
+ {ok,Curr} = file:get_cwd(),
+ split_path(Path, $;, [], [Curr]);
+ _ ->
+ split_path(Path, $:, [], [])
+ end.
+
+split_path([Sep|Rest], Sep, Current, Path) ->
+ split_path(Rest, Sep, [], [reverse_element(Current)|Path]);
+split_path([C|Rest], Sep, Current, Path) ->
+ split_path(Rest, Sep, [C|Current], Path);
+split_path([], _, Current, Path) ->
+ lists:reverse(Path, [reverse_element(Current)]).
+
+reverse_element([]) -> ".";
+reverse_element([$"|T]) -> %"
+ case lists:reverse(T) of
+ [$"|List] -> List; %"
+ List -> List ++ [$"] %"
+ end;
+reverse_element(List) ->
+ lists:reverse(List).
+
+-spec extensions() -> [string()].
+extensions() ->
+ case type() of
+ {win32, _} -> [".exe",".com",".cmd",".bat"];
+ {unix, _} -> [""];
+ vxworks -> [""]
+ end.
+
+%% Executes the given command in the default shell for the operating system.
+-spec cmd(atom() | string() | [string()]) -> string().
+cmd(Cmd) ->
+ validate(Cmd),
+ case type() of
+ {unix, _} ->
+ unix_cmd(Cmd);
+ {win32, Wtype} ->
+ Command = case {os:getenv("COMSPEC"),Wtype} of
+ {false,windows} -> lists:concat(["command.com /c", Cmd]);
+ {false,_} -> lists:concat(["cmd /c", Cmd]);
+ {Cspec,_} -> lists:concat([Cspec," /c",Cmd])
+ end,
+ Port = open_port({spawn, Command}, [stream, in, eof, hide]),
+ get_data(Port, []);
+ %% VxWorks uses a 'sh -c hook' in 'vxcall.c' to run os:cmd.
+ vxworks ->
+ Command = lists:concat(["sh -c '", Cmd, "'"]),
+ Port = open_port({spawn, Command}, [stream, in, eof]),
+ get_data(Port, [])
+ end.
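+
+%% Illustrative usage (not part of the original code); the exact output is
+%% platform and shell dependent:
+%%
+%%   1> os:cmd("echo hello").
+%%   "hello\n"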
+
+unix_cmd(Cmd) ->
+ Tag = make_ref(),
+ {Pid,Mref} = erlang:spawn_monitor(
+ fun() ->
+ process_flag(trap_exit, true),
+ Port = start_port(),
+ erlang:port_command(Port, mk_cmd(Cmd)),
+ exit({Tag,unix_get_data(Port)})
+ end),
+ receive
+ {'DOWN',Mref,_,Pid,{Tag,Result}} ->
+ Result;
+ {'DOWN',Mref,_,Pid,Reason} ->
+ exit(Reason)
+ end.
+
+%% The -s flag implies that only the positional parameters are set,
+%% and the commands are read from standard input. We set the
+%% $1 parameter for easy identification of the resident shell.
+%%
+-define(SHELL, "/bin/sh -s unix:cmd 2>&1").
+
+%%
+%% Serializing open_port through a process to avoid SMP lock contention
+%% when many concurrent os:cmd() calls want to do a vfork (OTP-7890).
+%%
+-spec start_port() -> port().
+start_port() ->
+ {Ref,Client} = {make_ref(),self()},
+ try (os_cmd_port_creator ! {Ref,Client})
+ catch
+ error:_ -> spawn(fun() -> start_port_srv({Ref,Client}) end)
+ end,
+ receive
+ {Ref,Port} when is_port(Port) -> Port;
+ {Ref,Error} -> exit(Error)
+ end.
+
+start_port_srv(Request) ->
+ StayAlive = try register(os_cmd_port_creator, self())
+ catch
+ error:_ -> false
+ end,
+ start_port_srv_loop(Request, StayAlive).
+
+start_port_srv_loop({Ref,Client}, StayAlive) ->
+ Reply = try open_port({spawn, ?SHELL},[stream]) of
+ Port when is_port(Port) ->
+ port_connect(Port, Client),
+ unlink(Port),
+ Port
+ catch
+ error:Reason ->
+ {Reason,erlang:get_stacktrace()}
+ end,
+ Client ! {Ref,Reply},
+ case StayAlive of
+ true -> start_port_srv_loop(receive Msg -> Msg end, true);
+ false -> exiting
+ end.
+
+%%
+%% unix_get_data(Port) -> Result
+%%
+unix_get_data(Port) ->
+ unix_get_data(Port, []).
+
+unix_get_data(Port, Sofar) ->
+ receive
+ {Port,{data, Bytes}} ->
+ case eot(Bytes) of
+ {done, Last} ->
+ lists:flatten([Sofar|Last]);
+ more ->
+ unix_get_data(Port, [Sofar|Bytes])
+ end;
+ {'EXIT', Port, _} ->
+ lists:flatten(Sofar)
+ end.
+
+%%
+%% eot(String) -> more | {done, Result}
+%%
+eot(Bs) ->
+ eot(Bs, []).
+
+eot([4| _Bs], As) ->
+ {done, lists:reverse(As)};
+eot([B| Bs], As) ->
+ eot(Bs, [B| As]);
+eot([], _As) ->
+ more.
+
+%%
+%% mk_cmd(Cmd) -> {ok, ShellCommandString} | {error, ErrorString}
+%%
+%% We do not allow any input to Cmd (hence commands that want
+%% to read from standard input will return immediately).
+%% Standard error is redirected to standard output.
+%%
+%% We use ^D (= EOT = 4) to mark the end of the stream.
+%%
+mk_cmd(Cmd) when is_atom(Cmd) -> % backward comp.
+ mk_cmd(atom_to_list(Cmd));
+mk_cmd(Cmd) ->
+ %% We insert a new line after the command, in case the command
+ %% contains a comment character.
+ io_lib:format("(~s\n) </dev/null; echo \"\^D\"\n", [Cmd]).
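+
+%% Illustrative note (not part of the original code): for Cmd = "ls" the
+%% string sent to the shell looks roughly like
+%%
+%%   (ls
+%%   ) </dev/null; echo "<EOT>"
+%%
+%% where <EOT> is a literal end-of-transmission byte (4) that
+%% unix_get_data/1 uses to detect the end of the output.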
+
+
+validate(Atom) when is_atom(Atom) ->
+ ok;
+validate(List) when is_list(List) ->
+ validate1(List).
+
+validate1([C|Rest]) when is_integer(C), 0 =< C, C < 256 ->
+ validate1(Rest);
+validate1([List|Rest]) when is_list(List) ->
+ validate1(List),
+ validate1(Rest);
+validate1([]) ->
+ ok.
+
+get_data(Port, Sofar) ->
+ receive
+ {Port, {data, Bytes}} ->
+ get_data(Port, [Sofar|Bytes]);
+ {Port, eof} ->
+ Port ! {self(), close},
+ receive
+ {Port, closed} ->
+ true
+ end,
+ receive
+ {'EXIT', Port, _} ->
+ ok
+ after 1 -> % force context switch
+ ok
+ end,
+ lists:flatten(Sofar)
+ end.
diff --git a/lib/kernel/src/packages.erl b/lib/kernel/src/packages.erl
new file mode 100644
index 0000000000..e0b1f36b85
--- /dev/null
+++ b/lib/kernel/src/packages.erl
@@ -0,0 +1,158 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2002-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(packages).
+
+-export([to_string/1, concat/1, concat/2, is_valid/1, is_segmented/1,
+ split/1, last/1, first/1, strip_last/1, find_modules/1,
+ find_modules/2]).
+
+%% A package name (or a package-qualified module name) may be an atom or
+%% a string (list of nonnegative integers) - not a deep list, and not a
+%% list containing atoms. A name may be empty, but may not contain two
+%% consecutive period (`.') characters or end with a period character.
+
+-type package_name() :: atom() | string().
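+
+%% Illustrative examples (not part of the original code):
+%%
+%%   is_valid("foo.bar.baz")  -> true
+%%   is_valid('erl.lang')     -> true
+%%   is_valid("foo..bar")     -> false   %% consecutive periods
+%%   is_valid("foo.")         -> false   %% trailing period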
+
+-spec to_string(package_name()) -> string().
+to_string(Name) when is_atom(Name) ->
+ atom_to_list(Name);
+to_string(Name) ->
+ Name.
+
+%% `concat' does not insert a leading period if the first segment is
+%% empty. However, the result may contain leading, consecutive or
+%% dangling period characters, if any of the segments after the first
+%% are empty. Use 'is_valid' to check the result if necessary.
+
+-spec concat(package_name(), package_name()) -> string().
+concat(A, B) ->
+ concat([A, B]).
+
+-spec concat([package_name()]) -> string().
+concat([H | T]) when is_atom(H) ->
+ concat([atom_to_list(H) | T]);
+concat(["" | T]) ->
+ concat_1(T);
+concat(L) ->
+ concat_1(L).
+
+concat_1([H | T]) when is_atom(H) ->
+ concat_1([atom_to_list(H) | T]);
+concat_1([H]) ->
+ H;
+concat_1([H | T]) ->
+ H ++ "." ++ concat_1(T);
+concat_1([]) ->
+ "";
+concat_1(Name) ->
+ erlang:error({badarg, Name}).
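+
+%% Illustrative examples (not part of the original code):
+%%
+%%   concat(["a", "b"])          -> "a.b"
+%%   concat(['foo', "bar", baz]) -> "foo.bar.baz"
+%%   concat(["", "x"])           -> "x"    %% no leading period inserted
+%%   concat(["x", ""])           -> "x."   %% dangling period; fails is_valid/1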
+
+-spec is_valid(package_name()) -> boolean().
+is_valid(Name) when is_atom(Name) ->
+ is_valid_1(atom_to_list(Name));
+is_valid([$. | _]) ->
+ false;
+is_valid(Name) ->
+ is_valid_1(Name).
+
+is_valid_1([$.]) -> false;
+is_valid_1([$., $. | _]) -> false;
+is_valid_1([H | T]) when is_integer(H), H >= 0 ->
+ is_valid_1(T);
+is_valid_1([]) -> true;
+is_valid_1(_) -> false.
+
+-spec split(package_name()) -> [string()].
+split(Name) when is_atom(Name) ->
+ split_1(atom_to_list(Name), []);
+split(Name) ->
+ split_1(Name, []).
+
+split_1([$. | T], Cs) ->
+ [lists:reverse(Cs) | split_1(T, [])];
+split_1([H | T], Cs) when is_integer(H), H >= 0 ->
+ split_1(T, [H | Cs]);
+split_1([], Cs) ->
+ [lists:reverse(Cs)];
+split_1(_, _) ->
+ erlang:error(badarg).
+
+%% This is equivalent to testing if `split(Name)' yields a list of
+%% length larger than one (i.e., if the name can be split into two or
+%% more segments), but is cheaper.
+
+-spec is_segmented(package_name()) -> boolean().
+is_segmented(Name) when is_atom(Name) ->
+ is_segmented_1(atom_to_list(Name));
+is_segmented(Name) ->
+ is_segmented_1(Name).
+
+is_segmented_1([$. | _]) -> true;
+is_segmented_1([H | T]) when is_integer(H), H >= 0 ->
+ is_segmented_1(T);
+is_segmented_1([]) -> false;
+is_segmented_1(_) ->
+ erlang:error(badarg).
+
+-spec last(package_name()) -> string().
+last(Name) ->
+ last_1(split(Name)).
+
+last_1([H]) -> H;
+last_1([_ | T]) -> last_1(T).
+
+-spec first(package_name()) -> [string()].
+first(Name) ->
+ first_1(split(Name)).
+
+first_1([H | T]) when T =/= [] -> [H | first_1(T)];
+first_1(_) -> [].
+
+-spec strip_last(package_name()) -> string().
+strip_last(Name) ->
+ concat(first(Name)).
+
+%% This finds all modules available for a given package, using the
+%% current code server search path. (There is no guarantee that the
+%% modules are loadable; only that the object files exist.)
+
+-spec find_modules(package_name()) -> [string()].
+find_modules(P) ->
+ find_modules(P, code:get_path()).
+
+-spec find_modules(package_name(), [string()]) -> [string()].
+find_modules(P, Paths) ->
+ P1 = filename:join(packages:split(P)),
+ find_modules(P1, Paths, code:objfile_extension(), sets:new()).
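+
+%% Illustrative example (not part of the original code), assuming a
+%% hypothetical package 'erl.lang' with compiled modules on the code path:
+%%
+%%   packages:find_modules('erl.lang')
+%%   %% -> ["list", "string"]   %% base names of the object files found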
+
+find_modules(P, [Path | Paths], Ext, S0) ->
+ case file:list_dir(filename:join(Path, P)) of
+ {ok, Fs} ->
+ Fs1 = [F || F <- Fs, filename:extension(F) =:= Ext],
+ S1 = lists:foldl(fun (F, S) ->
+ F1 = filename:rootname(F, Ext),
+ sets:add_element(F1, S)
+ end,
+ S0, Fs1),
+ find_modules(P, Paths, Ext, S1);
+ _ ->
+ find_modules(P, Paths, Ext, S0)
+ end;
+find_modules(_P, [], _Ext, S) ->
+ sets:to_list(S).
diff --git a/lib/kernel/src/pg2.erl b/lib/kernel/src/pg2.erl
new file mode 100644
index 0000000000..fc9508a194
--- /dev/null
+++ b/lib/kernel/src/pg2.erl
@@ -0,0 +1,376 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(pg2).
+
+-export([create/1, delete/1, join/2, leave/2]).
+-export([get_members/1, get_local_members/1]).
+-export([get_closest_pid/1, which_groups/0]).
+-export([start/0,start_link/0,init/1,handle_call/3,handle_cast/2,handle_info/2,
+ terminate/2]).
+
+%%% As of R13B03 monitors are used instead of links.
+
+%%%
+%%% Exported functions
+%%%
+
+-spec start_link() -> {'ok', pid()} | {'error', term()}.
+
+start_link() ->
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+-spec start() -> {'ok', pid()} | {'error', term()}.
+
+start() ->
+ ensure_started().
+
+-spec create(term()) -> 'ok'.
+
+create(Name) ->
+ ensure_started(),
+ case ets:member(pg2_table, {group, Name}) of
+ false ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE, {create, Name})
+ end),
+ ok;
+ true ->
+ ok
+ end.
+
+-type name() :: term().
+
+-spec delete(name()) -> 'ok'.
+
+delete(Name) ->
+ ensure_started(),
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE, {delete, Name})
+ end),
+ ok.
+
+-spec join(name(), pid()) -> 'ok' | {'error', {'no_such_group', term()}}.
+
+join(Name, Pid) when is_pid(Pid) ->
+ ensure_started(),
+ case ets:member(pg2_table, {group, Name}) of
+ false ->
+ {error, {no_such_group, Name}};
+ true ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE,
+ {join, Name, Pid})
+ end),
+ ok
+ end.
+
+-spec leave(name(), pid()) -> 'ok' | {'error', {'no_such_group', name()}}.
+
+leave(Name, Pid) when is_pid(Pid) ->
+ ensure_started(),
+ case ets:member(pg2_table, {group, Name}) of
+ false ->
+ {error, {no_such_group, Name}};
+ true ->
+ global:trans({{?MODULE, Name}, self()},
+ fun() ->
+ gen_server:multi_call(?MODULE,
+ {leave, Name, Pid})
+ end),
+ ok
+ end.
+
+-type get_members_ret() :: [pid()] | {'error', {'no_such_group', name()}}.
+
+-spec get_members(name()) -> get_members_ret().
+
+get_members(Name) ->
+ ensure_started(),
+ case ets:member(pg2_table, {group, Name}) of
+ true ->
+ group_members(Name);
+ false ->
+ {error, {no_such_group, Name}}
+ end.
+
+-spec get_local_members(name()) -> get_members_ret().
+
+get_local_members(Name) ->
+ ensure_started(),
+ case ets:member(pg2_table, {group, Name}) of
+ true ->
+ local_group_members(Name);
+ false ->
+ {error, {no_such_group, Name}}
+ end.
+
+-spec which_groups() -> [name()].
+
+which_groups() ->
+ ensure_started(),
+ all_groups().
+
+-type gcp_error_reason() :: {'no_process', term()} | {'no_such_group', term()}.
+
+-spec get_closest_pid(term()) -> pid() | {'error', gcp_error_reason()}.
+
+get_closest_pid(Name) ->
+ case get_local_members(Name) of
+ [Pid] ->
+ Pid;
+ [] ->
+ {_,_,X} = erlang:now(),
+ case get_members(Name) of
+ [] -> {error, {no_process, Name}};
+ Members ->
+ lists:nth((X rem length(Members))+1, Members)
+ end;
+ Members when is_list(Members) ->
+ {_,_,X} = erlang:now(),
+ lists:nth((X rem length(Members))+1, Members);
+ Else ->
+ Else
+ end.
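+
+%% Illustrative usage (not part of the original code); 'my_group' is just
+%% an example group name:
+%%
+%%   ok = pg2:create(my_group),
+%%   ok = pg2:join(my_group, self()),
+%%   Members = pg2:get_members(my_group),
+%%   Pid = pg2:get_closest_pid(my_group)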
+
+%%%
+%%% Callback functions from gen_server
+%%%
+
+-record(state, {}).
+
+-spec init([]) -> {'ok', #state{}}.
+
+init([]) ->
+ Ns = nodes(),
+ net_kernel:monitor_nodes(true),
+ lists:foreach(fun(N) ->
+ {?MODULE, N} ! {new_pg2, node()},
+ self() ! {nodeup, N}
+ end, Ns),
+ pg2_table = ets:new(pg2_table, [ordered_set, protected, named_table]),
+ {ok, #state{}}.
+
+-type call() :: {'create', name()}
+ | {'delete', name()}
+ | {'join', name(), pid()}
+ | {'leave', name(), pid()}.
+
+-spec handle_call(call(), _, #state{}) ->
+ {'reply', 'ok', #state{}}.
+
+handle_call({create, Name}, _From, S) ->
+ assure_group(Name),
+ {reply, ok, S};
+handle_call({join, Name, Pid}, _From, S) ->
+ ets:member(pg2_table, {group, Name}) andalso join_group(Name, Pid),
+ {reply, ok, S};
+handle_call({leave, Name, Pid}, _From, S) ->
+ ets:member(pg2_table, {group, Name}) andalso leave_group(Name, Pid),
+ {reply, ok, S};
+handle_call({delete, Name}, _From, S) ->
+ delete_group(Name),
+ {reply, ok, S};
+handle_call(Request, From, S) ->
+ error_logger:warning_msg("The pg2 server received an unexpected message:\n"
+ "handle_call(~p, ~p, _)\n",
+ [Request, From]),
+ {noreply, S}.
+
+-type all_members() :: [[name(),...]].
+-type cast() :: {'exchange', node(), all_members()}
+ | {'del_member', name(), pid()}.
+
+-spec handle_cast(cast(), #state{}) -> {'noreply', #state{}}.
+
+handle_cast({exchange, _Node, List}, S) ->
+ store(List),
+ {noreply, S};
+handle_cast(_, S) ->
+ %% Ignore {del_member, Name, Pid}.
+ {noreply, S}.
+
+-spec handle_info(tuple(), #state{}) -> {'noreply', #state{}}.
+
+handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S) ->
+ member_died(MonitorRef),
+ {noreply, S};
+handle_info({nodeup, Node}, S) ->
+ gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
+ {noreply, S};
+handle_info({new_pg2, Node}, S) ->
+ gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
+ {noreply, S};
+handle_info(_, S) ->
+ {noreply, S}.
+
+-spec terminate(term(), #state{}) -> 'ok'.
+
+terminate(_Reason, _S) ->
+ true = ets:delete(pg2_table),
+ ok.
+
+%%%
+%%% Local functions
+%%%
+
+%%% One ETS table, pg2_table, is used for bookkeeping. The type of the
+%%% table is ordered_set, and the fast matching of partially
+%%% instantiated keys is used extensively.
+%%%
+%%% {{group, Name}}
+%%% Process group Name.
+%%% {{ref, Pid}, RPid, MonitorRef, Counter}
+%%% {{ref, MonitorRef}, Pid}
+%%% Each process has one monitor. Sometimes a process is spawned to
+%%% monitor the pid (RPid). Counter is incremented when the Pid joins
+%%% some group.
+%%% {{member, Name, Pid}, GroupCounter}
+%%% {{local_member, Name, Pid}}
+%%% Pid is a member of group Name, GroupCounter is incremented when the
+%%% Pid joins the group Name.
+%%% {{pid, Pid, Name}}
+%%% Pid is a member of group Name.
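+%%%
+%%% Illustrative example (not part of the original code): after a local
+%%% process P joins group g for the first time (hypothetical names), the
+%%% table roughly contains
+%%%   {{group,g}}
+%%%   {{ref,P}, P, MRef, 1}    and    {{ref,MRef}, P}
+%%%   {{member,g,P}, 1}        and    {{local_member,g,P}}
+%%%   {{pid,P,g}}
+%%% where MRef is the monitor reference created in do_monitor/1.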
+
+store(List) ->
+ _ = [assure_group(Name) andalso [join_group(Name, P) || P <- Members] ||
+ [Name, Members] <- List],
+ ok.
+
+assure_group(Name) ->
+ Key = {group, Name},
+ ets:member(pg2_table, Key) orelse true =:= ets:insert(pg2_table, {Key}).
+
+delete_group(Name) ->
+ _ = [leave_group(Name, Pid) || Pid <- group_members(Name)],
+ true = ets:delete(pg2_table, {group, Name}),
+ ok.
+
+member_died(Ref) ->
+ [{{ref, Ref}, Pid}] = ets:lookup(pg2_table, {ref, Ref}),
+ Names = member_groups(Pid),
+ _ = [leave_group(Name, P) ||
+ Name <- Names,
+ P <- member_in_group(Pid, Name)],
+ %% Kept for backward compatibility with links. Can be removed, eventually.
+ _ = [gen_server:abcast(nodes(), ?MODULE, {del_member, Name, Pid}) ||
+ Name <- Names],
+ ok.
+
+join_group(Name, Pid) ->
+ Ref_Pid = {ref, Pid},
+ try _ = ets:update_counter(pg2_table, Ref_Pid, {4, +1})
+ catch _:_ ->
+ {RPid, Ref} = do_monitor(Pid),
+ true = ets:insert(pg2_table, {Ref_Pid, RPid, Ref, 1}),
+ true = ets:insert(pg2_table, {{ref, Ref}, Pid})
+ end,
+ Member_Name_Pid = {member, Name, Pid},
+ try _ = ets:update_counter(pg2_table, Member_Name_Pid, {2, +1})
+ catch _:_ ->
+ true = ets:insert(pg2_table, {Member_Name_Pid, 1}),
+ _ = [ets:insert(pg2_table, {{local_member, Name, Pid}}) ||
+ node(Pid) =:= node()],
+ true = ets:insert(pg2_table, {{pid, Pid, Name}})
+ end.
+
+leave_group(Name, Pid) ->
+ Member_Name_Pid = {member, Name, Pid},
+ try ets:update_counter(pg2_table, Member_Name_Pid, {2, -1}) of
+ N ->
+ if
+ N =:= 0 ->
+ true = ets:delete(pg2_table, {pid, Pid, Name}),
+ _ = [ets:delete(pg2_table, {local_member, Name, Pid}) ||
+ node(Pid) =:= node()],
+ true = ets:delete(pg2_table, Member_Name_Pid);
+ true ->
+ ok
+ end,
+ Ref_Pid = {ref, Pid},
+ case ets:update_counter(pg2_table, Ref_Pid, {4, -1}) of
+ 0 ->
+ [{Ref_Pid,RPid,Ref,0}] = ets:lookup(pg2_table, Ref_Pid),
+ true = ets:delete(pg2_table, {ref, Ref}),
+ true = ets:delete(pg2_table, Ref_Pid),
+ true = erlang:demonitor(Ref, [flush]),
+ kill_monitor_proc(RPid, Pid);
+ _ ->
+ ok
+ end
+ catch _:_ ->
+ ok
+ end.
+
+all_members() ->
+ [[G, group_members(G)] || G <- all_groups()].
+
+group_members(Name) ->
+ [P ||
+ [P, N] <- ets:match(pg2_table, {{member, Name, '$1'},'$2'}),
+ _ <- lists:seq(1, N)].
+
+local_group_members(Name) ->
+ [P ||
+ [Pid] <- ets:match(pg2_table, {{local_member, Name, '$1'}}),
+ P <- member_in_group(Pid, Name)].
+
+member_in_group(Pid, Name) ->
+ [{{member, Name, Pid}, N}] = ets:lookup(pg2_table, {member, Name, Pid}),
+ lists:duplicate(N, Pid).
+
+member_groups(Pid) ->
+ [Name || [Name] <- ets:match(pg2_table, {{pid, Pid, '$1'}})].
+
+all_groups() ->
+ [N || [N] <- ets:match(pg2_table, {{group,'$1'}})].
+
+ensure_started() ->
+ case whereis(?MODULE) of
+ undefined ->
+ C = {pg2, {?MODULE, start_link, []}, permanent,
+ 1000, worker, [?MODULE]},
+ supervisor:start_child(kernel_safe_sup, C);
+ Pg2Pid ->
+ {ok, Pg2Pid}
+ end.
+
+
+kill_monitor_proc(RPid, Pid) ->
+ RPid =:= Pid orelse exit(RPid, kill).
+
+%% When/if erlang:monitor() returns before trying to connect to the
+%% other node this function can be removed.
+do_monitor(Pid) ->
+ case (node(Pid) =:= node()) orelse lists:member(node(Pid), nodes()) of
+ true ->
+ %% Assume the node is still up
+ {Pid, erlang:monitor(process, Pid)};
+ false ->
+ F = fun() ->
+ Ref = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', Ref, process, Pid, _Info} ->
+ exit(normal)
+ end
+ end,
+ erlang:spawn_monitor(F)
+ end.
diff --git a/lib/kernel/src/ram_file.erl b/lib/kernel/src/ram_file.erl
new file mode 100644
index 0000000000..d996650948
--- /dev/null
+++ b/lib/kernel/src/ram_file.erl
@@ -0,0 +1,492 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ram_file).
+
+%% Binary RAM file interface
+
+%% Generic file contents operations
+-export([open/2, close/1]).
+-export([write/2, read/2, copy/3,
+ pread/2, pread/3, pwrite/2, pwrite/3,
+ position/2, truncate/1, sync/1]).
+
+%% Specialized file operations
+-export([get_size/1, get_file/1, set_file/2, get_file_close/1]).
+-export([compress/1, uncompress/1, uuencode/1, uudecode/1]).
+
+-export([open_mode/1]). %% used by ftp-file
+
+-export([ipread_s32bu_p32bu/3]).
+
+
+
+%% Includes and defines
+
+-define(RAM_FILE_DRV, "ram_file_drv").
+-define(MAX_I32, (1 bsl 31)).
+-define(G_I32(X), is_integer(X), X >= -?MAX_I32, X < ?MAX_I32).
+
+-include("file.hrl").
+
+
+
+%% --------------------------------------------------------------------------
+%% These operation codes were once identical between efile_drv.c
+%% and ram_file_drv.c, but now these drivers do not depend on each other.
+%% So the codes could be changed to more logical values now, but why indeed?
+
+%% Defined "file" functions
+-define(RAM_FILE_OPEN, 1).
+-define(RAM_FILE_READ, 2).
+-define(RAM_FILE_LSEEK, 3).
+-define(RAM_FILE_WRITE, 4).
+-define(RAM_FILE_FSYNC, 9).
+-define(RAM_FILE_TRUNCATE, 14).
+-define(RAM_FILE_PREAD, 17).
+-define(RAM_FILE_PWRITE, 18).
+
+%% Other operations
+-define(RAM_FILE_GET, 30).
+-define(RAM_FILE_SET, 31).
+-define(RAM_FILE_GET_CLOSE, 32).
+-define(RAM_FILE_COMPRESS, 33).
+-define(RAM_FILE_UNCOMPRESS, 34).
+-define(RAM_FILE_UUENCODE, 35).
+-define(RAM_FILE_UUDECODE, 36).
+-define(RAM_FILE_SIZE, 37).
+
+%% Open modes for RAM_FILE_OPEN
+-define(RAM_FILE_MODE_READ, 1).
+-define(RAM_FILE_MODE_WRITE, 2).
+-define(RAM_FILE_MODE_READ_WRITE, 3).
+%% Use this mask to get just the mode bits to be passed to the driver.
+-define(RAM_FILE_MODE_MASK, 3).
+
+%% Seek modes for RAM_FILE_LSEEK
+-define(RAM_FILE_SEEK_SET, 0).
+-define(RAM_FILE_SEEK_CUR, 1).
+-define(RAM_FILE_SEEK_END, 2).
+
+%% Return codes
+-define(RAM_FILE_RESP_OK, 0).
+-define(RAM_FILE_RESP_ERROR, 1).
+-define(RAM_FILE_RESP_DATA, 2).
+-define(RAM_FILE_RESP_NUMBER, 3).
+-define(RAM_FILE_RESP_INFO, 4).
+
+%% --------------------------------------------------------------------------
+%% Generic file contents operations.
+%%
+%% Supposed to be called by applications through module file.
+
+open(Data, ModeList) when is_list(ModeList) ->
+ case open_mode(ModeList) of
+ {Mode,Opts} when is_integer(Mode) ->
+ case ll_open(Data, Mode, Opts) of
+ {ok,Port} ->
+ {ok,#file_descriptor{module=?MODULE, data=Port}};
+ Error ->
+ Error
+ end;
+ {error,_}=Error ->
+ Error
+ end;
+%% Old obsolete mode specification
+open(Data, Mode) ->
+ case mode_list(Mode) of
+ ModeList when is_list(ModeList) ->
+ open(Data, ModeList);
+ Error ->
+ Error
+ end.
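+
+%% Illustrative usage (not part of the original code); the values shown are
+%% what the code above is expected to return for this input:
+%%
+%%   {ok, Fd} = ram_file:open("abcdef", [read, write]),
+%%   {ok, "cde"} = ram_file:pread(Fd, 2, 3),   %% 3 bytes at offset 2
+%%   ok = ram_file:close(Fd)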
+
+close(#file_descriptor{module = ?MODULE, data = Port}) ->
+ ll_close(Port).
+
+read(#file_descriptor{module = ?MODULE, data = Port}, Sz)
+ when is_integer(Sz), Sz >= 0 ->
+ if
+ ?G_I32(Sz) ->
+ Cmd = <<?RAM_FILE_READ:8,Sz:32>>,
+ case call_port(Port, Cmd) of
+ {ok, {0, _Data}} when Sz =/= 0 ->
+ eof;
+ {ok, {_Sz, Data}} ->
+ {ok, Data};
+ {error, enomem} ->
+ %% Garbage collecting here might help if
+ %% the current process has some old binaries left.
+ erlang:garbage_collect(),
+ case call_port(Port, Cmd) of
+ {ok, {0, _Data}} when Sz =/= 0 ->
+ eof;
+ {ok, {_Sz, Data}} ->
+ {ok, Data};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+ true ->
+ {error, einval}
+ end.
+
+write(#file_descriptor{module = ?MODULE, data = Port}, Bytes) ->
+ case call_port(Port, [?RAM_FILE_WRITE | Bytes]) of
+ {ok, _Sz} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+
+
+
+copy(#file_descriptor{module = ?MODULE} = Source,
+ #file_descriptor{module = ?MODULE} = Dest,
+ Length)
+ when is_integer(Length), Length >= 0;
+ is_atom(Length) ->
+ %% XXX Should be moved down to the driver for optimization.
+ file:copy_opened(Source, Dest, Length).
+
+
+sync(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, <<?RAM_FILE_FSYNC>>).
+
+truncate(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, <<?RAM_FILE_TRUNCATE>>).
+
+position(#file_descriptor{module = ?MODULE, data = Port}, Pos) ->
+ case lseek_position(Pos) of
+ {ok, Offs, Whence} when ?G_I32(Offs) ->
+ call_port(Port, <<?RAM_FILE_LSEEK:8,Offs:32,Whence:32>>);
+ {ok, _, _} ->
+ {error, einval};
+ Error ->
+ Error
+ end.
+
+
+
+pread(#file_descriptor{module = ?MODULE, data = Port}, L) when is_list(L) ->
+ pread_1(Port, L, []).
+
+pread_1(Port, [], Cs) ->
+ pread_2(Port, lists:reverse(Cs), []);
+pread_1(Port, [{At, Sz} | T], Cs)
+ when is_integer(At), is_integer(Sz), Sz >= 0 ->
+ if
+ ?G_I32(At), ?G_I32(Sz) ->
+ pread_1(Port, T, [{Sz,<<?RAM_FILE_PREAD:8,At:32,Sz:32>>}|Cs]);
+ true ->
+ {error, einval}
+ end;
+pread_1(_, _, _) ->
+ {error, badarg}.
+
+pread_2(_Port, [], R) ->
+ {ok, lists:reverse(R)};
+pread_2(Port, [{Sz,Command}|Commands], R) ->
+ case call_port(Port, Command) of
+ {ok, {0,_Data}} when Sz =/= 0 ->
+ pread_2(Port, Commands, [eof | R]);
+ {ok, {_Sz,Data}} ->
+ pread_2(Port, Commands, [Data | R]);
+ Error ->
+ Error
+ end.
+
+pread(#file_descriptor{module = ?MODULE, data = Port}, At, Sz)
+ when is_integer(At), is_integer(Sz), Sz >= 0 ->
+ if
+ ?G_I32(At), ?G_I32(Sz) ->
+ case call_port(Port, <<?RAM_FILE_PREAD:8,At:32,Sz:32>>) of
+ {ok, {0,_Data}} when Sz =/= 0 ->
+ eof;
+ {ok, {_Sz,Data}} ->
+ {ok, Data};
+ Error ->
+ Error
+ end;
+ true ->
+ {error, einval}
+ end;
+pread(#file_descriptor{module = ?MODULE}, _, _) ->
+ {error, badarg}.
+
+
+
+pwrite(#file_descriptor{module = ?MODULE, data = Port}, L) when is_list(L) ->
+ pwrite_1(Port, L, 0, []).
+
+pwrite_1(Port, [], _, Cs) ->
+ pwrite_2(Port, lists:reverse(Cs), 0);
+pwrite_1(Port, [{At, Bytes} | T], R, Cs) when is_integer(At) ->
+ if
+ ?G_I32(At), is_binary(Bytes) ->
+ pwrite_1(Port, T, R+1,
+ [<<?RAM_FILE_PWRITE:8,At:32,Bytes/binary>> | Cs]);
+ ?G_I32(At) ->
+ try erlang:iolist_to_binary(Bytes) of
+ Bin ->
+ pwrite_1(Port, T, R+1,
+ [<<?RAM_FILE_PWRITE:8,At:32,Bin/binary>> | Cs])
+ catch
+ error:Reason ->
+ {error, Reason}
+ end;
+ true ->
+ {error, {R, einval}}
+ end;
+pwrite_1(_, _, _, _) ->
+ {error, badarg}.
+
+pwrite_2(_Port, [], _R) ->
+ ok;
+pwrite_2(Port, [Command|Commands], R) ->
+ case call_port(Port, Command) of
+ {ok, _Sz} ->
+ pwrite_2(Port, Commands, R+1);
+ {error, badarg} = Error ->
+ Error;
+ {error, Reason} ->
+ {error, {R, Reason}}
+ end.
+
+pwrite(#file_descriptor{module = ?MODULE, data = Port}, At, Bytes)
+ when is_integer(At) ->
+ if
+ ?G_I32(At) ->
+ case call_port(Port, [<<?RAM_FILE_PWRITE:8,At:32>>|Bytes]) of
+ {ok, _Sz} ->
+ ok;
+ Error ->
+ Error
+ end;
+ true ->
+ {error, einval}
+ end;
+pwrite(#file_descriptor{module = ?MODULE}, _, _) ->
+ {error, badarg}.
+
+
+ipread_s32bu_p32bu(#file_descriptor{module = ?MODULE} = Handle, Pos, MaxSz) ->
+ file:ipread_s32bu_p32bu_int(Handle, Pos, MaxSz).
+
+
+
+%% --------------------------------------------------------------------------
+%% Specialized ram_file API for functions not in file, unique to ram_file.
+%%
+
+
+get_file(#file_descriptor{module = ?MODULE, data = Port}) ->
+ case call_port(Port, [?RAM_FILE_GET]) of
+ {ok, {_Sz, Data}} ->
+ {ok, Data};
+ Error ->
+ Error
+ end;
+get_file(#file_descriptor{}) ->
+ {error, enotsup}.
+
+set_file(#file_descriptor{module = ?MODULE, data = Port}, Data) ->
+ call_port(Port, [?RAM_FILE_SET | Data]);
+set_file(#file_descriptor{}, _) ->
+ {error, enotsup}.
+
+get_file_close(#file_descriptor{module = ?MODULE, data = Port}) ->
+ case call_port(Port, [?RAM_FILE_GET_CLOSE]) of
+ {ok, {_Sz, Data}} ->
+ {ok, Data};
+ Error ->
+ Error
+ end;
+get_file_close(#file_descriptor{}) ->
+ {error, enotsup}.
+
+get_size(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, [?RAM_FILE_SIZE]);
+get_size(#file_descriptor{}) ->
+ {error, enotsup}.
+
+compress(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, [?RAM_FILE_COMPRESS]);
+compress(#file_descriptor{}) ->
+ {error, enotsup}.
+
+uncompress(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, [?RAM_FILE_UNCOMPRESS]);
+uncompress(#file_descriptor{}) ->
+ {error, enotsup}.
+
+
+uuencode(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, [?RAM_FILE_UUENCODE]);
+uuencode(#file_descriptor{}) ->
+ {error, enotsup}.
+
+uudecode(#file_descriptor{module = ?MODULE, data = Port}) ->
+ call_port(Port, [?RAM_FILE_UUDECODE]);
+uudecode(#file_descriptor{}) ->
+ {error, enotsup}.
+
+
+
+%%%-----------------------------------------------------------------
+%%% Functions to communicate with the driver
+
+ll_open(Data, Mode, Opts) ->
+ try erlang:open_port({spawn, ?RAM_FILE_DRV}, Opts) of
+ Port ->
+ case call_port(Port, [<<?RAM_FILE_OPEN:8,Mode:32>>|Data]) of
+ {error, _} = Error ->
+ ll_close(Port),
+ Error;
+ {ok, _} ->
+ {ok, Port}
+ end
+ catch
+ error:Reason ->
+ {error, Reason}
+ end.
+
+call_port(Port, Command) when is_port(Port), is_binary(Command) ->
+ try erlang:port_command(Port, Command) of
+ true ->
+ get_response(Port)
+ catch
+ error:badarg ->
+ {error, einval}; % Since Command is valid, Port must be dead
+ error:Reason ->
+ {error, Reason}
+ end;
+call_port(Port, Command) ->
+ try erlang:iolist_to_binary(Command) of
+ Bin ->
+ call_port(Port, Bin)
+ catch
+ error:Reason ->
+ {error, Reason}
+ end.
+
+get_response(Port) ->
+ receive
+ {Port, {data, [Response|Rest]}} ->
+ translate_response(Response, Rest);
+ {'EXIT', Port, _Reason} ->
+ {error, port_died}
+ end.
+
+ll_close(Port) ->
+ try erlang:port_close(Port) catch error:_ -> ok end,
+ receive %% In case the caller is the owner and traps exits
+ {'EXIT', Port, _} ->
+ ok
+ after 0 ->
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Utility functions.
+
+mode_list(read) ->
+ [read];
+mode_list(write) ->
+ [write];
+mode_list(read_write) ->
+ [read, write];
+mode_list({binary, Mode}) when is_atom(Mode) ->
+ [binary | mode_list(Mode)];
+mode_list({character, Mode}) when is_atom(Mode) ->
+ mode_list(Mode);
+mode_list(_) ->
+ {error, badarg}.
+
+
+
+%% Converts a list of mode atoms into a mode word for the driver.
+%% Returns {Mode, Opts} where Opts is a list of options for
+%% erlang:open_port/2, or {error, badarg} upon failure.
+
+open_mode(List) when is_list(List) ->
+ case open_mode(List, {0, []}) of
+ {Mode, Opts} when Mode band
+ (?RAM_FILE_MODE_READ bor ?RAM_FILE_MODE_WRITE)
+ =:= 0 ->
+ {Mode bor ?RAM_FILE_MODE_READ, Opts};
+ Other ->
+ Other
+ end.
+
+open_mode([ram|Rest], {Mode, Opts}) ->
+ open_mode(Rest, {Mode, Opts});
+open_mode([read|Rest], {Mode, Opts}) ->
+ open_mode(Rest, {Mode bor ?RAM_FILE_MODE_READ, Opts});
+open_mode([write|Rest], {Mode, Opts}) ->
+ open_mode(Rest, {Mode bor ?RAM_FILE_MODE_WRITE, Opts});
+open_mode([binary|Rest], {Mode, Opts}) ->
+ open_mode(Rest, {Mode, [binary | Opts]});
+open_mode([], {Mode, Opts}) ->
+ {Mode, Opts};
+open_mode(_, _) ->
+ {error, badarg}.
+
+
+
+%% Converts a position tuple {bof, X} | {cur, X} | {eof, X} into
+%% {ok, Offset, OriginCode} for the driver.
+%% Returns {error, einval} upon failure.
+
+lseek_position(Pos) when is_integer(Pos) ->
+ lseek_position({bof, Pos});
+lseek_position(bof) ->
+ lseek_position({bof, 0});
+lseek_position(cur) ->
+ lseek_position({cur, 0});
+lseek_position(eof) ->
+ lseek_position({eof, 0});
+lseek_position({bof, Offset}) when is_integer(Offset) ->
+ {ok, Offset, ?RAM_FILE_SEEK_SET};
+lseek_position({cur, Offset}) when is_integer(Offset) ->
+ {ok, Offset, ?RAM_FILE_SEEK_CUR};
+lseek_position({eof, Offset}) when is_integer(Offset) ->
+ {ok, Offset, ?RAM_FILE_SEEK_END};
+lseek_position(_) ->
+ {error, badarg}.
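+
+%% Illustrative examples (not part of the original code):
+%%
+%%   lseek_position(10)        -> {ok, 10, ?RAM_FILE_SEEK_SET}
+%%   lseek_position({cur, -4}) -> {ok, -4, ?RAM_FILE_SEEK_CUR}
+%%   lseek_position(eof)       -> {ok,  0, ?RAM_FILE_SEEK_END}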
+
+
+
+translate_response(?RAM_FILE_RESP_OK, []) ->
+ ok;
+translate_response(?RAM_FILE_RESP_OK, Data) ->
+ {ok, Data};
+translate_response(?RAM_FILE_RESP_ERROR, List) when is_list(List) ->
+ {error, list_to_atom(List)};
+translate_response(?RAM_FILE_RESP_NUMBER, [X1, X2, X3, X4]) ->
+ {ok, i32(X1, X2, X3, X4)};
+translate_response(?RAM_FILE_RESP_DATA, [X1, X2, X3, X4|Data]) ->
+ {ok, {i32(X1, X2, X3, X4), Data}};
+translate_response(X, Data) ->
+ {error, {bad_response_from_port, X, Data}}.
+
+i32(X1,X2,X3,X4) ->
+ (X1 bsl 24) bor (X2 bsl 16) bor (X3 bsl 8) bor X4.
diff --git a/lib/kernel/src/rpc.erl b/lib/kernel/src/rpc.erl
new file mode 100644
index 0000000000..d69f2a12ad
--- /dev/null
+++ b/lib/kernel/src/rpc.erl
@@ -0,0 +1,609 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(rpc).
+
+%% General rpc, broadcast, multicall, promise and parallel evaluator
+%% facility
+
+%% This code used to reside in net.erl, but has now been moved to
+%% a separate module.
+
+-define(NAME, rex).
+
+-behaviour(gen_server).
+
+-export([start/0, start_link/0, stop/0,
+ call/4, call/5,
+ block_call/4, block_call/5,
+ server_call/4,
+ cast/4,
+ abcast/2,
+ abcast/3,
+ sbcast/2,
+ sbcast/3,
+ eval_everywhere/3,
+ eval_everywhere/4,
+ multi_server_call/2,
+ multi_server_call/3,
+ multicall/3,
+ multicall/4,
+ multicall/5,
+ async_call/4,
+ yield/1,
+ nb_yield/2,
+ nb_yield/1,
+ parallel_eval/1,
+ pmap/3, pinfo/1, pinfo/2]).
+
+%% Deprecated calls.
+-deprecated([{safe_multi_server_call,2},{safe_multi_server_call,3}]).
+-export([safe_multi_server_call/2,safe_multi_server_call/3]).
+
+%% gen_server exports
+-export([init/1,handle_call/3,handle_cast/2,handle_info/2,
+ terminate/2, code_change/3]).
+
+%% Internals
+-export([proxy_user_flush/0]).
+
+%%------------------------------------------------------------------------
+
+%% Remote execution and broadcasting facility
+
+start() ->
+ gen_server:start({local,?NAME},?MODULE,[],[]).
+
+start_link() ->
+ gen_server:start_link({local,?NAME},?MODULE,[],[]).
+
+stop() ->
+ stop(?NAME).
+
+stop(Rpc) ->
+ gen_server:call(Rpc, stop, infinity).
+
+-spec init([]) -> {'ok', gb_tree()}.
+init([]) ->
+ process_flag(trap_exit, true),
+ {ok, gb_trees:empty()}.
+
+handle_call({call, Mod, Fun, Args, Gleader}, To, S) ->
+ handle_call_call(Mod, Fun, Args, Gleader, To, S);
+handle_call({block_call, Mod, Fun, Args, Gleader}, _To, S) ->
+ MyGL = group_leader(),
+ set_group_leader(Gleader),
+ Reply =
+ case catch apply(Mod,Fun,Args) of
+ {'EXIT', _} = Exit ->
+ {badrpc, Exit};
+ Other ->
+ Other
+ end,
+ group_leader(MyGL, self()), % restore
+ {reply, Reply, S};
+handle_call(stop, _To, S) ->
+ {stop, normal, stopped, S};
+handle_call(_, _To, S) ->
+ {noreply, S}. % Ignore !
+
+
+handle_cast({cast, Mod, Fun, Args, Gleader}, S) ->
+ spawn(
+ fun() ->
+ set_group_leader(Gleader),
+ apply(Mod, Fun, Args)
+ end),
+ {noreply, S};
+handle_cast(_, S) ->
+ {noreply, S}. % Ignore !
+
+
+handle_info({'DOWN', _, process, Caller, Reason}, S) ->
+ case gb_trees:lookup(Caller, S) of
+ {value, To} ->
+ receive
+ {Caller, {reply, Reply}} ->
+ gen_server:reply(To, Reply)
+ after 0 ->
+ gen_server:reply(To, {badrpc, {'EXIT', Reason}})
+ end,
+ {noreply, gb_trees:delete(Caller, S)};
+ none ->
+ {noreply, S}
+ end;
+handle_info({Caller, {reply, Reply}}, S) ->
+ case gb_trees:lookup(Caller, S) of
+ {value, To} ->
+ receive
+ {'DOWN', _, process, Caller, _} ->
+ gen_server:reply(To, Reply),
+ {noreply, gb_trees:delete(Caller, S)}
+ end;
+ none ->
+ {noreply, S}
+ end;
+handle_info({From, {sbcast, Name, Msg}}, S) ->
+ case catch Name ! Msg of %% use catch to get the printout
+ {'EXIT', _} ->
+ From ! {?NAME, node(), {nonexisting_name, Name}};
+ _ ->
+ From ! {?NAME, node(), node()}
+ end,
+ {noreply,S};
+handle_info({From, {send, Name, Msg}}, S) ->
+ case catch Name ! {From, Msg} of %% use catch to get the printout
+ {'EXIT', _} ->
+ From ! {?NAME, node(), {nonexisting_name, Name}};
+ _ ->
+ ok %% It's up to Name to respond !!!!!
+ end,
+ {noreply,S};
+handle_info({From, {call,Mod,Fun,Args,Gleader}}, S) ->
+ %% Special for hidden C nodes, ugh ...
+ handle_call_call(Mod, Fun, Args, Gleader, {From,?NAME}, S);
+handle_info(_, S) ->
+ {noreply,S}.
+
+terminate(_, _S) ->
+ ok.
+
+code_change(_, S, _) ->
+ {ok, S}.
+
+%%
+%% Auxiliary function to avoid a false dialyzer warning -- do not inline
+%%
+handle_call_call(Mod, Fun, Args, Gleader, To, S) ->
+ RpcServer = self(),
+ %% Spawn not to block the rpc server.
+ {Caller,_} =
+ erlang:spawn_monitor(
+ fun () ->
+ set_group_leader(Gleader),
+ Reply =
+ %% in case some sucker rex'es
+ %% something that throws
+ case catch apply(Mod, Fun, Args) of
+ {'EXIT', _} = Exit ->
+ {badrpc, Exit};
+ Result ->
+ Result
+ end,
+ RpcServer ! {self(), {reply, Reply}}
+ end),
+ {noreply, gb_trees:insert(Caller, To, S)}.
+
+
+%% RPC aid functions ....
+
+set_group_leader(Gleader) when is_pid(Gleader) ->
+ group_leader(Gleader, self());
+set_group_leader(user) ->
+ %% For example, hidden C nodes don't want any I/O.
+ Gleader = case whereis(user) of
+ Pid when is_pid(Pid) -> Pid;
+ undefined -> proxy_user()
+ end,
+ group_leader(Gleader, self()).
+
+
+%% The 'rex_proxy_user' process serves as group leader for early rpc calls
+%% that may do I/O before the real group leader 'user' has been started (OTP-7903).
+proxy_user() ->
+ case whereis(rex_proxy_user) of
+ Pid when is_pid(Pid) -> Pid;
+ undefined ->
+ Pid = spawn(fun()-> proxy_user_loop() end),
+ try register(rex_proxy_user,Pid) of
+ true -> Pid
+ catch error:_ -> % spawn race, kill and try again
+ exit(Pid,kill),
+ proxy_user()
+ end
+ end.
+
+proxy_user_loop() ->
+ %% Wait for the real 'user' to start
+ timer:sleep(200),
+ case whereis(user) of
+ Pid when is_pid(Pid) -> proxy_user_flush();
+ undefined -> proxy_user_loop()
+ end.
+
+proxy_user_flush() ->
+ %% Forward all received messages to 'user'
+ receive Msg ->
+ user ! Msg
+ after 10*1000 ->
+ %% Hibernate but live forever, as it's not easy to know
+ %% when no more messages will arrive.
+ erlang:hibernate(?MODULE, proxy_user_flush, [])
+ end,
+ proxy_user_flush().
+
+
+%% THE rpc client interface
+
+-spec call(node(), atom(), atom(), [term()]) -> term().
+
+call(N,M,F,A) when node() =:= N -> %% Optimize local call
+ local_call(M, F, A);
+call(N,M,F,A) ->
+ do_call(N, {call,M,F,A,group_leader()}, infinity).
+
+-spec call(node(), atom(), atom(), [term()], timeout()) -> term().
+
+call(N,M,F,A,_Timeout) when node() =:= N -> %% Optimize local call
+ local_call(M,F,A);
+call(N,M,F,A,infinity) ->
+ do_call(N, {call,M,F,A,group_leader()}, infinity);
+call(N,M,F,A,Timeout) when is_integer(Timeout), Timeout >= 0 ->
+ do_call(N, {call,M,F,A,group_leader()}, Timeout).
+
+-spec block_call(node(), atom(), atom(), [term()]) -> term().
+
+block_call(N,M,F,A) when node() =:= N -> %% Optimize local call
+ local_call(M,F,A);
+block_call(N,M,F,A) ->
+ do_call(N, {block_call,M,F,A,group_leader()}, infinity).
+
+-spec block_call(node(), atom(), atom(), [term()], timeout()) -> term().
+
+block_call(N,M,F,A,_Timeout) when node() =:= N -> %% Optimize local call
+ local_call(M, F, A);
+block_call(N,M,F,A,infinity) ->
+ do_call(N, {block_call,M,F,A,group_leader()}, infinity);
+block_call(N,M,F,A,Timeout) when is_integer(Timeout), Timeout >= 0 ->
+ do_call(N, {block_call,M,F,A,group_leader()}, Timeout).
+
+local_call(M, F, A) when is_atom(M), is_atom(F), is_list(A) ->
+ case catch apply(M, F, A) of
+ {'EXIT',_}=V -> {badrpc, V};
+ Other -> Other
+ end.
+
+do_call(Node, Request, infinity) ->
+ rpc_check(catch gen_server:call({?NAME,Node}, Request, infinity));
+do_call(Node, Request, Timeout) ->
+ Tag = make_ref(),
+ {Receiver,Mref} =
+ erlang:spawn_monitor(
+ fun() ->
+ %% Middleman process. Should be insensitive to regular
+ %% exit signals.
+ process_flag(trap_exit, true),
+ Result = gen_server:call({?NAME,Node}, Request, Timeout),
+ exit({self(),Tag,Result})
+ end),
+ receive
+ {'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
+ rpc_check(Result);
+ {'DOWN',Mref,_,_,Reason} ->
+ %% The middleman code failed. Or someone did
+ %% exit(_, kill) on the middleman process => Reason==killed
+ rpc_check_t({'EXIT',Reason})
+ end.
+
+rpc_check_t({'EXIT', {timeout,_}}) -> {badrpc, timeout};
+rpc_check_t(X) -> rpc_check(X).
+
+rpc_check({'EXIT', {{nodedown,_},_}}) -> {badrpc, nodedown};
+rpc_check({'EXIT', X}) -> exit(X);
+rpc_check(X) -> X.
+
+
+%% This is a handy function for interacting with a server called Name
+%% at node Node. It is assumed that the server receives messages of the
+%% form {From, Request} and replies with From ! {ReplyWrapper, Node, Reply}.
+%% This function makes such a server call and ensures that the entire
+%% call is wrapped in an atomic transaction that either succeeds or
+%% fails, i.e. it never hangs (unless the server itself hangs).
+
+-spec server_call(node(), atom(), term(), term()) -> term() | {'error', 'nodedown'}.
+
+server_call(Node, Name, ReplyWrapper, Msg)
+ when is_atom(Node), is_atom(Name) ->
+ if node() =:= nonode@nohost, Node =/= nonode@nohost ->
+ {error, nodedown};
+ true ->
+ Ref = erlang:monitor(process, {Name, Node}),
+ {Name, Node} ! {self(), Msg},
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ {error, nodedown};
+ {ReplyWrapper, Node, Reply} ->
+ erlang:demonitor(Ref),
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ Reply
+ after 0 ->
+ Reply
+ end
+ end
+ end.
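+
+%% Illustrative sketch (not part of the original code); my_server, my_tag
+%% and handle/1 are hypothetical names. A conforming server loop and the
+%% matching call could look like:
+%%
+%%   loop() ->
+%%       receive
+%%           {From, Req} -> From ! {my_tag, node(), handle(Req)}, loop()
+%%       end.
+%%
+%%   Reply = rpc:server_call(Node, my_server, my_tag, Req)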
+
+-spec cast(node(), atom(), atom(), [term()]) -> 'true'.
+
+cast(Node, Mod, Fun, Args) when Node =:= node() ->
+ catch spawn(Mod, Fun, Args),
+ true;
+cast(Node, Mod, Fun, Args) ->
+ gen_server:cast({?NAME,Node}, {cast,Mod,Fun,Args,group_leader()}),
+ true.
+
+
+%% Asynchronous broadcast; returns nothing, it's just send'n pray.
+-spec abcast(atom(), term()) -> 'abcast'.
+
+abcast(Name, Mess) ->
+ abcast([node() | nodes()], Name, Mess).
+
+-spec abcast([node()], atom(), term()) -> 'abcast'.
+
+abcast([Node|Tail], Name, Mess) ->
+ Dest = {Name,Node},
+ case catch erlang:send(Dest, Mess, [noconnect]) of
+ noconnect -> spawn(erlang, send, [Dest,Mess]);
+ _ -> ok
+ end,
+ abcast(Tail, Name, Mess);
+abcast([], _,_) -> abcast.
+
+
+%% Synchronous broadcast: returns {GoodNodes, BadNodes}, where GoodNodes
+%% is the list of nodes that had Name as a registered server.
+%% Synchronous in the sense that all servers are known to have received
+%% the message when the call returns; whether they have processed it is
+%% another matter.
+
+-spec sbcast(atom(), term()) -> {[node()], [node()]}.
+
+sbcast(Name, Mess) ->
+ sbcast([node() | nodes()], Name, Mess).
+
+-spec sbcast([node()], atom(), term()) -> {[node()], [node()]}.
+
+sbcast(Nodes, Name, Mess) ->
+ Monitors = send_nodes(Nodes, ?NAME, {sbcast, Name, Mess}, []),
+ rec_nodes(?NAME, Monitors).
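+
+%% Illustrative usage (not part of the original code); my_config_server is
+%% a hypothetical registered name:
+%%
+%%   {GoodNodes, BadNodes} = rpc:sbcast(my_config_server, reload)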
+
+-spec eval_everywhere(atom(), atom(), [term()]) -> 'abcast'.
+
+eval_everywhere(Mod, Fun, Args) ->
+ eval_everywhere([node() | nodes()] , Mod, Fun, Args).
+
+-spec eval_everywhere([node()], atom(), atom(), [term()]) -> 'abcast'.
+
+eval_everywhere(Nodes, Mod, Fun, Args) ->
+ gen_server:abcast(Nodes, ?NAME, {cast,Mod,Fun,Args,group_leader()}).
+
+
+send_nodes([Node|Tail], Name, Msg, Monitors) when is_atom(Node) ->
+ Monitor = start_monitor(Node, Name),
+ %% Handle non-existing names in rec_nodes.
+ catch {Name, Node} ! {self(), Msg},
+ send_nodes(Tail, Name, Msg, [Monitor | Monitors]);
+send_nodes([_Node|Tail], Name, Msg, Monitors) ->
+ %% Skip non-atom _Node
+ send_nodes(Tail, Name, Msg, Monitors);
+send_nodes([], _Name, _Req, Monitors) ->
+ Monitors.
+
+%% Starts a monitor, either the new way, or the old.
+%% Assumes that the arguments are atoms.
+start_monitor(Node, Name) ->
+ if node() =:= nonode@nohost, Node =/= nonode@nohost ->
+ Ref = make_ref(),
+ self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
+ {Node, Ref};
+ true ->
+ {Node,erlang:monitor(process, {Name, Node})}
+ end.
+
+%% Cancels a monitor started with Ref = erlang:monitor(_, _),
+%% i.e. the return value {Node, Ref} from start_monitor/2 above.
+unmonitor(Ref) when is_reference(Ref) ->
+ erlang:demonitor(Ref),
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ true
+ after 0 ->
+ true
+ end.
+
+
+%% Call apply(M,F,A) on all nodes in parallel
+-spec multicall(atom(), atom(), [term()]) -> {[_], [node()]}.
+
+multicall(M, F, A) ->
+ multicall(M, F, A, infinity).
+
+-spec multicall([node()], atom(), atom(), [term()]) -> {[_], [node()]}
+ ; (atom(), atom(), [term()], timeout()) -> {[_], [node()]}.
+
+multicall(Nodes, M, F, A) when is_list(Nodes) ->
+ multicall(Nodes, M, F, A, infinity);
+multicall(M, F, A, Timeout) ->
+ multicall([node() | nodes()], M, F, A, Timeout).
+
+-spec multicall([node()], atom(), atom(), [term()], timeout()) -> {[_], [node()]}.
+
+multicall(Nodes, M, F, A, infinity)
+ when is_list(Nodes), is_atom(M), is_atom(F), is_list(A) ->
+ do_multicall(Nodes, M, F, A, infinity);
+multicall(Nodes, M, F, A, Timeout)
+ when is_list(Nodes), is_atom(M), is_atom(F), is_list(A), is_integer(Timeout),
+ Timeout >= 0 ->
+ do_multicall(Nodes, M, F, A, Timeout).
+
+do_multicall(Nodes, M, F, A, Timeout) ->
+ {Rep,Bad} = gen_server:multi_call(Nodes, ?NAME,
+ {call, M,F,A, group_leader()},
+ Timeout),
+ {lists:map(fun({_,R}) -> R end, Rep), Bad}.
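+
+%% Illustrative usage (not part of the original code):
+%%
+%%   {Replies, BadNodes} = rpc:multicall([node() | nodes()],
+%%                                       erlang, node, [], 5000)
+%%
+%% Replies contains each responding node's own name; nodes that do not
+%% answer within 5 seconds end up in BadNodes.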
+
+
+%% Send Msg to Name on all nodes, and collect the answers.
+%% Return {Replies, Badnodes} where Badnodes is a list of the nodes
+%% that failed during the timespan of the call.
+%% This function assumes that if we send a request to a server
+%% called Name, the server replies with a message of the form
+%% {Name, Node, Reply}; otherwise this function will hang forever.
+%% It also assumes that the server receives messages of the form
+%% {From, Msg} and then replies as From ! {Name, node(), Reply}.
+%%
+%% There is no apparent order among the replies.
+
+-spec multi_server_call(atom(), term()) -> {[_], [node()]}.
+
+multi_server_call(Name, Msg) ->
+ multi_server_call([node() | nodes()], Name, Msg).
+
+-spec multi_server_call([node()], atom(), term()) -> {[_], [node()]}.
+
+multi_server_call(Nodes, Name, Msg)
+ when is_list(Nodes), is_atom(Name) ->
+ Monitors = send_nodes(Nodes, Name, Msg, []),
+ rec_nodes(Name, Monitors).
+
+%% Deprecated functions. Were only needed when communicating with R6 nodes.
+
+safe_multi_server_call(Name, Msg) ->
+ multi_server_call(Name, Msg).
+
+safe_multi_server_call(Nodes, Name, Msg) ->
+ multi_server_call(Nodes, Name, Msg).
+
+
+rec_nodes(Name, Nodes) ->
+ rec_nodes(Name, Nodes, [], []).
+
+rec_nodes(_Name, [], Badnodes, Replies) ->
+ {Replies, Badnodes};
+rec_nodes(Name, [{N,R} | Tail], Badnodes, Replies) ->
+ receive
+ {'DOWN', R, _, _, _} ->
+ rec_nodes(Name, Tail, [N|Badnodes], Replies);
+ {?NAME, N, {nonexisting_name, _}} ->
+ %% used by sbcast()
+ unmonitor(R),
+ rec_nodes(Name, Tail, [N|Badnodes], Replies);
+ {Name, N, Reply} -> %% Name is bound !!!
+ unmonitor(R),
+ rec_nodes(Name, Tail, Badnodes, [Reply|Replies])
+ end.
+
+%% Now for an asynchronous rpc.
+%% An asynchronous version of rpc that is faster for a series of
+%% rpc calls towards the same node, i.e. it returns immediately with
+%% a Key that can be used in a subsequent yield(Key).
+
+-spec async_call(node(), atom(), atom(), [term()]) -> pid().
+
+async_call(Node, Mod, Fun, Args) ->
+ ReplyTo = self(),
+ spawn(
+ fun() ->
+ R = call(Node, Mod, Fun, Args), %% proper rpc
+ ReplyTo ! {self(), {promise_reply, R}} %% self() is key
+ end).
+
+-spec yield(pid()) -> term().
+
+yield(Key) when is_pid(Key) ->
+ {value,R} = do_yield(Key, infinity),
+ R.
+
+-spec nb_yield(pid(), timeout()) -> {'value', _} | 'timeout'.
+
+nb_yield(Key, infinity=Inf) when is_pid(Key) ->
+ do_yield(Key, Inf);
+nb_yield(Key, Timeout) when is_pid(Key), is_integer(Timeout), Timeout >= 0 ->
+ do_yield(Key, Timeout).
+
+-spec nb_yield(pid()) -> {'value', _} | 'timeout'.
+
+nb_yield(Key) when is_pid(Key) ->
+ do_yield(Key, 0).
+
+-spec do_yield(pid(), timeout()) -> {'value', _} | 'timeout'.
+
+do_yield(Key, Timeout) ->
+ receive
+ {Key,{promise_reply,R}} ->
+ {value,R}
+ after Timeout ->
+ timeout
+ end.
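+
+%% Illustrative usage (not part of the original code):
+%%
+%%   Key = rpc:async_call(Node, erlang, node, []),
+%%   ... do something else ...
+%%   Node = rpc:yield(Key)           %% blocks until the reply arrives
+%%
+%% or, polling instead of blocking:
+%%
+%%   case rpc:nb_yield(Key, 100) of
+%%       {value, R} -> R;
+%%       timeout    -> not_ready
+%%   end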
+
+
+%% A parallel network evaluator
+%% ArgL === [{M,F,Args},........]
+%% Returns a list of the evaluations in the same order as
+%% given in ArgL
+-spec parallel_eval([{atom(), atom(), [_]}]) -> [_].
+
+parallel_eval(ArgL) ->
+ Nodes = [node() | nodes()],
+ Keys = map_nodes(ArgL,Nodes,Nodes),
+ [yield(K) || K <- Keys].
+
+map_nodes([],_,_) -> [];
+map_nodes(ArgL,[],Original) ->
+ map_nodes(ArgL,Original,Original);
+map_nodes([{M,F,A}|Tail],[Node|MoreNodes], Original) ->
+ [?MODULE:async_call(Node,M,F,A) |
+ map_nodes(Tail,MoreNodes,Original)].
+
+%% Parallel version of lists:map/3 with exactly the same
+%% arguments and return value as lists:map/3,
+%% except that it calls exit/1 if a network error occurs.
+-spec pmap({atom(),atom()}, [term()], [term()]) -> [term()].
+
+pmap({M,F}, As, List) ->
+ check(parallel_eval(build_args(M,F,As, List, [])), []).
+
+%% The arguments are accumulated in reverse order here and reversed
+%% again in check/2, so the original order is preserved.
+build_args(M,F, As, [Arg|Tail], Acc) ->
+ build_args(M,F, As, Tail, [{M,F,[Arg|As]}|Acc]);
+build_args(M,F, _, [], Acc) when is_atom(M), is_atom(F) -> Acc.
+
+%% If a single call fails, the whole computation fails.
+check([{badrpc, _}|_], _) -> exit(badrpc);
+check([X|T], Ack) -> check(T, [X|Ack]);
+check([], Ack) -> Ack.
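+
+%% Usage sketch (not part of this module): each list element becomes the
+%% first argument of the call, with the extra arguments appended, so
+%%
+%%   rpc:pmap({math, pow}, [2], [1, 2, 3])
+%%
+%% evaluates math:pow(1, 2), math:pow(2, 2) and math:pow(3, 2) in parallel
+%% and returns [1.0, 4.0, 9.0]; the double accumulation noted above keeps
+%% the results in input order.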
+
+
+%% location transparent version of process_info
+-spec pinfo(pid()) -> [{atom(), _}] | 'undefined'.
+
+pinfo(Pid) when node(Pid) =:= node() ->
+ process_info(Pid);
+pinfo(Pid) ->
+ call(node(Pid), erlang, process_info, [Pid]).
+
+-spec pinfo(pid(), Item) -> {Item, _} | 'undefined' | []
+ when is_subtype(Item, atom()).
+
+pinfo(Pid, Item) when node(Pid) =:= node() ->
+ process_info(Pid, Item);
+pinfo(Pid, Item) ->
+ block_call(node(Pid), erlang, process_info, [Pid, Item]).
diff --git a/lib/kernel/src/seq_trace.erl b/lib/kernel/src/seq_trace.erl
new file mode 100644
index 0000000000..78c3040f21
--- /dev/null
+++ b/lib/kernel/src/seq_trace.erl
@@ -0,0 +1,126 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(seq_trace).
+
+-define(SEQ_TRACE_SEND, 1). %(1 << 0)
+-define(SEQ_TRACE_RECEIVE, 2). %(1 << 1)
+-define(SEQ_TRACE_PRINT, 4). %(1 << 2)
+-define(SEQ_TRACE_TIMESTAMP, 8). %(1 << 3)
+
+-export([set_token/1,
+ set_token/2,
+ get_token/0,
+ get_token/1,
+ print/1,
+ print/2,
+ reset_trace/0,
+ set_system_tracer/1,
+ get_system_tracer/0]).
+
+%%---------------------------------------------------------------------------
+
+-type flag() :: 'send' | 'receive' | 'print' | 'timestamp'.
+-type component() :: 'label' | 'serial' | flag().
+-type value() :: non_neg_integer()
+ | {non_neg_integer(), non_neg_integer()}
+ | boolean().
+-type token_pair() :: {component(), value()}.
+
+%%---------------------------------------------------------------------------
+
+-type token() :: [] | {integer(), boolean(), _, _, _}.
+-spec set_token(token()) -> token() | 'ok'.
+
+set_token([]) ->
+ erlang:seq_trace(sequential_trace_token,[]);
+set_token({Flags,Label,Serial,_From,Lastcnt}) ->
+ F = decode_flags(Flags),
+ set_token2([{label,Label},{serial,{Lastcnt, Serial}} | F]).
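+
+%% Typical usage sketch (not part of this module): save the current token,
+%% disable tracing for a while, and restore the token later:
+%%
+%%   Token = seq_trace:get_token(),
+%%   seq_trace:set_token([]),        % clear the token
+%%   ...
+%%   seq_trace:set_token(Token).     % restore the saved token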
+
+%% We limit the label type to a small integer because erl_interface
+%% expects that. The BIF can, however, "unofficially" handle atoms as
+%% well; atoms can be used if only Erlang nodes are involved.
+
+-spec set_token(component(), value()) -> token_pair().
+
+set_token(Type, Val) ->
+ erlang:seq_trace(Type, Val).
+
+-spec get_token() -> term().
+
+get_token() ->
+ element(2,process_info(self(),sequential_trace_token)).
+
+-spec get_token(component()) -> token_pair().
+
+get_token(Type) ->
+ erlang:seq_trace_info(Type).
+
+-spec print(term()) -> 'ok'.
+
+print(Term) ->
+ erlang:seq_trace_print(Term),
+ ok.
+
+-spec print(integer(), term()) -> 'ok'.
+
+print(Label, Term) when is_atom(Label) ->
+ erlang:error(badarg, [Label, Term]);
+print(Label, Term) ->
+ erlang:seq_trace_print(Label, Term),
+ ok.
+
+-spec reset_trace() -> 'true'.
+
+reset_trace() ->
+ erlang:system_flag(1, 0).
+
+%% reset_trace(Pid) -> % this might be a useful function too
+
+-type tracer() :: pid() | port() | 'false'.
+
+-spec set_system_tracer(tracer()) -> tracer().
+
+set_system_tracer(Pid) ->
+ erlang:system_flag(sequential_tracer, Pid).
+
+-spec get_system_tracer() -> tracer().
+
+get_system_tracer() ->
+ element(2, erlang:system_info(sequential_tracer)).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% internal help functions
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+set_token2([{Type,Val}|T]) ->
+ erlang:seq_trace(Type, Val),
+ set_token2(T);
+set_token2([]) ->
+ ok.
+
+decode_flags(Flags) ->
+ Print = (Flags band ?SEQ_TRACE_PRINT) > 0,
+ Send = (Flags band ?SEQ_TRACE_SEND) > 0,
+ Rec = (Flags band ?SEQ_TRACE_RECEIVE) > 0,
+ Ts = (Flags band ?SEQ_TRACE_TIMESTAMP) > 0,
+ [{print,Print},{send,Send},{'receive',Rec},{timestamp,Ts}].
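+
+%% For illustration (not part of this module): a Flags value of 3 has the
+%% send and 'receive' bits set, so
+%%
+%%   decode_flags(3) =:=
+%%       [{print,false},{send,true},{'receive',true},{timestamp,false}].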
diff --git a/lib/kernel/src/standard_error.erl b/lib/kernel/src/standard_error.erl
new file mode 100644
index 0000000000..73901d9896
--- /dev/null
+++ b/lib/kernel/src/standard_error.erl
@@ -0,0 +1,253 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(standard_error).
+-behaviour(supervisor_bridge).
+
+%% Basic standard i/o server for user interface port.
+-export([start_link/0, init/1, terminate/2]).
+
+-define(NAME, standard_error).
+-define(PROCNAME_SUP, standard_error_sup).
+%% Internal exports
+-export([server/1, server/2]).
+
+%% Defines for control ops
+-define(CTRL_OP_GET_WINSIZE,100).
+
+%%
+%% The basic server and start-up.
+%%
+start_link() ->
+ supervisor_bridge:start_link({local, ?PROCNAME_SUP}, ?MODULE, []).
+
+terminate(_Reason,Pid) ->
+ (catch exit(Pid,kill)),
+ ok.
+
+init([]) ->
+ case (catch start_port([out,binary])) of
+ Pid when is_pid(Pid) ->
+ {ok,Pid,Pid};
+ _ ->
+ {error,no_stderror}
+ end.
+
+
+start_port(PortSettings) ->
+ Id = spawn(?MODULE,server,[{fd,2,2},PortSettings]),
+ register(?NAME,Id),
+ Id.
+
+
+server(Pid) when is_pid(Pid) ->
+ process_flag(trap_exit, true),
+ link(Pid),
+ run(Pid).
+
+server(PortName,PortSettings) ->
+ process_flag(trap_exit, true),
+ Port = open_port(PortName,PortSettings),
+ run(Port).
+
+run(P) ->
+ put(unicode,false),
+ server_loop(P).
+
+server_loop(Port) ->
+ receive
+ {io_request,From,ReplyAs,Request} when is_pid(From) ->
+ do_io_request(Request, From, ReplyAs, Port),
+ server_loop(Port);
+ {'EXIT',Port,badsig} -> % Ignore badsig errors
+ server_loop(Port);
+ {'EXIT',Port,What} -> % Port has exited
+ exit(What);
+ _Other -> % Ignore other messages
+ server_loop(Port)
+ end.
+
+
+get_fd_geometry(Port) ->
+ case (catch port_control(Port,?CTRL_OP_GET_WINSIZE,[])) of
+ List when is_list(List), length(List) =:= 8 ->
+ <<W:32/native,H:32/native>> = list_to_binary(List),
+ {W,H};
+ _ ->
+ error
+ end.
+
+
+%% do_io_request(Request, From, ReplyAs, Port)
+%%  Handle a single I/O request and send the reply to From.
+
+do_io_request(Req, From, ReplyAs, Port) ->
+ {_Status,Reply} = io_request(Req, Port),
+ io_reply(From, ReplyAs, Reply).
+
+%% New in R13B
+%% Wide characters (Unicode)
+io_request({put_chars,Encoding,Chars}, Port) -> % Binary new in R9C
+ put_chars(wrap_characters_to_binary(Chars,Encoding,
+ case get(unicode) of
+ true -> unicode;
+ _ -> latin1
+ end), Port);
+io_request({put_chars,Encoding,Mod,Func,Args}, Port) ->
+ Result = case catch apply(Mod,Func,Args) of
+ Data when is_list(Data); is_binary(Data) ->
+ wrap_characters_to_binary(Data,Encoding,
+ case get(unicode) of
+ true -> unicode;
+ _ -> latin1
+ end);
+ Undef ->
+ Undef
+ end,
+ put_chars(Result, Port);
+%% BC if called from pre-R13 node
+io_request({put_chars,Chars}, Port) ->
+ io_request({put_chars,latin1,Chars}, Port);
+io_request({put_chars,Mod,Func,Args}, Port) ->
+ io_request({put_chars,latin1,Mod,Func,Args}, Port);
+%% New in R12
+io_request({get_geometry,columns},Port) ->
+ case get_fd_geometry(Port) of
+ {W,_H} ->
+ {ok,W};
+ _ ->
+ {error,{error,enotsup}}
+ end;
+io_request({get_geometry,rows},Port) ->
+ case get_fd_geometry(Port) of
+ {_W,H} ->
+ {ok,H};
+ _ ->
+ {error,{error,enotsup}}
+ end;
+io_request({getopts,[]}, Port) ->
+ getopts(Port);
+io_request({setopts,Opts}, Port) when is_list(Opts) ->
+ setopts(Opts, Port);
+io_request({requests,Reqs}, Port) ->
+ io_requests(Reqs, {ok,ok}, Port);
+io_request(R, _Port) -> %Unknown request
+ {error,{error,{request,R}}}. %Ignore but give error (?)
+
+%% Status = io_requests(RequestList, PrevStat, Port)
+%% Process a list of output requests as long as the previous status is 'ok'.
+
+io_requests([R|Rs], {ok,_Res}, Port) ->
+ io_requests(Rs, io_request(R, Port), Port);
+io_requests([_|_], Error, _) ->
+ Error;
+io_requests([], Stat, _) ->
+ Stat.
+
+%% put_port(DeepList, Port)
+%% Take a deep list of characters, flatten and output them to the
+%% port.
+
+put_port(List, Port) ->
+ send_port(Port, {command, List}).
+
+%% send_port(Port, Command)
+
+send_port(Port, Command) ->
+ Port ! {self(),Command}.
+
+
+%% io_reply(From, ReplyAs, Reply)
+%% The function for sending i/o command acknowledgement.
+%% The ACK contains the return value.
+
+io_reply(From, ReplyAs, Reply) ->
+ From ! {io_reply,ReplyAs,Reply}.
+
+%% put_chars
+put_chars(Chars, Port) when is_binary(Chars) ->
+ put_port(Chars, Port),
+ {ok,ok};
+put_chars(Chars, Port) ->
+ case catch list_to_binary(Chars) of
+ Binary when is_binary(Binary) ->
+ put_chars(Binary, Port);
+ _ ->
+ {error,{error,put_chars}}
+ end.
+
+%% setopts
+setopts(Opts0,Port) ->
+ Opts = proplists:unfold(
+ proplists:substitute_negations(
+ [{latin1,unicode}],
+ Opts0)),
+ case check_valid_opts(Opts) of
+ true ->
+ do_setopts(Opts,Port);
+ false ->
+ {error,{error,enotsup}}
+ end.
+check_valid_opts([]) ->
+ true;
+check_valid_opts([{unicode,Valid}|T]) when Valid =:= true; Valid =:= utf8; Valid =:= false ->
+ check_valid_opts(T);
+check_valid_opts(_) ->
+ false.
+
+do_setopts(Opts, _Port) ->
+ case proplists:get_value(unicode,Opts) of
+ Valid when Valid =:= true; Valid =:= utf8 ->
+ put(unicode,true);
+ false ->
+ put(unicode,false);
+ undefined ->
+ ok
+ end,
+ {ok,ok}.
+
+getopts(_Port) ->
+ Uni = {unicode, case get(unicode) of
+ true ->
+ true;
+ _ ->
+ false
+ end},
+ {ok,[Uni]}.
+
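+%% wrap_characters_to_binary(Chars, FromEncoding, ToEncoding)
+%%  (descriptive comment added for clarity) Convert Chars to a binary in
+%%  ToEncoding. Newlines are expanded to CRLF when user_drv is running, and
+%%  code points above the target range (255 for latin1) are written as
+%%  \x{Hex} escapes instead of failing.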
+wrap_characters_to_binary(Chars,From,To) ->
+ TrNl = (whereis(user_drv) =/= undefined),
+ Limit = case To of
+ latin1 ->
+ 255;
+ _Else ->
+ 16#10ffff
+ end,
+ unicode:characters_to_binary(
+ [ case X of
+ $\n ->
+ if
+ TrNl ->
+ "\r\n";
+ true ->
+ $\n
+ end;
+ High when High > Limit ->
+ ["\\x{",erlang:integer_to_list(X, 16),$}];
+ Ordinary ->
+ Ordinary
+ end || X <- unicode:characters_to_list(Chars,From) ],unicode,To).
diff --git a/lib/kernel/src/user.erl b/lib/kernel/src/user.erl
new file mode 100644
index 0000000000..edf650ec59
--- /dev/null
+++ b/lib/kernel/src/user.erl
@@ -0,0 +1,786 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(user).
+-compile( [ inline, { inline_size, 100 } ] ).
+
+%% Basic standard i/o server for user interface port.
+
+-export([start/0, start/1, start_out/0]).
+-export([interfaces/1]).
+
+-define(NAME, user).
+
+%% Internal exports
+-export([server/1, server/2]).
+
+%% Defines for control ops
+-define(CTRL_OP_GET_WINSIZE,100).
+
+%%
+%% The basic server and start-up.
+%%
+
+start() ->
+ start_port([eof,binary]).
+
+start([Mod,Fun|Args]) ->
+ %% Mod,Fun,Args should return a pid. That process is supposed to act
+ %% as the io port.
+ Pid = apply(Mod, Fun, Args), % This better work!
+ Id = spawn(?MODULE, server, [Pid]),
+ register(?NAME, Id),
+ Id.
+
+start_out() ->
+ %% Output-only version of start/0
+ start_port([out,binary]).
+
+start_port(PortSettings) ->
+ Id = spawn(?MODULE,server,[{fd,0,1},PortSettings]),
+ register(?NAME,Id),
+ Id.
+
+%% Return the pid of the shell process.
+%% Note: We can't ask the user process for this info since it
+%% may be busy waiting for data from the port.
+interfaces(User) ->
+ case process_info(User, dictionary) of
+ {dictionary,Dict} ->
+ case lists:keysearch(shell, 1, Dict) of
+ {value,Sh={shell,Shell}} when is_pid(Shell) ->
+ [Sh];
+ _ ->
+ []
+ end;
+ _ ->
+ []
+ end.
+
+
+server(Pid) when is_pid(Pid) ->
+ process_flag(trap_exit, true),
+ link(Pid),
+ run(Pid).
+
+server(PortName,PortSettings) ->
+ process_flag(trap_exit, true),
+ Port = open_port(PortName,PortSettings),
+ run(Port).
+
+run(P) ->
+ put(read_mode,list),
+ put(unicode,false),
+ case init:get_argument(noshell) of
+ %% non-empty list -> noshell
+ {ok, [_|_]} ->
+ put(shell, noshell),
+ server_loop(P, queue:new());
+ _ ->
+ group_leader(self(), self()),
+ catch_loop(P, start_init_shell())
+ end.
+
+catch_loop(Port, Shell) ->
+ catch_loop(Port, Shell, queue:new()).
+
+catch_loop(Port, Shell, Q) ->
+ case catch server_loop(Port, Q) of
+ new_shell ->
+ exit(Shell, kill),
+ catch_loop(Port, start_new_shell());
+ {unknown_exit,{Shell,Reason},_} -> % shell has exited
+ case Reason of
+ normal ->
+ put_chars("*** ", Port, []);
+ _ ->
+ put_chars("*** ERROR: ", Port, [])
+ end,
+ put_chars("Shell process terminated! ***\n", Port, []),
+ catch_loop(Port, start_new_shell());
+ {unknown_exit,_,Q1} ->
+ catch_loop(Port, Shell, Q1);
+ {'EXIT',R} ->
+ exit(R)
+ end.
+
+link_and_save_shell(Shell) ->
+ link(Shell),
+ put(shell, Shell),
+ Shell.
+
+start_init_shell() ->
+ link_and_save_shell(shell:start(init)).
+
+start_new_shell() ->
+ link_and_save_shell(shell:start()).
+
+server_loop(Port, Q) ->
+ receive
+ {io_request,From,ReplyAs,Request} when is_pid(From) ->
+ server_loop(Port, do_io_request(Request, From, ReplyAs, Port, Q));
+ {Port,{data,Bytes}} ->
+ case get(shell) of
+ noshell ->
+ server_loop(Port, queue:snoc(Q, Bytes));
+ _ ->
+ case contains_ctrl_g_or_ctrl_c(Bytes) of
+ false ->
+ server_loop(Port, queue:snoc(Q, Bytes));
+ _ ->
+ throw(new_shell)
+ end
+ end;
+ {Port, eof} ->
+ put(eof, true),
+ server_loop(Port, Q);
+
+ %% Ignore messages from port here.
+ {'EXIT',Port,badsig} -> % Ignore badsig errors
+ server_loop(Port, Q);
+ {'EXIT',Port,What} -> % Port has exited
+ exit(What);
+
+ %% Check if shell has exited
+ {'EXIT',SomePid,What} ->
+ case get(shell) of
+ noshell ->
+ server_loop(Port, Q); % Ignore
+ _ ->
+ throw({unknown_exit,{SomePid,What},Q})
+ end;
+
+ _Other -> % Ignore other messages
+ server_loop(Port, Q)
+ end.
+
+
+get_fd_geometry(Port) ->
+ case (catch port_control(Port,?CTRL_OP_GET_WINSIZE,[])) of
+ List when is_list(List), length(List) =:= 8 ->
+ <<W:32/native,H:32/native>> = list_to_binary(List),
+ {W,H};
+ _ ->
+ error
+ end.
+
+
+%% NewSaveBuffer = io_request(Request, FromPid, ReplyAs, Port, SaveBuffer)
+
+do_io_request(Req, From, ReplyAs, Port, Q0) ->
+ case io_request(Req, Port, Q0) of
+ {_Status,Reply,Q1} ->
+ io_reply(From, ReplyAs, Reply),
+ Q1;
+ {exit,What} ->
+ send_port(Port, close),
+ exit(What)
+ end.
+
+%% New in R13B
+%% Encoding option (unicode/latin1)
+io_request({put_chars,unicode,Chars}, Port, Q) -> % Binary new in R9C
+ put_chars(wrap_characters_to_binary(Chars,unicode,
+ case get(unicode) of
+ true -> unicode;
+ _ -> latin1
+ end), Port, Q);
+io_request({put_chars,unicode,Mod,Func,Args}, Port, Q) ->
+ Result = case catch apply(Mod,Func,Args) of
+ Data when is_list(Data); is_binary(Data) ->
+ wrap_characters_to_binary(Data,unicode,
+ case get(unicode) of
+ true -> unicode;
+ _ -> latin1
+ end);
+ Undef ->
+ Undef
+ end,
+ put_chars(Result, Port, Q);
+io_request({put_chars,latin1,Chars}, Port, Q) -> % Binary new in R9C
+ Data = case get(unicode) of
+ true ->
+ unicode:characters_to_binary(Chars,latin1,unicode);
+ false ->
+ erlang:iolist_to_binary(Chars)
+ end,
+ put_chars(Data, Port, Q);
+io_request({put_chars,latin1,Mod,Func,Args}, Port, Q) ->
+ Result = case catch apply(Mod,Func,Args) of
+ Data when is_list(Data); is_binary(Data) ->
+ unicode:characters_to_binary(Data,latin1,
+ case get(unicode) of
+ true -> unicode;
+ _ -> latin1
+ end);
+ Undef ->
+ Undef
+ end,
+ put_chars(Result, Port, Q);
+io_request({get_chars,Enc,Prompt,N}, Port, Q) -> % New in R9C
+ get_chars(Prompt, io_lib, collect_chars, N, Port, Q, Enc);
+io_request({get_line,Enc,Prompt}, Port, Q) ->
+ case get(read_mode) of
+ binary ->
+ get_line_bin(Prompt,Port,Q,Enc);
+ _ ->
+ get_chars(Prompt, io_lib, collect_line, [], Port, Q, Enc)
+ end;
+io_request({get_until,Enc,Prompt,M,F,As}, Port, Q) ->
+ get_chars(Prompt, io_lib, get_until, {M,F,As}, Port, Q, Enc);
+%% End New in R13B
+io_request(getopts, Port, Q) ->
+ getopts(Port, Q);
+io_request({setopts,Opts}, Port, Q) when is_list(Opts) ->
+ setopts(Opts, Port, Q);
+io_request({requests,Reqs}, Port, Q) ->
+ io_requests(Reqs, {ok,ok,Q}, Port);
+
+%% New in R12
+io_request({get_geometry,columns},Port,Q) ->
+ case get_fd_geometry(Port) of
+ {W,_H} ->
+ {ok,W,Q};
+ _ ->
+ {error,{error,enotsup},Q}
+ end;
+io_request({get_geometry,rows},Port,Q) ->
+ case get_fd_geometry(Port) of
+ {_W,H} ->
+ {ok,H,Q};
+ _ ->
+ {error,{error,enotsup},Q}
+ end;
+%% BC with pre-R13 nodes
+io_request({put_chars,Chars}, Port, Q) ->
+ io_request({put_chars,latin1,Chars}, Port, Q);
+io_request({put_chars,Mod,Func,Args}, Port, Q) ->
+ io_request({put_chars,latin1,Mod,Func,Args}, Port, Q);
+io_request({get_chars,Prompt,N}, Port, Q) ->
+ io_request({get_chars,latin1,Prompt,N}, Port, Q);
+io_request({get_line,Prompt}, Port, Q) ->
+ io_request({get_line,latin1,Prompt}, Port, Q);
+io_request({get_until,Prompt,M,F,As}, Port, Q) ->
+ io_request({get_until,latin1,Prompt,M,F,As}, Port, Q);
+
+io_request(R, _Port, Q) -> %Unknown request
+ {error,{error,{request,R}},Q}. %Ignore but give error (?)
+
+%% Status = io_requests(RequestList, PrevStat, Port)
+%% Process a list of output requests as long as the previous status is 'ok'.
+
+io_requests([R|Rs], {ok,_Res,Q}, Port) ->
+ io_requests(Rs, io_request(R, Port, Q), Port);
+io_requests([_|_], Error, _) ->
+ Error;
+io_requests([], Stat, _) ->
+ Stat.
+
+%% put_port(DeepList, Port)
+%% Take a deep list of characters, flatten and output them to the
+%% port.
+
+put_port(List, Port) ->
+ send_port(Port, {command, List}).
+
+%% send_port(Port, Command)
+
+send_port(Port, Command) ->
+ Port ! {self(),Command}.
+
+%% io_reply(From, ReplyAs, Reply)
+%% The function for sending i/o command acknowledgement.
+%% The ACK contains the return value.
+
+io_reply(From, ReplyAs, Reply) ->
+ From ! {io_reply,ReplyAs,Reply}.
+
+%% put_chars
+put_chars(Chars, Port, Q) when is_binary(Chars) ->
+ put_port(Chars, Port),
+ {ok,ok,Q};
+put_chars(Chars, Port, Q) ->
+ case catch list_to_binary(Chars) of
+ Binary when is_binary(Binary) ->
+ put_chars(Binary, Port, Q);
+ _ ->
+ {error,{error,put_chars},Q}
+ end.
+
+expand_encoding([]) ->
+ [];
+expand_encoding([latin1 | T]) ->
+ [{encoding,latin1} | expand_encoding(T)];
+expand_encoding([unicode | T]) ->
+ [{encoding,unicode} | expand_encoding(T)];
+expand_encoding([H|T]) ->
+ [H|expand_encoding(T)].
+
+%% setopts
+setopts(Opts0,Port,Q) ->
+ Opts = proplists:unfold(
+ proplists:substitute_negations(
+ [{list,binary}],
+ expand_encoding(Opts0))),
+ case check_valid_opts(Opts) of
+ true ->
+ do_setopts(Opts,Port,Q);
+ false ->
+ {error,{error,enotsup},Q}
+ end.
+check_valid_opts([]) ->
+ true;
+check_valid_opts([{binary,_}|T]) ->
+ check_valid_opts(T);
+check_valid_opts([{encoding,Valid}|T]) when Valid =:= latin1; Valid =:= utf8; Valid =:= unicode ->
+ check_valid_opts(T);
+check_valid_opts(_) ->
+ false.
+
+do_setopts(Opts, _Port, Q) ->
+ case proplists:get_value(encoding,Opts) of
+ Valid when Valid =:= unicode; Valid =:= utf8 ->
+ put(unicode,true);
+ latin1 ->
+ put(unicode,false);
+ undefined ->
+ ok
+ end,
+ case proplists:get_value(binary, Opts) of
+ true ->
+ put(read_mode,binary),
+ {ok,ok,Q};
+ false ->
+ put(read_mode,list),
+ {ok,ok,Q};
+ _ ->
+ {ok,ok,Q}
+ end.
+
+getopts(_Port,Q) ->
+ Bin = {binary, case get(read_mode) of
+ binary ->
+ true;
+ _ ->
+ false
+ end},
+ Uni = {encoding, case get(unicode) of
+ true ->
+ unicode;
+ _ ->
+ latin1
+ end},
+ {ok,[Bin,Uni],Q}.
+
+
+get_line_bin(Prompt,Port,Q, Enc) ->
+ prompt(Port, Prompt),
+ case {get(eof),queue:is_empty(Q)} of
+ {true,true} ->
+ {ok,eof,Q};
+ _ ->
+ get_line(Prompt,Port, Q, [], Enc)
+ end.
+get_line(Prompt, Port, Q, Acc, Enc) ->
+ case queue:is_empty(Q) of
+ true ->
+ receive
+ {Port,{data,Bytes}} ->
+ get_line_bytes(Prompt, Port, Q, Acc, Bytes, Enc);
+ {Port, eof} ->
+ put(eof, true),
+ {ok, eof, []};
+ {io_request,From,ReplyAs,{get_geometry,_}=Req} when is_pid(From) ->
+ do_io_request(Req, From, ReplyAs, Port,
+ queue:new()),
+ %% No prompt.
+ get_line(Prompt, Port, Q, Acc, Enc);
+ {io_request,From,ReplyAs,Request} when is_pid(From) ->
+ do_io_request(Request, From, ReplyAs, Port, queue:new()),
+ prompt(Port, Prompt),
+ get_line(Prompt, Port, Q, Acc, Enc);
+ {'EXIT',From,What} when node(From) =:= node() ->
+ {exit,What}
+ end;
+ false ->
+ get_line_doit(Prompt, Port, Q, Acc, Enc)
+ end.
+
+get_line_bytes(Prompt, Port, Q, Acc, Bytes, Enc) ->
+ case get(shell) of
+ noshell ->
+ get_line_doit(Prompt, Port, queue:snoc(Q, Bytes),Acc,Enc);
+ _ ->
+ case contains_ctrl_g_or_ctrl_c(Bytes) of
+ false ->
+ get_line_doit(Prompt, Port, queue:snoc(Q, Bytes), Acc, Enc);
+ _ ->
+ throw(new_shell)
+ end
+ end.
+is_cr_at(Pos,Bin) ->
+ case Bin of
+ <<_:Pos/binary,$\r,_/binary>> ->
+ true;
+ _ ->
+ false
+ end.
+srch(<<>>,_,_) ->
+ nomatch;
+srch(<<X:8,_/binary>>,X,N) ->
+ {match,[{N,1}]};
+srch(<<_:8,T/binary>>,X,N) ->
+ srch(T,X,N+1).
+get_line_doit(Prompt, Port, Q, Accu, Enc) ->
+ case queue:is_empty(Q) of
+ true ->
+ case get(eof) of
+ true ->
+ case Accu of
+ [] ->
+ {ok,eof,Q};
+ _ ->
+ {ok,binrev(Accu,[]),Q}
+ end;
+ _ ->
+ get_line(Prompt, Port, Q, Accu, Enc)
+ end;
+ false ->
+ Bin = queue:head(Q),
+ case srch(Bin,$\n,0) of
+ nomatch ->
+ X = byte_size(Bin)-1,
+ case is_cr_at(X,Bin) of
+ true ->
+ <<D:X/binary,_/binary>> = Bin,
+ get_line_doit(Prompt, Port, queue:tail(Q),
+ [<<$\r>>,D|Accu], Enc);
+ false ->
+ get_line_doit(Prompt, Port, queue:tail(Q),
+ [Bin|Accu], Enc)
+ end;
+ {match,[{Pos,1}]} ->
+ %% We are done
+ PosPlus = Pos + 1,
+ case Accu of
+ [] ->
+ {Head,Tail} =
+ case is_cr_at(Pos - 1,Bin) of
+ false ->
+ <<H:PosPlus/binary,
+ T/binary>> = Bin,
+ {H,T};
+ true ->
+ PosMinus = Pos - 1,
+ <<H:PosMinus/binary,
+ _,_,T/binary>> = Bin,
+ {binrev([],[H,$\n]),T}
+ end,
+ case Tail of
+ <<>> ->
+ {ok, cast(Head,Enc), queue:tail(Q)};
+ _ ->
+ {ok, cast(Head,Enc),
+ queue:cons(Tail, queue:tail(Q))}
+ end;
+ [<<$\r>>|Stack1] when Pos =:= 0 ->
+ <<_:PosPlus/binary,Tail/binary>> = Bin,
+ case Tail of
+ <<>> ->
+ {ok, cast(binrev(Stack1, [$\n]),Enc),
+ queue:tail(Q)};
+ _ ->
+ {ok, cast(binrev(Stack1, [$\n]),Enc),
+ queue:cons(Tail, queue:tail(Q))}
+ end;
+ _ ->
+ {Head,Tail} =
+ case is_cr_at(Pos - 1,Bin) of
+ false ->
+ <<H:PosPlus/binary,
+ T/binary>> = Bin,
+ {H,T};
+ true ->
+ PosMinus = Pos - 1,
+ <<H:PosMinus/binary,
+ _,_,T/binary>> = Bin,
+ {[H,$\n],T}
+ end,
+ case Tail of
+ <<>> ->
+ {ok, cast(binrev(Accu,[Head]),Enc),
+ queue:tail(Q)};
+ _ ->
+ {ok, cast(binrev(Accu,[Head]),Enc),
+ queue:cons(Tail, queue:tail(Q))}
+ end
+ end
+ end
+ end.
+
+binrev(L, T) ->
+ list_to_binary(lists:reverse(L, T)).
+
+%% is_cr_at(Pos,Bin) ->
+%% case Bin of
+%% <<_:Pos/binary,$\r,_/binary>> ->
+%% true;
+%% _ ->
+%% false
+%% end.
+
+%% collect_line_bin_re(Bin,_Data,Stack,_) ->
+%% case re:run(Bin,<<"\n">>) of
+%% nomatch ->
+%% X = byte_size(Bin)-1,
+%% case is_cr_at(X,Bin) of
+%% true ->
+%% <<D:X/binary,_/binary>> = Bin,
+%% [<<$\r>>,D|Stack];
+%% false ->
+%% [Bin|Stack]
+%% end;
+%% {match,[{Pos,1}]} ->
+%% PosPlus = Pos + 1,
+%% case Stack of
+%% [] ->
+%% case is_cr_at(Pos - 1,Bin) of
+%% false ->
+%% <<Head:PosPlus/binary,Tail/binary>> = Bin,
+%% {stop, Head, Tail};
+%% true ->
+%% PosMinus = Pos - 1,
+%% <<Head:PosMinus/binary,_,_,Tail/binary>> = Bin,
+%% {stop, binrev([],[Head,$\n]),Tail}
+%% end;
+%% [<<$\r>>|Stack1] when Pos =:= 0 ->
+
+%% <<_:PosPlus/binary,Tail/binary>> = Bin,
+%% {stop,binrev(Stack1, [$\n]),Tail};
+%% _ ->
+%% case is_cr_at(Pos - 1,Bin) of
+%% false ->
+%% <<Head:PosPlus/binary,Tail/binary>> = Bin,
+%% {stop,binrev(Stack, [Head]),Tail};
+%% true ->
+%% PosMinus = Pos - 1,
+%% <<Head:PosMinus/binary,_,_,Tail/binary>> = Bin,
+%% {stop, binrev(Stack,[Head,$\n]),Tail}
+%% end
+%% end
+%% end.
+%% get_chars(Prompt, Module, Function, XtraArg, Port, Queue)
+%% Gets characters from the input port until the applied function
+%% returns {stop,Result,RestBuf}. Does not block output until input
+%% has been received.
+%% Returns:
+%% {Status,Result,NewQueue}
+%% {exit,Reason}
+
+%% Entry function.
+get_chars(Prompt, M, F, Xa, Port, Q, Fmt) ->
+ prompt(Port, Prompt),
+ case {get(eof),queue:is_empty(Q)} of
+ {true,true} ->
+ {ok,eof,Q};
+ _ ->
+ get_chars(Prompt, M, F, Xa, Port, Q, start, Fmt)
+ end.
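+
+%% Note (added for clarity, not in the original source): a get_line request
+%% arrives here as get_chars(Prompt, io_lib, collect_line, [], Port, Q, Enc),
+%% so io_lib:collect_line is the applied function that decides when a
+%% complete line has been collected.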
+
+%% First loop. Wait for port data. Respond to output requests.
+get_chars(Prompt, M, F, Xa, Port, Q, State, Fmt) ->
+ case queue:is_empty(Q) of
+ true ->
+ receive
+ {Port,{data,Bytes}} ->
+ get_chars_bytes(State, M, F, Xa, Port, Q, Bytes, Fmt);
+ {Port, eof} ->
+ put(eof, true),
+ {ok, eof, []};
+ %%{io_request,From,ReplyAs,Request} when is_pid(From) ->
+ %% get_chars_req(Prompt, M, F, Xa, Port, queue:new(), State,
+ %% Request, From, ReplyAs);
+ {io_request,From,ReplyAs,{get_geometry,_}=Req} when is_pid(From) ->
+ do_io_request(Req, From, ReplyAs, Port,
+ queue:new()), %Keep Q over this call
+ %% No prompt.
+ get_chars(Prompt, M, F, Xa, Port, Q, State, Fmt);
+ {io_request,From,ReplyAs,Request} when is_pid(From) ->
+ get_chars_req(Prompt, M, F, Xa, Port, Q, State,
+ Request, From, ReplyAs, Fmt);
+ {'EXIT',From,What} when node(From) =:= node() ->
+ {exit,What}
+ end;
+ false ->
+ get_chars_apply(State, M, F, Xa, Port, Q, Fmt)
+ end.
+
+get_chars_req(Prompt, M, F, XtraArg, Port, Q, State,
+ Req, From, ReplyAs, Fmt) ->
+ do_io_request(Req, From, ReplyAs, Port, queue:new()), %Keep Q over this call
+ prompt(Port, Prompt),
+ get_chars(Prompt, M, F, XtraArg, Port, Q, State, Fmt).
+
+%% Second loop. Pass data to client as long as it wants more.
+%% A ^G or ^C in the data interrupts the loop unless running with 'noshell'.
+get_chars_bytes(State, M, F, Xa, Port, Q, Bytes, Fmt) ->
+ case get(shell) of
+ noshell ->
+ get_chars_apply(State, M, F, Xa, Port, queue:snoc(Q, Bytes),Fmt);
+ _ ->
+ case contains_ctrl_g_or_ctrl_c(Bytes) of
+ false ->
+ get_chars_apply(State, M, F, Xa, Port,
+ queue:snoc(Q, Bytes),Fmt);
+ _ ->
+ throw(new_shell)
+ end
+ end.
+
+get_chars_apply(State0, M, F, Xa, Port, Q, Fmt) ->
+ case catch M:F(State0, cast(queue:head(Q),Fmt), Fmt, Xa) of
+ {stop,Result,<<>>} ->
+ {ok,Result,queue:tail(Q)};
+ {stop,Result,[]} ->
+ {ok,Result,queue:tail(Q)};
+ {stop,Result,eof} ->
+ {ok,Result,queue:tail(Q)};
+ {stop,Result,Buf} ->
+ {ok,Result,queue:cons(Buf, queue:tail(Q))};
+ {'EXIT',_} ->
+ {error,{error,err_func(M, F, Xa)},queue:new()};
+ State1 ->
+ get_chars_more(State1, M, F, Xa, Port, queue:tail(Q), Fmt)
+ end.
+
+get_chars_more(State, M, F, Xa, Port, Q, Fmt) ->
+ case queue:is_empty(Q) of
+ true ->
+ case get(eof) of
+ undefined ->
+ receive
+ {Port,{data,Bytes}} ->
+ get_chars_bytes(State, M, F, Xa, Port, Q, Bytes, Fmt);
+ {Port,eof} ->
+ put(eof, true),
+ get_chars_apply(State, M, F, Xa, Port,
+ queue:snoc(Q, eof), Fmt);
+ {'EXIT',From,What} when node(From) =:= node() ->
+ {exit,What}
+ end;
+ _ ->
+ get_chars_apply(State, M, F, Xa, Port, queue:snoc(Q, eof), Fmt)
+ end;
+ false ->
+ get_chars_apply(State, M, F, Xa, Port, Q, Fmt)
+ end.
+
+
+%% prompt(Port, Prompt)
+%% Print Prompt onto Port
+
+%% common case, reduces execution time by 20%
+prompt(_Port, '') -> ok;
+
+prompt(Port, Prompt) ->
+ put_port(io_lib:format_prompt(Prompt), Port).
+
+%% Convert error code to make it look as before
+err_func(io_lib, get_until, {_,F,_}) ->
+ F;
+err_func(_, F, _) ->
+ F.
+
+%% Using re reduces execution time by >50% compared to the old code.
+%% Running two regexps in sequence is much faster than the single
+%% alternation \\x03|\\x07.
+contains_ctrl_g_or_ctrl_c(BinOrList)->
+ case {re:run(BinOrList, <<3>>),re:run(BinOrList, <<7>>)} of
+ {nomatch, nomatch} -> false;
+ _ -> true
+ end.
+
+%% Convert a buffer between list and binary
+cast(Data, _Format) when is_atom(Data) ->
+ Data;
+cast(Data, Format) ->
+ cast(Data, get(read_mode), Format, get(unicode)).
+
+cast(B, binary, latin1, false) when is_binary(B) ->
+ B;
+cast(B, binary, latin1, true) when is_binary(B) ->
+ unicode:characters_to_binary(B, unicode, latin1);
+cast(L, binary, latin1, false) ->
+ erlang:iolist_to_binary(L);
+cast(L, binary, latin1, true) ->
+ case unicode:characters_to_binary(
+ erlang:iolist_to_binary(L),unicode,latin1) of % may fail
+ {error,_,_} -> exit({no_translation, unicode, latin1});
+ Else -> Else
+ end;
+cast(B, binary, unicode, true) when is_binary(B) ->
+ B;
+cast(B, binary, unicode, false) when is_binary(B) ->
+ unicode:characters_to_binary(B,latin1,unicode);
+cast(L, binary, unicode, true) ->
+ % possibly a list containing UTF-8 encoded characters
+ unicode:characters_to_binary(erlang:iolist_to_binary(L));
+cast(L, binary, unicode, false) ->
+ unicode:characters_to_binary(L, latin1, unicode);
+cast(L, list, latin1, UniTerm) ->
+ case UniTerm of
+ true -> % Convert input characters to protocol format (i.e latin1)
+ case unicode:characters_to_list(
+ erlang:iolist_to_binary(L),unicode) of % may fail
+ {error,_,_} -> exit({no_translation, unicode, latin1});
+ Else -> [ case X of
+ High when High > 255 ->
+ exit({no_translation, unicode, latin1});
+ Low ->
+ Low
+ end || X <- Else ]
+ end;
+ _ ->
+ binary_to_list(erlang:iolist_to_binary(L))
+ end;
+cast(L, list, unicode, UniTerm) ->
+ unicode:characters_to_list(erlang:iolist_to_binary(L),
+ case UniTerm of
+ true -> unicode;
+ _ -> latin1
+ end);
+cast(Other, _, _,_) ->
+ Other.
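+
+%% For illustration (not from the original source): with the binary
+%% <<"abc">> from the port queue, cast(<<"abc">>, list, latin1, false)
+%% returns "abc" (list read mode, latin1 terminal), while
+%% cast(<<"abc">>, binary, unicode, true) returns the binary unchanged.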
+
+wrap_characters_to_binary(Chars,unicode,latin1) ->
+ case unicode:characters_to_binary(Chars,unicode,latin1) of
+ {error,_,_} ->
+ list_to_binary(
+ [ case X of
+ High when High > 255 ->
+ ["\\x{",erlang:integer_to_list(X, 16),$}];
+ Low ->
+ Low
+ end || X <- unicode:characters_to_list(Chars,unicode) ]);
+ Bin ->
+ Bin
+ end;
+
+wrap_characters_to_binary(Bin,From,From) when is_binary(Bin) ->
+ Bin;
+wrap_characters_to_binary(Chars,From,To) ->
+ unicode:characters_to_binary(Chars,From,To).
diff --git a/lib/kernel/src/user_drv.erl b/lib/kernel/src/user_drv.erl
new file mode 100644
index 0000000000..c34f2ddeb0
--- /dev/null
+++ b/lib/kernel/src/user_drv.erl
@@ -0,0 +1,614 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(user_drv).
+
+%% Basic interface to a port.
+
+-export([start/0,start/1,start/2,start/3,server/2,server/3]).
+
+-export([interfaces/1]).
+
+-define(OP_PUTC,0).
+-define(OP_MOVE,1).
+-define(OP_INSC,2).
+-define(OP_DELC,3).
+-define(OP_BEEP,4).
+% Control op
+-define(CTRL_OP_GET_WINSIZE,100).
+-define(CTRL_OP_GET_UNICODE_STATE,101).
+-define(CTRL_OP_SET_UNICODE_STATE,102).
+
+%% start()
+%% start(ArgumentList)
+%% start(PortName, Shell)
+%% start(InPortName, OutPortName, Shell)
+%% Start the user driver server. The arguments to start/1 are slightly
+%% strange, as this may be called both at start-up from the command line
+%% and explicitly from other code.
+
+-spec start() -> pid().
+
+start() -> %Default line editing shell
+ spawn(user_drv, server, ['tty_sl -c -e',{shell,start,[init]}]).
+
+start([Pname]) ->
+ spawn(user_drv, server, [Pname,{shell,start,[init]}]);
+start([Pname|Args]) ->
+ spawn(user_drv, server, [Pname|Args]);
+start(Pname) ->
+ spawn(user_drv, server, [Pname,{shell,start,[init]}]).
+
+start(Pname, Shell) ->
+ spawn(user_drv, server, [Pname,Shell]).
+
+start(Iname, Oname, Shell) ->
+ spawn(user_drv, server, [Iname,Oname,Shell]).
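+
+%% For illustration (not from the original source): the runtime system
+%% normally reaches this module via start/0, i.e. a 'tty_sl -c -e' port
+%% driver and a local {shell,start,[init]}; start/2 can combine the same
+%% port name with a different shell MFA, e.g.
+%%   user_drv:start('tty_sl -c -e', {shell,start,[]}).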
+
+
+%% Return the pid of the active group process.
+%% Note: We can't ask the user_drv process for this info since it
+%% may be busy waiting for data from the port.
+
+-spec interfaces(pid()) -> [{'current_group', pid()}].
+
+interfaces(UserDrv) ->
+ case process_info(UserDrv, dictionary) of
+ {dictionary,Dict} ->
+ case lists:keysearch(current_group, 1, Dict) of
+ {value,Gr={_,Group}} when is_pid(Group) ->
+ [Gr];
+ _ ->
+ []
+ end;
+ _ ->
+ []
+ end.
+
+%% server(Pid, Shell)
+%% server(Pname, Shell)
+%% server(Iname, Oname, Shell)
+%% The initial calls to run the user driver. These start the port(s)
+%% then call server1/3 to set everything else up.
+
+server(Pid, Shell) when is_pid(Pid) ->
+ server1(Pid, Pid, Shell);
+server(Pname, Shell) ->
+ process_flag(trap_exit, true),
+ case catch open_port({spawn,Pname}, [eof]) of
+ {'EXIT', _} ->
+ %% Let's try a dumb user instead
+ user:start();
+ Port ->
+ server1(Port, Port, Shell)
+ end.
+
+server(Iname, Oname, Shell) ->
+ process_flag(trap_exit, true),
+ case catch open_port({spawn,Iname}, [eof]) of
+ {'EXIT', _} -> %% It might be a dumb terminal lets start dumb user
+ user:start();
+ Iport ->
+ Oport = open_port({spawn,Oname}, [eof]),
+ server1(Iport, Oport, Shell)
+ end.
+
+server1(Iport, Oport, Shell) ->
+ put(eof, false),
+ %% Start user and initial shell.
+ User = start_user(),
+ Gr1 = gr_add_cur(gr_new(), User, {}),
+
+ {Curr,Shell1} =
+ case init:get_argument(remsh) of
+ {ok,[[Node]]} ->
+ RShell = {list_to_atom(Node),shell,start,[]},
+ RGr = group:start(self(), RShell),
+ {RGr,RShell};
+ E when E =:= error ; E =:= {ok,[[]]} ->
+ {group:start(self(), Shell),Shell}
+ end,
+
+ put(current_group, Curr),
+ Gr = gr_add_cur(Gr1, Curr, Shell1),
+ %% Print some information.
+ io_request({put_chars, unicode,
+ flatten(io_lib:format("~s\n",
+ [erlang:system_info(system_version)]))},
+ Iport, Oport),
+ %% Enter the server loop.
+ server_loop(Iport, Oport, Curr, User, Gr).
+
+%% start_user()
+%% Start a group leader process and register it as 'user', unless,
+%% of course, a 'user' already exists.
+
+start_user() ->
+ case whereis(user_drv) of
+ undefined ->
+ register(user_drv, self());
+ _ ->
+ ok
+ end,
+ case whereis(user) of
+ undefined ->
+ User = group:start(self(), {}),
+ register(user, User),
+ User;
+ User ->
+ User
+ end.
+
+server_loop(Iport, Oport, User, Gr) ->
+ Curr = gr_cur_pid(Gr),
+ put(current_group, Curr),
+ server_loop(Iport, Oport, Curr, User, Gr).
+
+server_loop(Iport, Oport, Curr, User, Gr) ->
+ receive
+ {Iport,{data,Bs}} ->
+ BsBin = list_to_binary(Bs),
+ Unicode = unicode:characters_to_list(BsBin,utf8),
+ port_bytes(Unicode, Iport, Oport, Curr, User, Gr);
+ {Iport,eof} ->
+ Curr ! {self(),eof},
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {User,Req} -> % never block from user!
+ io_request(Req, Iport, Oport),
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {Curr,tty_geometry} ->
+ Curr ! {self(),tty_geometry,get_tty_geometry(Iport)},
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {Curr,get_unicode_state} ->
+ Curr ! {self(),get_unicode_state,get_unicode_state(Iport)},
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {Curr,set_unicode_state, Bool} ->
+ Curr ! {self(),set_unicode_state,set_unicode_state(Iport,Bool)},
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {Curr,Req} ->
+ io_request(Req, Iport, Oport),
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {'EXIT',Iport,_R} ->
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {'EXIT',Oport,_R} ->
+ server_loop(Iport, Oport, Curr, User, Gr);
+ {'EXIT',User,_R} -> % keep 'user' alive
+ NewU = start_user(),
+ server_loop(Iport, Oport, Curr, NewU, gr_set_num(Gr, 1, NewU, {}));
+ {'EXIT',Pid,R} -> % shell and group leader exit
+ case gr_cur_pid(Gr) of
+ Pid when R =/= die ,
+ R =/= terminated -> % current shell exited
+ if R =/= normal ->
+ io_requests([{put_chars,unicode,"*** ERROR: "}], Iport, Oport);
+ true -> % exit not caused by error
+ io_requests([{put_chars,unicode,"*** "}], Iport, Oport)
+ end,
+ io_requests([{put_chars,unicode,"Shell process terminated! "}], Iport, Oport),
+ Gr1 = gr_del_pid(Gr, Pid),
+ case gr_get_info(Gr, Pid) of
+ {Ix,{shell,start,Params}} -> % 3-tuple == local shell
+ io_requests([{put_chars,unicode,"***\n"}], Iport, Oport),
+ %% restart group leader and shell, same index
+ Pid1 = group:start(self(), {shell,start,Params}),
+ {ok,Gr2} = gr_set_cur(gr_set_num(Gr1, Ix, Pid1,
+ {shell,start,Params}), Ix),
+ put(current_group, Pid1),
+ server_loop(Iport, Oport, Pid1, User, Gr2);
+ _ -> % remote shell
+ io_requests([{put_chars,unicode,"(^G to start new job) ***\n"}],
+ Iport, Oport),
+ server_loop(Iport, Oport, Curr, User, Gr1)
+ end;
+ _ -> % not current, just remove it
+ server_loop(Iport, Oport, Curr, User, gr_del_pid(Gr, Pid))
+ end;
+ _X ->
+ %% Ignore unknown messages.
+ server_loop(Iport, Oport, Curr, User, Gr)
+ end.
+
+%% port_bytes(Bytes, InPort, OutPort, CurrentProcess, UserProcess, Group)
+%% Check the Bytes from the port to see if it contains a ^G. If so,
+%% either escape to switch_loop or restart the shell. Otherwise send
+%% the bytes to Curr.
+
+port_bytes([$\^G|_Bs], Iport, Oport, _Curr, User, Gr) ->
+ handle_escape(Iport, Oport, User, Gr);
+
+port_bytes([$\^C|_Bs], Iport, Oport, Curr, User, Gr) ->
+ interrupt_shell(Iport, Oport, Curr, User, Gr);
+
+port_bytes([B], Iport, Oport, Curr, User, Gr) ->
+ Curr ! {self(),{data,[B]}},
+ server_loop(Iport, Oport, Curr, User, Gr);
+port_bytes(Bs, Iport, Oport, Curr, User, Gr) ->
+ case member($\^G, Bs) of
+ true ->
+ handle_escape(Iport, Oport, User, Gr);
+ false ->
+ Curr ! {self(),{data,Bs}},
+ server_loop(Iport, Oport, Curr, User, Gr)
+ end.
+
+interrupt_shell(Iport, Oport, Curr, User, Gr) ->
+ case gr_get_info(Gr, Curr) of
+ undefined ->
+ ok; % unknown
+ _ ->
+ exit(Curr, interrupt)
+ end,
+ server_loop(Iport, Oport, Curr, User, Gr).
+
+handle_escape(Iport, Oport, User, Gr) ->
+ case application:get_env(stdlib, shell_esc) of
+ {ok,abort} ->
+ Pid = gr_cur_pid(Gr),
+ exit(Pid, die),
+ Gr1 =
+ case gr_get_info(Gr, Pid) of
+ {_Ix,{}} -> % no shell
+ Gr;
+ _ ->
+ receive {'EXIT',Pid,_} ->
+ gr_del_pid(Gr, Pid)
+ after 1000 ->
+ Gr
+ end
+ end,
+ Pid1 = group:start(self(), {shell,start,[]}),
+ io_request({put_chars,unicode,"\n"}, Iport, Oport),
+ server_loop(Iport, Oport, User,
+ gr_add_cur(Gr1, Pid1, {shell,start,[]}));
+
+ _ -> % {ok,jcl} | undefined
+ io_request({put_chars,unicode,"\nUser switch command\n"}, Iport, Oport),
+ server_loop(Iport, Oport, User, switch_loop(Iport, Oport, Gr))
+ end.
+
+switch_loop(Iport, Oport, Gr) ->
+ Line = get_line(edlin:start(" --> "), Iport, Oport),
+ switch_cmd(erl_scan:string(Line), Iport, Oport, Gr).
+
+switch_cmd({ok,[{atom,_,c},{integer,_,I}],_}, Iport, Oport, Gr0) ->
+ case gr_set_cur(Gr0, I) of
+ {ok,Gr} -> Gr;
+ undefined -> unknown_group(Iport, Oport, Gr0)
+ end;
+switch_cmd({ok,[{atom,_,c}],_}, Iport, Oport, Gr) ->
+ case gr_get_info(Gr, gr_cur_pid(Gr)) of
+ undefined ->
+ unknown_group(Iport, Oport, Gr);
+ _ ->
+ Gr
+ end;
+switch_cmd({ok,[{atom,_,i},{integer,_,I}],_}, Iport, Oport, Gr) ->
+ case gr_get_num(Gr, I) of
+ {pid,Pid} ->
+ exit(Pid, interrupt),
+ switch_loop(Iport, Oport, Gr);
+ undefined ->
+ unknown_group(Iport, Oport, Gr)
+ end;
+switch_cmd({ok,[{atom,_,i}],_}, Iport, Oport, Gr) ->
+ Pid = gr_cur_pid(Gr),
+ case gr_get_info(Gr, Pid) of
+ undefined ->
+ unknown_group(Iport, Oport, Gr);
+ _ ->
+ exit(Pid, interrupt),
+ switch_loop(Iport, Oport, Gr)
+ end;
+switch_cmd({ok,[{atom,_,k},{integer,_,I}],_}, Iport, Oport, Gr) ->
+ case gr_get_num(Gr, I) of
+ {pid,Pid} ->
+ exit(Pid, die),
+ case gr_get_info(Gr, Pid) of
+ {_Ix,{}} -> % no shell
+ switch_loop(Iport, Oport, Gr);
+ _ ->
+ Gr1 =
+ receive {'EXIT',Pid,_} ->
+ gr_del_pid(Gr, Pid)
+ after 1000 ->
+ Gr
+ end,
+ switch_loop(Iport, Oport, Gr1)
+ end;
+ undefined ->
+ unknown_group(Iport, Oport, Gr)
+ end;
+switch_cmd({ok,[{atom,_,k}],_}, Iport, Oport, Gr) ->
+ Pid = gr_cur_pid(Gr),
+ Info = gr_get_info(Gr, Pid),
+ case Info of
+ undefined ->
+ unknown_group(Iport, Oport, Gr);
+ {_Ix,{}} -> % no shell
+ switch_loop(Iport, Oport, Gr);
+ _ ->
+ exit(Pid, die),
+ Gr1 =
+ receive {'EXIT',Pid,_} ->
+ gr_del_pid(Gr, Pid)
+ after 1000 ->
+ Gr
+ end,
+ switch_loop(Iport, Oport, Gr1)
+ end;
+switch_cmd({ok,[{atom,_,j}],_}, Iport, Oport, Gr) ->
+ io_requests(gr_list(Gr), Iport, Oport),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[{atom,_,s},{atom,_,Shell}],_}, Iport, Oport, Gr0) ->
+ Pid = group:start(self(), {Shell,start,[]}),
+ Gr = gr_add_cur(Gr0, Pid, {Shell,start,[]}),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[{atom,_,s}],_}, Iport, Oport, Gr0) ->
+ Pid = group:start(self(), {shell,start,[]}),
+ Gr = gr_add_cur(Gr0, Pid, {shell,start,[]}),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[{atom,_,r}],_}, Iport, Oport, Gr0) ->
+ case is_alive() of
+ true ->
+ Node = pool:get_node(),
+ Pid = group:start(self(), {Node,shell,start,[]}),
+ Gr = gr_add_cur(Gr0, Pid, {Node,shell,start,[]}),
+ switch_loop(Iport, Oport, Gr);
+ false ->
+ io_request({put_chars,unicode,"Not alive\n"}, Iport, Oport),
+ switch_loop(Iport, Oport, Gr0)
+ end;
+switch_cmd({ok,[{atom,_,r},{atom,_,Node}],_}, Iport, Oport, Gr0) ->
+ Pid = group:start(self(), {Node,shell,start,[]}),
+ Gr = gr_add_cur(Gr0, Pid, {Node,shell,start,[]}),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[{atom,_,r},{atom,_,Node},{atom,_,Shell}],_},
+ Iport, Oport, Gr0) ->
+ Pid = group:start(self(), {Node,Shell,start,[]}),
+ Gr = gr_add_cur(Gr0, Pid, {Node,Shell,start,[]}),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[{atom,_,q}],_}, Iport, Oport, Gr) ->
+ case erlang:system_info(break_ignored) of
+ true -> % noop
+ io_request({put_chars,unicode,"Unknown command\n"}, Iport, Oport),
+ switch_loop(Iport, Oport, Gr);
+ false ->
+ halt()
+ end;
+switch_cmd({ok,[{atom,_,h}],_}, Iport, Oport, Gr) ->
+ list_commands(Iport, Oport),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[{'?',_}],_}, Iport, Oport, Gr) ->
+ list_commands(Iport, Oport),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,[],_}, Iport, Oport, Gr) ->
+ switch_loop(Iport, Oport, Gr);
+switch_cmd({ok,_Ts,_}, Iport, Oport, Gr) ->
+ io_request({put_chars,unicode,"Unknown command\n"}, Iport, Oport),
+ switch_loop(Iport, Oport, Gr);
+switch_cmd(_Ts, Iport, Oport, Gr) ->
+ io_request({put_chars,unicode,"Illegal input\n"}, Iport, Oport),
+ switch_loop(Iport, Oport, Gr).
+
+unknown_group(Iport, Oport, Gr) ->
+ io_request({put_chars,unicode,"Unknown job\n"}, Iport, Oport),
+ switch_loop(Iport, Oport, Gr).
+
+list_commands(Iport, Oport) ->
+ QuitReq = case erlang:system_info(break_ignored) of
+ true ->
+ [];
+ false ->
+ [{put_chars,unicode," q - quit erlang\n"}]
+ end,
+ io_requests([{put_chars, unicode," c [nn] - connect to job\n"},
+ {put_chars, unicode," i [nn] - interrupt job\n"},
+ {put_chars, unicode," k [nn] - kill job\n"},
+ {put_chars, unicode," j - list all jobs\n"},
+ {put_chars, unicode," s [shell] - start local shell\n"},
+ {put_chars, unicode," r [node [shell]] - start remote shell\n"}] ++
+ QuitReq ++
+ [{put_chars, unicode," ? | h - this message\n"}],
+ Iport, Oport).
+
+get_line({done,Line,_Rest,Rs}, Iport, Oport) ->
+ io_requests(Rs, Iport, Oport),
+ Line;
+get_line({undefined,_Char,Cs,Cont,Rs}, Iport, Oport) ->
+ io_requests(Rs, Iport, Oport),
+ io_request(beep, Iport, Oport),
+ get_line(edlin:edit_line(Cs, Cont), Iport, Oport);
+get_line({What,Cont0,Rs}, Iport, Oport) ->
+ io_requests(Rs, Iport, Oport),
+ receive
+ {Iport,{data,Cs}} ->
+ get_line(edlin:edit_line(Cs, Cont0), Iport, Oport);
+ {Iport,eof} ->
+ get_line(edlin:edit_line(eof, Cont0), Iport, Oport)
+ after
+ get_line_timeout(What) ->
+ get_line(edlin:edit_line([], Cont0), Iport, Oport)
+ end.
+
+get_line_timeout(blink) -> 1000;
+get_line_timeout(more_chars) -> infinity.
+
+% Let driver report window geometry,
+% definitely outside of the common interface
+get_tty_geometry(Iport) ->
+ case (catch port_control(Iport,?CTRL_OP_GET_WINSIZE,[])) of
+ List when length(List) =:= 8 ->
+ <<W:32/native,H:32/native>> = list_to_binary(List),
+ {W,H};
+ _ ->
+ error
+ end.
+get_unicode_state(Iport) ->
+ case (catch port_control(Iport,?CTRL_OP_GET_UNICODE_STATE,[])) of
+ [Int] when Int > 0 ->
+ true;
+ [Int] when Int =:= 0 ->
+ false;
+ _ ->
+ error
+ end.
+
+set_unicode_state(Iport, Bool) ->
+ Data = case Bool of
+ true -> [1];
+ false -> [0]
+ end,
+ case (catch port_control(Iport,?CTRL_OP_SET_UNICODE_STATE,Data)) of
+ [Int] when Int > 0 ->
+ {unicode, utf8};
+ [Int] when Int =:= 0 ->
+ {unicode, false};
+ _ ->
+ error
+ end.
+
+%% io_request(Request, InPort, OutPort)
+%% io_requests(Requests, InPort, OutPort)
+
+io_request({put_chars, unicode,Cs}, _Iport, Oport) ->
+ Oport ! {self(),{command,[?OP_PUTC|unicode:characters_to_binary(Cs,utf8)]}};
+io_request({move_rel,N}, _Iport, Oport) ->
+ Oport ! {self(),{command,[?OP_MOVE|put_int16(N, [])]}};
+io_request({insert_chars,unicode,Cs}, _Iport, Oport) ->
+ Oport ! {self(),{command,[?OP_INSC|unicode:characters_to_binary(Cs,utf8)]}};
+io_request({delete_chars,N}, _Iport, Oport) ->
+ Oport ! {self(),{command,[?OP_DELC|put_int16(N, [])]}};
+io_request(beep, _Iport, Oport) ->
+ Oport ! {self(),{command,[?OP_BEEP]}};
+io_request({requests,Rs}, Iport, Oport) ->
+ io_requests(Rs, Iport, Oport);
+io_request(_R, _Iport, _Oport) ->
+ ok.
+
+io_requests([R|Rs], Iport, Oport) ->
+ io_request(R, Iport, Oport),
+ io_requests(Rs, Iport, Oport);
+io_requests([], _Iport, _Oport) ->
+ ok.
+
+put_int16(N, Tail) ->
+ [(N bsr 8)band 255,N band 255|Tail].
+
+%% gr_new()
+%% gr_get_num(Group, Index)
+%% gr_get_info(Group, Pid)
+%% gr_add_cur(Group, Pid, Shell)
+%% gr_set_cur(Group, Index)
+%% gr_cur_pid(Group)
+%% gr_del_pid(Group, Pid)
+%% Manage the group list. The group structure has the form:
+%% {NextIndex,CurrIndex,CurrPid,GroupList}
+%%
+%% where each element in the group list is:
+%% {Index,GroupPid,Shell}
+
+gr_new() ->
+ {0,0,none,[]}.
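+
+%% For illustration (not part of the original source): after
+%%
+%%   Gr0 = gr_new(),
+%%   Gr1 = gr_add_cur(Gr0, User, {}),
+%%   Gr2 = gr_add_cur(Gr1, Shell, {shell,start,[init]}),
+%%
+%% Gr2 is {2,1,Shell,[{0,User,{}},{1,Shell,{shell,start,[init]}}]}, i.e. the
+%% next free index is 2 and the current group is the shell at index 1.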
+
+gr_get_num({_Next,_CurI,_CurP,Gs}, I) ->
+ gr_get_num1(Gs, I).
+
+gr_get_num1([{I,_Pid,{}}|_Gs], I) ->
+ undefined;
+gr_get_num1([{I,Pid,_S}|_Gs], I) ->
+ {pid,Pid};
+gr_get_num1([_G|Gs], I) ->
+ gr_get_num1(Gs, I);
+gr_get_num1([], _I) ->
+ undefined.
+
+gr_get_info({_Next,_CurI,_CurP,Gs}, Pid) ->
+ gr_get_info1(Gs, Pid).
+
+gr_get_info1([{I,Pid,S}|_Gs], Pid) ->
+ {I,S};
+gr_get_info1([_G|Gs], I) ->
+ gr_get_info1(Gs, I);
+gr_get_info1([], _I) ->
+ undefined.
+
+gr_add_cur({Next,_CurI,_CurP,Gs}, Pid, Shell) ->
+ {Next+1,Next,Pid,append(Gs, [{Next,Pid,Shell}])}.
+
+gr_set_cur({Next,_CurI,_CurP,Gs}, I) ->
+ case gr_get_num1(Gs, I) of
+ {pid,Pid} -> {ok,{Next,I,Pid,Gs}};
+ undefined -> undefined
+ end.
+
+gr_set_num({Next,CurI,CurP,Gs}, I, Pid, Shell) ->
+ {Next,CurI,CurP,gr_set_num1(Gs, I, Pid, Shell)}.
+
+gr_set_num1([{I,_Pid,_Shell}|Gs], I, NewPid, NewShell) ->
+ [{I,NewPid,NewShell}|Gs];
+gr_set_num1([{I,Pid,Shell}|Gs], NewI, NewPid, NewShell) when NewI > I ->
+ [{I,Pid,Shell}|gr_set_num1(Gs, NewI, NewPid, NewShell)];
+gr_set_num1(Gs, NewI, NewPid, NewShell) ->
+ [{NewI,NewPid,NewShell}|Gs].
+
+gr_del_pid({Next,CurI,CurP,Gs}, Pid) ->
+ {Next,CurI,CurP,gr_del_pid1(Gs, Pid)}.
+
+gr_del_pid1([{_I,Pid,_S}|Gs], Pid) ->
+ Gs;
+gr_del_pid1([G|Gs], Pid) ->
+ [G|gr_del_pid1(Gs, Pid)];
+gr_del_pid1([], _Pid) ->
+ [].
+
+gr_cur_pid({_Next,_CurI,CurP,_Gs}) ->
+ CurP.
+
+gr_list({_Next,CurI,_CurP,Gs}) ->
+ gr_list(Gs, CurI, []).
+
+gr_list([{_I,_Pid,{}}|Gs], Cur, Jobs) ->
+ gr_list(Gs, Cur, Jobs);
+gr_list([{Cur,_Pid,Shell}|Gs], Cur, Jobs) ->
+ gr_list(Gs, Cur, [{put_chars, unicode,flatten(io_lib:format("~4w* ~w\n", [Cur,Shell]))}|Jobs]);
+gr_list([{I,_Pid,Shell}|Gs], Cur, Jobs) ->
+ gr_list(Gs, Cur, [{put_chars, unicode,flatten(io_lib:format("~4w ~w\n", [I,Shell]))}|Jobs]);
+gr_list([], _Cur, Jobs) ->
+ lists:reverse(Jobs).
+
+append([H|T], X) ->
+ [H|append(T, X)];
+append([], X) ->
+ X.
+
+member(X, [X|_Rest]) -> true;
+member(X, [_H|Rest]) ->
+ member(X, Rest);
+member(_X, []) -> false.
+
+flatten(List) ->
+ flatten(List, [], []).
+
+flatten([H|T], Cont, Tail) when is_list(H) ->
+ flatten(H, [T|Cont], Tail);
+flatten([H|T], Cont, Tail) ->
+ [H|flatten(T, Cont, Tail)];
+flatten([], [H|Cont], Tail) ->
+ flatten(H, Cont, Tail);
+flatten([], [], Tail) ->
+ Tail.
diff --git a/lib/kernel/src/user_sup.erl b/lib/kernel/src/user_sup.erl
new file mode 100644
index 0000000000..35b7ff0cfe
--- /dev/null
+++ b/lib/kernel/src/user_sup.erl
@@ -0,0 +1,129 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(user_sup).
+
+%% ---------------------------------------------
+%% This is a supervisor bridge hiding the process
+%% details of the user/group implementation.
+%% ---------------------------------------------
+
+-behaviour(supervisor_bridge).
+
+-export([start/0]).
+
+%% Internal exports.
+-export([init/1, terminate/2, relay/1]).
+
+-spec start() -> {'error', {'already_started', pid()}} | {'ok', pid()}.
+
+start() ->
+ supervisor_bridge:start_link(user_sup, []).
+
+-spec init([]) -> 'ignore' | {'error', 'nouser'} | {'ok', pid(), pid()}.
+
+init([]) ->
+ case get_user() of
+ nouser ->
+ ignore;
+ {master, Master} ->
+ Pid = start_slave(Master),
+ {ok, Pid, Pid};
+ {M, F, A} ->
+ case start_user({M, F}, A) of
+ {ok, Pid} ->
+ {ok, Pid, Pid};
+ Error ->
+ Error
+ end
+ end.
+
+start_slave(Master) ->
+ case rpc:call(Master, erlang, whereis, [user]) of
+ User when is_pid(User) ->
+ spawn(?MODULE, relay, [User]);
+ _ ->
+ error_logger:error_msg("Cannot get remote user", []),
+ receive after 1000 -> true end,
+ halt()
+ end.
+
+-spec relay(pid()) -> no_return().
+
+relay(Pid) ->
+ register(user, self()),
+ relay1(Pid).
+
+relay1(Pid) ->
+ receive
+ X ->
+ Pid ! X,
+ relay1(Pid)
+ end.
+
+
+%%-----------------------------------------------------------------
+%% Sleep for a while in order to let the user process write all (or at
+%% least some) buffered information before termination.
+%%-----------------------------------------------------------------
+
+-spec terminate(term(), pid()) -> 'ok'.
+
+terminate(_Reason, UserPid) ->
+ receive after 1000 -> ok end,
+ exit(UserPid, kill),
+ ok.
+
+%%-----------------------------------------------------------------
+%% If there is a user, wait for it to register itself (but wait no
+%% more than 10 seconds). This guarantees to the application_controller
+%% that the user has been started.
+%%-----------------------------------------------------------------
+
+start_user(Func,A) ->
+ apply(Func, A),
+ wait_for_user_p(100).
+
+wait_for_user_p(0) ->
+ {error, nouser};
+wait_for_user_p(N) ->
+ case whereis(user) of
+ Pid when is_pid(Pid) ->
+ link(Pid),
+ {ok, Pid};
+ _ ->
+ receive after 100 -> ok end,
+ wait_for_user_p(N-1)
+ end.
+
+get_user() ->
+ Flags = init:get_arguments(),
+ check_flags(Flags, {user_drv, start, []}).
+
+%% These flags depend upon what arguments the erl script passes on
+%% to erl91.
+check_flags([{nouser, []} |T], _) -> check_flags(T, nouser);
+check_flags([{user, [User]} | T], _) ->
+ check_flags(T, {list_to_atom(User), start, []});
+check_flags([{noshell, []} | T], _) -> check_flags(T, {user, start, []});
+check_flags([{oldshell, []} | T], _) -> check_flags(T, {user, start, []});
+check_flags([{noinput, []} | T], _) -> check_flags(T, {user, start_out, []});
+check_flags([{master, [Node]} | T], _) ->
+ check_flags(T, {master, list_to_atom(Node)});
+check_flags([_H | T], User) -> check_flags(T, User);
+check_flags([], User) -> User.
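+
+%% For illustration (not part of the original source): a node started with
+%% "erl -noshell" has {noshell,[]} among init:get_arguments(), so
+%% check_flags/2 returns {user, start, []} and the plain 'user' I/O server
+%% is started instead of the default {user_drv, start, []}.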
diff --git a/lib/kernel/src/wrap_log_reader.erl b/lib/kernel/src/wrap_log_reader.erl
new file mode 100644
index 0000000000..5030d3aed5
--- /dev/null
+++ b/lib/kernel/src/wrap_log_reader.erl
@@ -0,0 +1,288 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Read wrap files with internal format
+
+-module(wrap_log_reader).
+
+%%-define(debug, true).
+-ifdef(debug).
+-define(FORMAT(P, A), io:format(P, A)).
+-else.
+-define(FORMAT(P, A), ok).
+-endif.
+
+-export([open/1, open/2, chunk/1, chunk/2, close/1]).
+
+-include("disk_log.hrl").
+
+-record(wrap_reader,
+ {fd :: file:fd(),
+ cont :: dlog_cont(), % disk_log's continuation record
+ file :: file:filename(), % file name without extension
+ file_no :: non_neg_integer(), % current file number
+ mod_time :: date_time(), % modification time of current file
+ first_no :: non_neg_integer() | 'one' % first read file number
+ }).
+
+%%
+%% Exported functions
+%%
+
+%% A special case to be handled when appropriate: if the current file
+%% number is one greater than the number of files, the max file number
+%% has not yet been reached; we are on the first 'round' of filling the
+%% wrap files.
+
+-type open_ret() :: {'ok', #wrap_reader{}} | {'error', tuple()}.
+
+-spec open(atom() | string()) -> open_ret().
+
+open(File) when is_atom(File) ->
+ open(atom_to_list(File));
+open(File) when is_list(File) ->
+ case read_index_file(File) of
+ %% The special case described above.
+ {ok, {CurFileNo, _CurFileSz, _TotSz, NoOfFiles}}
+ when CurFileNo =:= NoOfFiles + 1 ->
+ FileNo = 1,
+ ?FORMAT("open from ~p Cur = ~p, Sz = ~p, Tot = ~p, NoFiles = ~p~n",
+ [FileNo, CurFileNo, _CurFileSz, _TotSz, NoOfFiles]),
+ open_int(File, FileNo, FileNo);
+ {ok, {CurFileNo, _CurFileSz, _TotSz, NoOfFiles}} ->
+ FileNo = case (CurFileNo + 1) rem NoOfFiles of
+ 0 -> NoOfFiles;
+ No -> No
+ end,
+ ?FORMAT("open from ~p Cur = ~p, Sz = ~p, Tot = ~p, NoFiles = ~p~n",
+ [FileNo, CurFileNo, _CurFileSz, _TotSz, NoOfFiles]),
+ open_int(File, FileNo, FileNo);
+ Error ->
+ Error
+ end.
+
+-spec open(atom() | string(), integer()) -> open_ret().
+
+open(File, FileNo) when is_atom(File), is_integer(FileNo) ->
+ open(atom_to_list(File), FileNo);
+open(File, FileNo) when is_list(File), is_integer(FileNo) ->
+ case read_index_file(File) of
+ {ok, {_CurFileNo, _CurFileSz, _TotSz, NoOfFiles}}
+ when NoOfFiles >= FileNo ->
+ ?FORMAT("open file ~p Cur = ~p, Sz = ~p, Tot = ~p, NoFiles = ~p~n",
+ [FileNo, _CurFileNo, _CurFileSz, _TotSz, NoOfFiles]),
+ open_int(File, FileNo, one);
+ %% The special case described above.
+ {ok, {CurFileNo, _CurFileSz, _TotSz, NoOfFiles}}
+ when CurFileNo =:= FileNo, CurFileNo =:= NoOfFiles +1 ->
+ ?FORMAT("open file ~p Cur = ~p, Sz = ~p, Tot = ~p, NoFiles = ~p~n",
+ [FileNo, CurFileNo, _CurFileSz, _TotSz, NoOfFiles]),
+ open_int(File, FileNo, one);
+ {ok, {_CurFileNo, _CurFileSz, _TotSz, _NoOfFiles}} ->
+ {error, {file_not_found, add_ext(File, FileNo)}};
+ Error ->
+ Error
+ end.
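+
+%% Illustrative usage (a sketch, not part of this module): open a
+%% single wrap file by number and read its first chunk of terms. The
+%% log name "logfile" is a hypothetical example.
+%%
+%%   {ok, WR} = wrap_log_reader:open("logfile", 2),   % reads "logfile.2"
+%%   {WR1, Terms} = wrap_log_reader:chunk(WR),        % a list of terms, or the atom 'eof'
+%%   ok = wrap_log_reader:close(WR1).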
+
+-spec close(#wrap_reader{}) -> 'ok' | {'error', atom()}.
+
+close(#wrap_reader{fd = FD}) ->
+ file:close(FD).
+
+-type chunk_ret() :: {#wrap_reader{}, [term()]}
+ | {#wrap_reader{}, [term()], non_neg_integer()}
+ | {#wrap_reader{}, 'eof'}
+ | {'error', term()}.
+
+-spec chunk(#wrap_reader{}) -> chunk_ret().
+
+chunk(WR = #wrap_reader{}) ->
+ chunk(WR, ?MAX_CHUNK_SIZE, 0).
+
+-spec chunk(#wrap_reader{}, 'infinity' | pos_integer()) -> chunk_ret().
+
+chunk(WR = #wrap_reader{}, infinity) ->
+ chunk(WR, ?MAX_CHUNK_SIZE, 0);
+chunk(WR = #wrap_reader{}, N) when is_integer(N), N > 0 ->
+ chunk(WR, N, 0).
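+
+%% Illustrative usage (a sketch, not part of this module): read every
+%% term from a wrap log by looping on chunk/1 until eof. The function
+%% name read_all/1,2 is hypothetical; open/2 could be used instead to
+%% start at a specific file number.
+%%
+%%   read_all(File) ->
+%%       {ok, WR} = wrap_log_reader:open(File),
+%%       read_all(WR, []).
+%%
+%%   read_all(WR, Acc) ->
+%%       case wrap_log_reader:chunk(WR) of
+%%           {error, _} = Error ->
+%%               Error;
+%%           {WR1, eof} ->
+%%               _ = wrap_log_reader:close(WR1),
+%%               {ok, lists:append(lists:reverse(Acc))};
+%%           {WR1, Terms, _BadBytes} ->
+%%               read_all(WR1, [Terms | Acc]);
+%%           {WR1, Terms} ->
+%%               read_all(WR1, [Terms | Acc])
+%%       end.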
+
+%%
+%% Local functions
+%%
+
+open_int(File, FileNo, FirstFileNo) ->
+ FName = add_ext(File, FileNo),
+ case file:open(FName, [raw, binary, read]) of
+ {ok, Fd} -> %% File exists
+ case file:read(Fd, ?HEADSZ) of
+ {ok, Head} ->
+ case disk_log_1:is_head(Head) of
+ no ->
+ file:close(Fd),
+ {error, {not_a_log_file, FName}};
+ _ -> % yes or yes_not_closed
+ case last_mod_time(FName) of
+ {ok, ModTime} ->
+ WR = #wrap_reader{fd = Fd, cont = start,
+ file = File,
+ file_no = FileNo,
+ mod_time = ModTime,
+ first_no = FirstFileNo},
+ {ok, WR};
+ {error, E} ->
+ file:close(Fd),
+ {error, {file_error, FName, E}}
+ end
+ end;
+ _Other ->
+ file:close(Fd),
+ {error, {not_a_log_file, FName}}
+ end;
+ _Other ->
+ {error, {not_a_log_file, FName}}
+ end.
+
+chunk(WR, N, Bad) ->
+ #wrap_reader{fd = Fd, cont = Continue, file = File, file_no = CurFileNo,
+ first_no = FirstFileNo} = WR,
+ case read_a_chunk(Fd, N, Continue, add_ext(File, CurFileNo)) of
+ eof ->
+ case FirstFileNo of
+ one ->
+ {WR, eof};
+ _Else ->
+ chunk_at_eof(WR, N, Bad)
+ end;
+ {ContOut, [], BadBytes} ->
+ ?FORMAT("chunk: empty chunk read, ~p bad bytes~n", [BadBytes]),
+ chunk(WR#wrap_reader{cont = ContOut}, N, Bad + BadBytes);
+ {ContOut, Chunk, BadBytes} when Bad + BadBytes =:= 0 ->
+ {WR#wrap_reader{cont = ContOut}, Chunk};
+ {ContOut, Chunk, BadBytes} ->
+ ?FORMAT("chunk: total of ~p bad bytes~n", [BadBytes]),
+ {WR#wrap_reader{cont = ContOut}, Chunk, Bad + BadBytes};
+ Error ->
+ Error
+ end.
+
+read_a_chunk(Fd, N, start, FileName) ->
+ read_a_chunk(Fd, FileName, 0, [], N);
+read_a_chunk(Fd, N, More, FileName) ->
+ Pos = More#continuation.pos,
+ B = More#continuation.b,
+ read_a_chunk(Fd, FileName, Pos, B, N).
+
+read_a_chunk(Fd, FileName, Pos, B, N) ->
+ R = disk_log_1:chunk_read_only(Fd, FileName, Pos, B, N),
+ %% Create terms from the binaries returned from chunk_read_only/5.
+ %% 'foo' will do here since Log is not used in read-only mode.
+ Log = foo,
+ case disk_log:ichunk_end(R, Log) of
+ {C = #continuation{}, S} ->
+ {C, S, 0};
+ Else ->
+ Else
+ end.
+
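+%% When the current wrap file has been exhausted, pick the next file
+%% to read. Normally this is (CurFileNo + 1) rem NoOfFiles, with 0
+%% mapped to NoOfFiles; for example (illustrative numbers), with
+%% NoOfFiles = 3, finishing file 2 continues with file 3, and
+%% finishing file 3 continues with file 1 unless a file numbered 4
+%% already exists. Reading stops (eof) when the next file would be the
+%% first one read (first_no), or when NewFileNo becomes 0 (a
+%% single-file log with no successor).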
+chunk_at_eof(WR, N, Bad) ->
+ #wrap_reader{file = File, file_no = CurFileNo,
+ first_no = FirstFileNo} = WR,
+ case read_index_file(File) of
+ {ok, IndexFile} ->
+ {_, _, _, NoOfFiles} = IndexFile,
+ NewFileNo = case (CurFileNo + 1) rem NoOfFiles of
+ %% The special case described above.
+ _ when CurFileNo > NoOfFiles -> 1;
+ 0 when NoOfFiles > 1 -> NoOfFiles;
+ No when CurFileNo =:= NoOfFiles ->
+ FileName = add_ext(File, CurFileNo+1),
+ case file:read_file_info(FileName) of
+ {ok, _} -> CurFileNo + 1;
+ _ -> No
+ end;
+ No -> No
+ end,
+ ?FORMAT("chunk: at eof, index file: ~p, FirstFileNo: ~p, "
+ "CurFileNo: ~p, NoOfFiles: ~p, NewFileNo: ~p~n",
+ [IndexFile, FirstFileNo, CurFileNo,
+ NoOfFiles, NewFileNo]),
+ case {FirstFileNo, NewFileNo} of
+ {_, 0} -> {WR, eof};
+ {_, FirstFileNo} -> {WR, eof};
+ _ -> read_next_file(WR, N, NewFileNo, Bad)
+ end;
+ Error ->
+ Error
+ end.
+
+%% Read the index file for the File
+%% -> {ok, {CurFileNo, CurFileSz, TotSz, NoOfFiles}} | {error, Reason}
+read_index_file(File) ->
+ case catch disk_log_1:read_index_file(File) of
+ {1, 0, 0, 0} ->
+ {error, {index_file_not_found, File}};
+ {error, _Reason} ->
+ {error, {index_file_not_found, File}};
+ FileData ->
+ {ok, FileData}
+ end.
+
+%% When stepping through all the wrap log files, this function opens
+%% the next file and, on success, closes the previous one.
+read_next_file(WR, N, NewFileNo, Bad) ->
+ #wrap_reader{file = File, file_no = CurFileNo,
+ mod_time = ModTime, first_no = FirstFileNo} = WR,
+    %% If the current file were closed here, WR would be left in an
+    %% inconsistent state should an error occur below.
+ case last_mod_time(add_ext(File, NewFileNo)) of
+ {ok, NewModTime} ->
+ OldMT = calendar:datetime_to_gregorian_seconds(ModTime),
+ NewMT = calendar:datetime_to_gregorian_seconds(NewModTime),
+ Diff = NewMT - OldMT,
+ ?FORMAT("next: now = ~p~n last mtime = ~p~n"
+ " mtime = ~p~n diff = ~p~n",
+ [calendar:local_time(), ModTime, NewModTime, Diff]),
+ if
+ Diff < 0 ->
+ %% The file to be read is older than the one just finished.
+ {error, {is_wrapped, add_ext(File, CurFileNo)}};
+ true ->
+ case open_int(File, NewFileNo, FirstFileNo) of
+ {ok, NWR} ->
+ close(WR), %% Now we can safely close the old file.
+ chunk(NWR, N, Bad);
+ Error ->
+ Error
+ end
+ end;
+ {error, EN} ->
+ {error, {file_error, add_ext(File, NewFileNo), EN}}
+ end.
+
+%% Get the last modification time of a file
+last_mod_time(File) ->
+ case file:read_file_info(File) of
+ {ok, FileInfo} ->
+ {ok, FileInfo#file_info.mtime};
+ E ->
+ {error, E}
+ end.
+
+add_ext(File, Ext) ->
+ lists:concat([File, ".", Ext]).