Diffstat (limited to 'lib')
-rw-r--r--  lib/common_test/src/ct.erl | 2
-rw-r--r--  lib/common_test/src/ct_framework.erl | 15
-rw-r--r--  lib/common_test/src/ct_telnet.erl | 11
-rw-r--r--  lib/common_test/src/ct_util.erl | 25
-rw-r--r--  lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl | 42
-rw-r--r--  lib/common_test/test/telnet_server.erl | 8
-rw-r--r--  lib/crypto/c_src/crypto.c | 26
-rw-r--r--  lib/crypto/doc/src/crypto.xml | 4
-rw-r--r--  lib/crypto/src/crypto.erl | 26
-rw-r--r--  lib/crypto/test/crypto_SUITE.erl | 24
-rw-r--r--  lib/dialyzer/src/Makefile | 2
-rw-r--r--  lib/dialyzer/src/dialyzer_gui_wx.erl | 31
-rw-r--r--  lib/dialyzer/src/dialyzer_races.erl | 16
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/common_types.hrl | 6
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/config.hrl | 148
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs.hrl | 9
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_gc.hrl | 17
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_master.erl | 531
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_tag.hrl | 19
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/ddfs_master/gs_util.hrl | 16
-rw-r--r--  lib/hipe/cerl/erl_types.erl | 41
-rw-r--r--  lib/inets/test/inets_sup_SUITE.erl | 130
-rw-r--r--  lib/inets/test/inets_sup_SUITE_data/mime.types | 3
-rw-r--r--  lib/inets/test/inets_sup_SUITE_data/simple.conf | 6
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml | 43
-rw-r--r--  lib/kernel/src/application_controller.erl | 31
-rw-r--r--  lib/kernel/test/application_SUITE.erl | 20
-rw-r--r--  lib/kernel/test/application_SUITE_data/t4.config | 1
-rw-r--r--  lib/kernel/test/sendfile_SUITE.erl | 7
-rw-r--r--  lib/reltool/test/reltool_server_SUITE.erl | 9
-rw-r--r--  lib/reltool/test/reltool_test_lib.erl | 7
-rw-r--r--  lib/sasl/test/release_handler_SUITE.erl | 35
-rw-r--r--  lib/sasl/test/systools_SUITE.erl | 24
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 21
-rw-r--r--  lib/ssh/src/ssh.erl | 4
-rw-r--r--  lib/ssh/src/ssh_acceptor.erl | 47
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 3
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE.erl | 56
-rw-r--r--  lib/ssl/internal_doc/ssl-implementation.txt | 52
-rw-r--r--  lib/ssl/src/dtls_connection.erl | 3
-rw-r--r--  lib/ssl/src/ssl.erl | 8
-rw-r--r--  lib/ssl/src/ssl_cipher.erl | 8
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 91
-rw-r--r--  lib/ssl/src/ssl_handshake.erl | 79
-rw-r--r--  lib/ssl/src/ssl_record.erl | 2
-rw-r--r--  lib/ssl/src/tls_connection.erl | 13
-rw-r--r--  lib/ssl/src/tls_record.erl | 33
-rw-r--r--  lib/ssl/test/make_certs.erl | 5
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE.erl | 142
-rw-r--r--  lib/ssl/test/ssl_crl_SUITE.erl | 30
-rw-r--r--  lib/ssl/test/ssl_handshake_SUITE.erl | 14
-rw-r--r--  lib/stdlib/src/erl_lint.erl | 5
-rw-r--r--  lib/stdlib/test/erl_lint_SUITE.erl | 27
-rw-r--r--  lib/test_server/src/test_server.erl | 4
-rw-r--r--  lib/typer/Makefile | 2
-rw-r--r--  lib/typer/doc/Makefile | 39
-rw-r--r--  lib/typer/doc/html/.gitignore | 0
-rw-r--r--  lib/typer/doc/pdf/.gitignore | 0
-rw-r--r--  lib/typer/doc/src/Makefile | 117
-rw-r--r--  lib/typer/doc/src/book.xml | 41
-rw-r--r--  lib/typer/doc/src/fascicules.xml | 12
-rw-r--r--  lib/typer/doc/src/notes.xml | 51
-rw-r--r--  lib/typer/doc/src/part_notes.xml | 35
-rw-r--r--  lib/typer/doc/src/ref_man.xml | 35
-rw-r--r--  lib/typer/doc/src/typer_app.xml | 43
-rw-r--r--  lib/typer/info | 2
-rw-r--r--  lib/typer/src/Makefile | 2
-rw-r--r--  lib/typer/vsn.mk | 2
68 files changed, 1912 insertions, 451 deletions
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index 241cd928b7..85afdc7834 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -773,7 +773,7 @@ comment(Format, Args) when is_list(Format), is_list(Args) ->
send_html_comment(Comment) ->
Html = "<font color=\"green\">" ++ Comment ++ "</font>",
- ct_util:set_testdata({comment,Html}),
+ ct_util:set_testdata({{comment,group_leader()},Html}),
test_server:comment(Html).
%%%-----------------------------------------------------------------
diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl
index 9ef917a507..20903607dc 100644
--- a/lib/common_test/src/ct_framework.erl
+++ b/lib/common_test/src/ct_framework.erl
@@ -657,7 +657,18 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) ->
_ ->
ok
end,
- ct_util:delete_testdata(comment),
+ if Func == end_per_group; Func == end_per_suite ->
+ %% clean up any saved comments
+ ct_util:match_delete_testdata({comment,'_'});
+ true ->
+            %% attempt to delete any saved comment for this TC
+ case process_info(TCPid, group_leader) of
+ {group_leader,TCGL} ->
+ ct_util:delete_testdata({comment,TCGL});
+ _ ->
+ ok
+ end
+ end,
ct_util:delete_suite_data(last_saved_config),
FuncSpec = group_or_func(Func,Args),
@@ -850,7 +861,7 @@ error_notification(Mod,Func,_Args,{Error,Loc}) ->
_ ->
%% this notification comes from the test case process, so
%% we can add error info to comment with test_server:comment/1
- case ct_util:get_testdata(comment) of
+ case ct_util:get_testdata({comment,group_leader()}) of
undefined ->
test_server:comment(ErrorHtml);
Comment ->
diff --git a/lib/common_test/src/ct_telnet.erl b/lib/common_test/src/ct_telnet.erl
index c9dc2338cd..3b2652d06c 100644
--- a/lib/common_test/src/ct_telnet.erl
+++ b/lib/common_test/src/ct_telnet.erl
@@ -604,9 +604,12 @@ handle_msg({cmd,Cmd,Timeout},State) ->
end_gen_log(),
{Return,State#state{buffer=NewBuffer,prompt=Prompt}};
handle_msg({send,Cmd},State) ->
+ start_gen_log(heading(send,State#state.name)),
log(State,send,"Sending: ~p",[Cmd]),
+
debug_cont_gen_log("Throwing Buffer:",[]),
debug_log_lines(State#state.buffer),
+
case {State#state.type,State#state.prompt} of
{ts,_} ->
silent_teln_expect(State#state.name,
@@ -626,6 +629,7 @@ handle_msg({send,Cmd},State) ->
ok
end,
ct_telnet_client:send_data(State#state.teln_pid,Cmd),
+ end_gen_log(),
{ok,State#state{buffer=[],prompt=false}};
handle_msg(get_data,State) ->
start_gen_log(heading(get_data,State#state.name)),
@@ -869,14 +873,13 @@ teln_cmd(Pid,Cmd,Prx,Timeout) ->
teln_receive_until_prompt(Pid,Prx,Timeout).
teln_get_all_data(Pid,Prx,Data,Acc,LastLine) ->
- case check_for_prompt(Prx,lists:reverse(LastLine) ++ Data) of
+ case check_for_prompt(Prx,LastLine++Data) of
{prompt,Lines,_PromptType,Rest} ->
teln_get_all_data(Pid,Prx,Rest,[Lines|Acc],[]);
{noprompt,Lines,LastLine1} ->
case ct_telnet_client:get_data(Pid) of
{ok,[]} ->
- {ok,lists:reverse(lists:append([Lines|Acc])),
- lists:reverse(LastLine1)};
+ {ok,lists:reverse(lists:append([Lines|Acc])),LastLine1};
{ok,Data1} ->
teln_get_all_data(Pid,Prx,Data1,[Lines|Acc],LastLine1)
end
@@ -1334,7 +1337,7 @@ teln_receive_until_prompt(Pid,Prx,Timeout) ->
teln_receive_until_prompt(Pid,Prx,Acc,LastLine) ->
{ok,Data} = ct_telnet_client:get_data(Pid),
- case check_for_prompt(Prx,LastLine ++ Data) of
+ case check_for_prompt(Prx,LastLine++Data) of
{prompt,Lines,PromptType,Rest} ->
Return = lists:reverse(lists:append([Lines|Acc])),
{ok,Return,PromptType,Rest};
diff --git a/lib/common_test/src/ct_util.erl b/lib/common_test/src/ct_util.erl
index f5eb3a72f0..56027586d1 100644
--- a/lib/common_test/src/ct_util.erl
+++ b/lib/common_test/src/ct_util.erl
@@ -37,7 +37,7 @@
save_suite_data_async/3, save_suite_data_async/2,
read_suite_data/1,
delete_suite_data/0, delete_suite_data/1, match_delete_suite_data/1,
- delete_testdata/0, delete_testdata/1,
+ delete_testdata/0, delete_testdata/1, match_delete_testdata/1,
set_testdata/1, get_testdata/1, get_testdata/2,
set_testdata_async/1, update_testdata/2, update_testdata/3,
set_verbosity/1, get_verbosity/1]).
@@ -270,6 +270,9 @@ delete_testdata() ->
delete_testdata(Key) ->
call({delete_testdata, Key}).
+match_delete_testdata(KeyPat) ->
+ call({match_delete_testdata, KeyPat}).
+
update_testdata(Key, Fun) ->
update_testdata(Key, Fun, []).
@@ -361,7 +364,25 @@ loop(Mode,TestData,StartDir) ->
{{delete_testdata,Key},From} ->
TestData1 = lists:keydelete(Key,1,TestData),
return(From,ok),
- loop(From,TestData1,StartDir);
+ loop(From,TestData1,StartDir);
+ {{match_delete_testdata,{Key1,Key2}},From} ->
+ %% handles keys with 2 elements
+ TestData1 =
+ lists:filter(fun({Key,_}) when not is_tuple(Key) ->
+ true;
+ ({Key,_}) when tuple_size(Key) =/= 2 ->
+ true;
+ ({{_,KeyB},_}) when Key1 == '_' ->
+ KeyB =/= Key2;
+ ({{KeyA,_},_}) when Key2 == '_' ->
+ KeyA =/= Key1;
+ (_) when Key1 == '_' ; Key2 == '_' ->
+ false;
+ (_) ->
+ true
+ end, TestData),
+ return(From,ok),
+ loop(From,TestData1,StartDir);
{{set_testdata,New = {Key,_Val}},From} ->
TestData1 = lists:keydelete(Key,1,TestData),
return(From,ok),
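
Editorial note (not part of the patch): a minimal usage sketch of the new ct_util:match_delete_testdata/1 together with the per-group-leader comment keys introduced in ct.erl and ct_framework.erl above. A '_' in either position of a two-element key acts as a wildcard; Html and TestCaseGL below are placeholders.

    %% comments are now stored under {comment, GroupLeader} (see ct.erl above)
    ct_util:set_testdata({{comment, group_leader()}, Html}),
    %% end_tc deletes the comment saved for one finished test case ...
    ct_util:delete_testdata({comment, TestCaseGL}),
    %% ... while end_per_group/end_per_suite wipe all saved comments at once
    ct_util:match_delete_testdata({comment, '_'}).
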
diff --git a/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl b/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl
index 0ee0525216..c0f79d0f10 100644
--- a/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl
+++ b/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl
@@ -16,7 +16,8 @@ suite() ->
].
all() ->
- [expect,
+ [
+ expect,
expect_repeat,
expect_sequence,
expect_error_prompt,
@@ -31,8 +32,10 @@ all() ->
ignore_prompt_repeat,
ignore_prompt_sequence,
ignore_prompt_timeout,
+ large_string,
server_speaks,
- server_disconnects].
+ server_disconnects
+ ].
groups() ->
[].
@@ -214,6 +217,41 @@ no_prompt_check_timeout(_) ->
ok = ct_telnet:close(Handle),
ok.
+%% Check that it's possible to receive multiple chunks of data sent from
+%% the server with one get_data call
+large_string(_) ->
+ {ok, Handle} = ct_telnet:open(telnet_server_conn1),
+ String = "abcd efgh ijkl mnop qrst uvwx yz ",
+ BigString = lists:flatmap(fun(S) -> S end,
+ [String || _ <- lists:seq(1,10)]),
+ VerifyStr = [C || C <- BigString, C/=$ ],
+
+ {ok,Data} = ct_telnet:cmd(Handle, "echo_sep "++BigString),
+ ct:log("[CMD] Received ~w chars: ~s", [length(lists:flatten(Data)),Data]),
+ VerifyStr = [C || C <- lists:flatten(Data), C/=$ , C/=$\r, C/=$\n, C/=$>],
+
+    %% Test #1: With a long sleep value, all data gets buffered and
+ %% ct_telnet can receive it with one single request to ct_telnet_client.
+ %% Test #2: With a short sleep value, ct_telnet needs multiple calls to
+ %% ct_telnet_client to collect the data. This iterative operation should
+ %% yield the same result as the single request case.
+
+ ok = ct_telnet:send(Handle, "echo_sep "++BigString),
+ timer:sleep(1000),
+ {ok,Data1} = ct_telnet:get_data(Handle),
+ ct:log("[GET DATA #1] Received ~w chars: ~s",
+ [length(lists:flatten(Data1)),Data1]),
+ VerifyStr = [C || C <- lists:flatten(Data1), C/=$ , C/=$\r, C/=$\n, C/=$>],
+
+ ok = ct_telnet:send(Handle, "echo_sep "++BigString),
+ timer:sleep(50),
+ {ok,Data2} = ct_telnet:get_data(Handle),
+ ct:log("[GET DATA #2] Received ~w chars: ~s", [length(lists:flatten(Data2)),Data2]),
+ VerifyStr = [C || C <- lists:flatten(Data2), C/=$ , C/=$\r, C/=$\n, C/=$>],
+
+ ok = ct_telnet:close(Handle),
+ ok.
+
%% The server says things. Manually check that it gets printed correctly
%% in the general IO log.
server_speaks(_) ->
diff --git a/lib/common_test/test/telnet_server.erl b/lib/common_test/test/telnet_server.erl
index ae56787819..1d341d6106 100644
--- a/lib/common_test/test/telnet_server.erl
+++ b/lib/common_test/test/telnet_server.erl
@@ -198,6 +198,14 @@ do_handle_data(Data,#state{authorized={user,_}}=State) ->
do_handle_data("echo " ++ Data,State) ->
send(Data++"\r\n> ",State),
{ok,State};
+do_handle_data("echo_sep " ++ Data,State) ->
+ Msgs = string:tokens(Data," "),
+ lists:foreach(fun(Msg) ->
+ send(Msg,State),
+ timer:sleep(10)
+ end, Msgs),
+ send("\r\n> ",State),
+ {ok,State};
do_handle_data("echo_no_prompt " ++ Data,State) ->
send(Data,State),
{ok,State};
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index fca08c4eed..3020cadc56 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -215,6 +215,7 @@ static ERL_NIF_TERM des_cfb_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM a
static ERL_NIF_TERM des_ecb_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM des_ede3_cbc_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM des_ede3_cfb_crypt_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM aes_cfb_8_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM aes_cfb_128_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM aes_ctr_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM aes_ctr_stream_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -344,6 +345,7 @@ static ErlNifFunc nif_funcs[] = {
{"des_ecb_crypt", 3, des_ecb_crypt},
{"des_ede3_cbc_crypt", 6, des_ede3_cbc_crypt},
{"des_ede3_cfb_crypt_nif", 6, des_ede3_cfb_crypt_nif},
+ {"aes_cfb_8_crypt", 4, aes_cfb_8_crypt},
{"aes_cfb_128_crypt", 4, aes_cfb_128_crypt},
{"aes_ctr_encrypt", 3, aes_ctr_encrypt},
{"aes_ctr_decrypt", 3, aes_ctr_encrypt},
@@ -1600,6 +1602,30 @@ static ERL_NIF_TERM des_ede3_cfb_crypt_nif(ErlNifEnv* env, int argc, const ERL_N
#endif
}
+static ERL_NIF_TERM aes_cfb_8_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{/* (Key, IVec, Data, IsEncrypt) */
+ ErlNifBinary key, ivec, text;
+ AES_KEY aes_key;
+ unsigned char ivec_clone[16]; /* writable copy */
+ int new_ivlen = 0;
+ ERL_NIF_TERM ret;
+
+ if (!enif_inspect_iolist_as_binary(env, argv[0], &key) || key.size != 16
+ || !enif_inspect_binary(env, argv[1], &ivec) || ivec.size != 16
+ || !enif_inspect_iolist_as_binary(env, argv[2], &text)) {
+ return enif_make_badarg(env);
+ }
+
+ memcpy(ivec_clone, ivec.data, 16);
+ AES_set_encrypt_key(key.data, 128, &aes_key);
+ AES_cfb8_encrypt((unsigned char *) text.data,
+ enif_make_new_binary(env, text.size, &ret),
+ text.size, &aes_key, ivec_clone, &new_ivlen,
+ (argv[3] == atom_true));
+ CONSUME_REDS(env,text);
+ return ret;
+}
+
static ERL_NIF_TERM aes_cfb_128_crypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Key, IVec, Data, IsEncrypt) */
ErlNifBinary key, ivec, text;
diff --git a/lib/crypto/doc/src/crypto.xml b/lib/crypto/doc/src/crypto.xml
index e88bf01491..7712173ed8 100644
--- a/lib/crypto/doc/src/crypto.xml
+++ b/lib/crypto/doc/src/crypto.xml
@@ -128,7 +128,7 @@
<p><code>stream_cipher() = rc4 | aes_ctr </code></p>
- <p><code>block_cipher() = aes_cbc128 | aes_cfb128 | aes_ige256 | blowfish_cbc |
+ <p><code>block_cipher() = aes_cbc128 | aes_cfb8 | aes_cfb128 | aes_ige256 | blowfish_cbc |
blowfish_cfb64 | des_cbc | des_cfb | des3_cbc | des3_cbf
| des_ede3 | rc2_cbc </code></p>
@@ -152,7 +152,7 @@
Note that both md4 and md5 are recommended only for compatibility with existing applications.
</p>
<p><code> cipher_algorithms() = des_cbc | des_cfb | des3_cbc | des3_cbf | des_ede3 |
- blowfish_cbc | blowfish_cfb64 | aes_cbc128 | aes_cfb128| aes_cbc256 | aes_ige256 | rc2_cbc | aes_ctr| rc4 </code> </p>
+ blowfish_cbc | blowfish_cfb64 | aes_cbc128 | aes_cfb8 | aes_cfb128| aes_cbc256 | aes_ige256 | rc2_cbc | aes_ctr| rc4 </code> </p>
<p><code> public_key_algorithms() = rsa |dss | ecdsa | dh | ecdh | ec_gf2m</code>
Note that ec_gf2m is not strictly a public key algorithm, but a restriction on what curves are supported
with ecdsa and ecdh.
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index 5bf52fc8a4..e1fbbf9ab8 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -210,7 +210,7 @@ supports()->
[{hashs, Hashs},
{ciphers, [des_cbc, des_cfb, des3_cbc, des_ede3, blowfish_cbc,
- blowfish_cfb64, blowfish_ofb64, blowfish_ecb, aes_cbc128, aes_cfb128,
+ blowfish_cfb64, blowfish_ofb64, blowfish_ecb, aes_cbc128, aes_cfb8, aes_cfb128,
aes_cbc256, rc2_cbc, aes_ctr, rc4] ++ Ciphers},
{public_keys, [rsa, dss, dh, srp] ++ PubKeys}
].
@@ -281,7 +281,7 @@ hmac_final_n(_Context, _HashLen) -> ? nif_stub.
%% Ecrypt/decrypt %%%
-spec block_encrypt(des_cbc | des_cfb | des3_cbc | des3_cbf | des_ede3 | blowfish_cbc |
- blowfish_cfb64 | aes_cbc128 | aes_cfb128 | aes_cbc256 | rc2_cbc,
+ blowfish_cfb64 | aes_cbc128 | aes_cfb8 | aes_cfb128 | aes_cbc256 | rc2_cbc,
Key::iodata(), Ivec::binary(), Data::iodata()) -> binary().
block_encrypt(des_cbc, Key, Ivec, Data) ->
@@ -306,6 +306,8 @@ block_encrypt(aes_cbc256, Key, Ivec, Data) ->
aes_cbc_256_encrypt(Key, Ivec, Data);
block_encrypt(aes_ige256, Key, Ivec, Data) ->
aes_ige_256_encrypt(Key, Ivec, Data);
+block_encrypt(aes_cfb8, Key, Ivec, Data) ->
+ aes_cfb_8_encrypt(Key, Ivec, Data);
block_encrypt(aes_cfb128, Key, Ivec, Data) ->
aes_cfb_128_encrypt(Key, Ivec, Data);
block_encrypt(rc2_cbc, Key, Ivec, Data) ->
@@ -313,7 +315,7 @@ block_encrypt(rc2_cbc, Key, Ivec, Data) ->
-spec block_decrypt(des_cbc | des_cfb | des3_cbc | des3_cbf | des_ede3 | blowfish_cbc |
blowfish_cfb64 | blowfish_ofb64 | aes_cbc128 | aes_cbc256 | aes_ige256 |
- aes_cfb128 | rc2_cbc,
+ aes_cfb8 | aes_cfb128 | rc2_cbc,
Key::iodata(), Ivec::binary(), Data::iodata()) -> binary().
block_decrypt(des_cbc, Key, Ivec, Data) ->
@@ -338,6 +340,8 @@ block_decrypt(aes_cbc256, Key, Ivec, Data) ->
aes_cbc_256_decrypt(Key, Ivec, Data);
block_decrypt(aes_ige256, Key, Ivec, Data) ->
aes_ige_256_decrypt(Key, Ivec, Data);
+block_decrypt(aes_cfb8, Key, Ivec, Data) ->
+ aes_cfb_8_decrypt(Key, Ivec, Data);
block_decrypt(aes_cfb128, Key, Ivec, Data) ->
aes_cfb_128_decrypt(Key, Ivec, Data);
block_decrypt(rc2_cbc, Key, Ivec, Data) ->
@@ -1159,7 +1163,21 @@ blowfish_ofb64_encrypt(_Key, _IVec, _Data) -> ?nif_stub.
%%
-%% AES in cipher feedback mode (CFB)
+%% AES in cipher feedback mode (CFB) - 8 bit shift
+%%
+-spec aes_cfb_8_encrypt(iodata(), binary(), iodata()) -> binary().
+-spec aes_cfb_8_decrypt(iodata(), binary(), iodata()) -> binary().
+
+aes_cfb_8_encrypt(Key, IVec, Data) ->
+ aes_cfb_8_crypt(Key, IVec, Data, true).
+
+aes_cfb_8_decrypt(Key, IVec, Data) ->
+ aes_cfb_8_crypt(Key, IVec, Data, false).
+
+aes_cfb_8_crypt(_Key, _IVec, _Data, _IsEncrypt) -> ?nif_stub.
+
+%%
+%% AES in cipher feedback mode (CFB) - 128 bit shift
%%
-spec aes_cfb_128_encrypt(iodata(), binary(), iodata()) -> binary().
-spec aes_cfb_128_decrypt(iodata(), binary(), iodata()) -> binary().
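
Editorial note (not part of the patch): a round-trip sketch of the aes_cfb8 cipher added above, e.g. in an Erlang shell. The key, IV and plaintext are the first NIST CFB8 test vector that also appears in crypto_SUITE.erl below; it assumes the block_encrypt/4 and block_decrypt/4 clauses introduced in this commit.

    %% 16-byte key and IV, as required by the aes_cfb_8_crypt NIF above
    Key    = <<16#2b,16#7e,16#15,16#16,16#28,16#ae,16#d2,16#a6,
               16#ab,16#f7,16#15,16#88,16#09,16#cf,16#4f,16#3c>>,
    IVec   = <<16#00,16#01,16#02,16#03,16#04,16#05,16#06,16#07,
               16#08,16#09,16#0a,16#0b,16#0c,16#0d,16#0e,16#0f>>,
    Plain  = <<16#6b,16#c1,16#be,16#e2,16#2e,16#40,16#9f,16#96,
               16#e9,16#3d,16#7e,16#11,16#73,16#93,16#17,16#2a>>,
    Cipher = crypto:block_encrypt(aes_cfb8, Key, IVec, Plain),
    %% decrypting with the same key/IV must give back the plaintext
    Plain  = crypto:block_decrypt(aes_cfb8, Key, IVec, Cipher).
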
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index 63552d2e70..479c947029 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -55,6 +55,7 @@ all() ->
{group, blowfish_cfb64},
{group, blowfish_ofb64},
{group, aes_cbc128},
+ {group, aes_cfb8},
{group, aes_cfb128},
{group, aes_cbc256},
{group, aes_ige256},
@@ -90,6 +91,7 @@ groups() ->
{des3_cbf,[], [block]},
{rc2_cbc,[], [block]},
{aes_cbc128,[], [block]},
+ {aes_cfb8,[], [block]},
{aes_cfb128,[], [block]},
{aes_cbc256,[], [block]},
{aes_ige256,[], [block]},
@@ -723,6 +725,9 @@ group_config(aes_cbc256, Config) ->
group_config(aes_ige256, Config) ->
Block = aes_ige256(),
[{block, Block} | Config];
+group_config(aes_cfb8, Config) ->
+ Block = aes_cfb8(),
+ [{block, Block} | Config];
group_config(aes_cfb128, Config) ->
Block = aes_cfb128(),
[{block, Block} | Config];
@@ -1164,6 +1169,25 @@ aes_ige256() ->
hexstr2bin("f69f2445df4f9b17ad2b417be66c3710")}
].
+aes_cfb8() ->
+ [{aes_cfb8,
+ hexstr2bin("2b7e151628aed2a6abf7158809cf4f3c"),
+ hexstr2bin("000102030405060708090a0b0c0d0e0f"),
+ hexstr2bin("6bc1bee22e409f96e93d7e117393172a")},
+ {aes_cfb8,
+ hexstr2bin("2b7e151628aed2a6abf7158809cf4f3c"),
+ hexstr2bin("3B3FD92EB72DAD20333449F8E83CFB4A"),
+ hexstr2bin("ae2d8a571e03ac9c9eb76fac45af8e51")},
+ {aes_cfb8,
+ hexstr2bin("2b7e151628aed2a6abf7158809cf4f3c"),
+ hexstr2bin("C8A64537A0B3A93FCDE3CDAD9F1CE58B"),
+ hexstr2bin("30c81c46a35ce411e5fbc1191a0a52ef")},
+ {aes_cfb8,
+ hexstr2bin("2b7e151628aed2a6abf7158809cf4f3c"),
+ hexstr2bin("26751F67A3CBB140B1808CF187A4F4DF"),
+ hexstr2bin("f69f2445df4f9b17ad2b417be66c3710")}
+ ].
+
aes_cfb128() ->
[{aes_cfb128,
hexstr2bin("2b7e151628aed2a6abf7158809cf4f3c"),
diff --git a/lib/dialyzer/src/Makefile b/lib/dialyzer/src/Makefile
index d7265ba31a..91fbdca5bd 100644
--- a/lib/dialyzer/src/Makefile
+++ b/lib/dialyzer/src/Makefile
@@ -88,7 +88,7 @@ APPUP_TARGET= $(EBIN)/$(APPUP_FILE)
ifeq ($(NATIVE_LIBS_ENABLED),yes)
ERL_COMPILE_FLAGS += +native
endif
-ERL_COMPILE_FLAGS += +warn_exported_vars +warn_unused_import +warn_untyped_record +warn_missing_spec +warnings_as_errors
+ERL_COMPILE_FLAGS += +warn_export_vars +warn_unused_import +warn_untyped_record +warn_missing_spec +warnings_as_errors
# ----------------------------------------------------
# Targets
diff --git a/lib/dialyzer/src/dialyzer_gui_wx.erl b/lib/dialyzer/src/dialyzer_gui_wx.erl
index 7070fa240d..868857d675 100644
--- a/lib/dialyzer/src/dialyzer_gui_wx.erl
+++ b/lib/dialyzer/src/dialyzer_gui_wx.erl
@@ -699,8 +699,7 @@ handle_add_files(#gui_state{chosen_box = ChosenBox, file_box = FileBox,
end.
handle_add_dir(#gui_state{chosen_box = ChosenBox, dir_entry = DirBox,
- files_to_analyze = FileList,
- mode = Mode} = State) ->
+ files_to_analyze = FileList, mode = Mode} = State) ->
case wxDirPickerCtrl:getPath(DirBox) of
"" ->
State;
@@ -714,8 +713,8 @@ handle_add_dir(#gui_state{chosen_box = ChosenBox, dir_entry = DirBox,
State#gui_state{files_to_analyze = add_files(filter_mods(NewDir1,Ext), FileList, ChosenBox, Ext)}
end.
-handle_add_rec(#gui_state{chosen_box = ChosenBox, dir_entry = DirBox, files_to_analyze = FileList,
- mode = Mode} = State) ->
+handle_add_rec(#gui_state{chosen_box = ChosenBox, dir_entry = DirBox,
+ files_to_analyze = FileList, mode = Mode} = State) ->
case wxDirPickerCtrl:getPath(DirBox) of
"" ->
State;
@@ -723,11 +722,11 @@ handle_add_rec(#gui_state{chosen_box = ChosenBox, dir_entry = DirBox, files_to_a
NewDir = ordsets:new(),
NewDir1 = ordsets:add_element(Dir,NewDir),
TargetDirs = ordsets:union(NewDir1, all_subdirs(NewDir1)),
- case wxRadioBox:getSelection(Mode) of
- 0 -> Ext = ".beam";
- 1-> Ext = ".erl"
- end,
- State#gui_state{files_to_analyze = add_files(filter_mods(TargetDirs,Ext), FileList, ChosenBox, Ext)}
+ Ext = case wxRadioBox:getSelection(Mode) of
+ 0 -> ".beam";
+ 1 -> ".erl"
+ end,
+ State#gui_state{files_to_analyze = add_files(filter_mods(TargetDirs, Ext), FileList, ChosenBox, Ext)}
end.
handle_file_delete(#gui_state{chosen_box = ChosenBox,
@@ -886,13 +885,10 @@ config_gui_start(State) ->
wxRadioBox:disable(State#gui_state.mode).
save_file(#gui_state{frame = Frame, warnings_box = WBox, log = Log} = State, Type) ->
- case Type of
- warnings ->
- Message = "Save Warnings",
- Box = WBox;
- log -> Message = "Save Log",
- Box = Log
- end,
+ {Message, Box} = case Type of
+ warnings -> {"Save Warnings", WBox};
+ log -> {"Save Log", Log}
+ end,
case wxTextCtrl:getValue(Box) of
"" -> error_sms(State,"There is nothing to save...\n");
_ ->
@@ -936,8 +932,7 @@ include_dialog(#gui_state{gui = Wx, frame = Frame, options = Options}) ->
wxButton:connect(DeleteAllButton, command_button_clicked),
wxButton:connect(Ok, command_button_clicked),
wxButton:connect(Cancel, command_button_clicked),
- Dirs = [io_lib:format("~s", [X])
- || X <- Options#options.include_dirs],
+ Dirs = [io_lib:format("~s", [X]) || X <- Options#options.include_dirs],
wxListBox:set(Box, Dirs),
Layout = wxBoxSizer:new(?wxVERTICAL),
Buttons = wxBoxSizer:new(?wxHORIZONTAL),
diff --git a/lib/dialyzer/src/dialyzer_races.erl b/lib/dialyzer/src/dialyzer_races.erl
index b1f849b16f..28c2ad2c0b 100644
--- a/lib/dialyzer/src/dialyzer_races.erl
+++ b/lib/dialyzer/src/dialyzer_races.erl
@@ -990,8 +990,7 @@ fixup_race_forward_helper(CurrFun, CurrFunLabel, Fun, FunLabel,
NewRaceVarMap, Args, NewFunArgs, NewFunTypes, NestingLevel};
{CurrFun, Fun} ->
NewCallsToAnalyze = lists:delete(Head, CallsToAnalyze),
- NewRaceVarMap =
- race_var_map(Args, NewFunArgs, RaceVarMap, bind),
+ NewRaceVarMap = race_var_map(Args, NewFunArgs, RaceVarMap, bind),
RetC =
case Fun of
InitFun ->
@@ -1018,8 +1017,7 @@ fixup_race_forward_helper(CurrFun, CurrFunLabel, Fun, FunLabel,
label = FunLabel, var_map = NewRaceVarMap,
def_vars = Args, call_vars = NewFunArgs,
arg_types = NewFunTypes}|
- lists:reverse(StateRaceList)] ++
- RetC;
+ lists:reverse(StateRaceList)] ++ RetC;
_ ->
[#curr_fun{status = in, mfa = Fun,
label = FunLabel, var_map = NewRaceVarMap,
@@ -1054,13 +1052,9 @@ fixup_race_backward(CurrFun, Calls, CallsToAnalyze, Parents, Height) ->
false -> [CurrFun|Parents]
end;
[Head|Tail] ->
- MorePaths =
- case Head of
- {Parent, CurrFun} -> true;
- {Parent, _TupleB} -> false
- end,
- case MorePaths of
- true ->
+ {Parent, TupleB} = Head,
+ case TupleB =:= CurrFun of
+ true -> % more paths are needed
NewCallsToAnalyze = lists:delete(Head, CallsToAnalyze),
NewParents =
fixup_race_backward(Parent, NewCallsToAnalyze,
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/common_types.hrl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/common_types.hrl
new file mode 100644
index 0000000000..f362a06bca
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/common_types.hrl
@@ -0,0 +1,6 @@
+-type host() :: nonempty_string().
+-type path() :: nonempty_string().
+-type url() :: binary().
+
+% The host portion of a url, if available.
+-type url_host() :: host() | none.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/config.hrl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/config.hrl
new file mode 100644
index 0000000000..8cab65fc9c
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/config.hrl
@@ -0,0 +1,148 @@
+
+-define(SECOND, 1000).
+-define(MINUTE, (60 * ?SECOND)).
+-define(HOUR, (60 * ?MINUTE)).
+-define(DAY, (24 * ?HOUR)).
+-define(MB, (1024 * 1024)).
+
+% Maximum length of tag/blob prefix
+-define(NAME_MAX, 511).
+
+% How long ddfs node startup can take. The most time-consuming part
+% is the scanning of the tag objects in the node's DDFS volumes.
+-define(NODE_STARTUP, (1 * ?MINUTE)).
+
+% How long to wait on the master for replies from nodes.
+-define(NODE_TIMEOUT, (10 * ?SECOND)).
+
+% How long to wait for a reply from an operation coordinated by the
+% master that accesses nodes. This value should be larger than
+% NODE_TIMEOUT.
+-define(NODEOP_TIMEOUT, (1 * ?MINUTE)).
+
+% The minimum amount of free space a node must have, to be considered
+% a primary candidate host for a new blob.
+-define(MIN_FREE_SPACE, (1024 * ?MB)).
+
+% The maximum number of active HTTP connections on a system (this
+% applies separately for GET and PUT operations).
+-define(HTTP_MAX_ACTIVE, 3).
+
+% The maximum number of waiting HTTP connections to queue up on a busy system.
+-define(HTTP_QUEUE_LENGTH, 100).
+
+% The maximum number of simultaneous HTTP connections. Note that
+% HTTP_MAX_CONNS * 2 * 2 + 32 < Maximum number of file descriptors, where
+% 2 = Get and put, 2 = two FDs required for each connection (connection
+% itself + a file it accesses), 32 = a guess how many extra fds is needed.
+-define(HTTP_MAX_CONNS, 128).
+
+% How long to keep a PUT request in queue if the system is busy.
+-define(PUT_WAIT_TIMEOUT, (1 * ?MINUTE)).
+
+% How long to keep a GET request in queue if the system is busy.
+-define(GET_WAIT_TIMEOUT, (1 * ?MINUTE)).
+
+% An unused loaded tag expires in TAG_EXPIRES milliseconds. Note that
+% if TAG_EXPIRES is not smaller than GC_INTERVAL, tags will never
+% expire from the memory cache and will always take up memory.
+-define(TAG_EXPIRES, (10 * ?HOUR)).
+
+% How often the master's cache of all known tag names is refreshed.
+% This refresh is only needed to purge deleted tags eventually from
+% the tag cache. It doesn't harm to have a long interval.
+-define(TAG_CACHE_INTERVAL, (10 * ?MINUTE)).
+
+% How soon a tag object initialized in memory expires if it's content
+% cannot be fetched from the cluster.
+-define(TAG_EXPIRES_ONERROR, (1 * ?SECOND)).
+
+% How often a DDFS node should refresh its tag cache from disk.
+-define(FIND_TAGS_INTERVAL, ?DAY).
+
+% How often buffered (delayed) updates to a tag need to be
+% flushed. Tradeoff: The longer the interval, the more updates are
+% bundled in a single commit. On the other hand, in the worst case
+% the requester has to wait for the full interval before getting a
+% reply. A long interval also increases the likelihood that the server
+% crashes before the commit has finished successfully, making requests
+% more unreliable.
+-define(DELAYED_FLUSH_INTERVAL, (1 * ?SECOND)).
+
+% How long to wait between garbage collection runs.
+-define(GC_INTERVAL, ?DAY).
+
+% Max duration for a GC run. This should be smaller than
+% min(ORPHANED_{BLOB,TAG}_EXPIRES).
+-define(GC_MAX_DURATION, (3 * ?DAY)).
+
+% How long to wait after startup for cluster to stabilize before
+% starting the first GC run.
+-define(GC_DEFAULT_INITIAL_WAIT, (5 * ?MINUTE)).
+
+% The longest potential interval between messages in the GC protocol;
+% used to ensure GC makes forward progress. This can be set to the
+% estimated time to traverse all the volumes on a DDFS node.
+-define(GC_PROGRESS_INTERVAL, (30 * ?MINUTE)).
+
+% Number of extra replicas (i.e. lost replicas recovered during GC) to
+% allow before deleting extra replicas.
+-define(NUM_EXTRA_REPLICAS, 1).
+
+% Permissions for files backing blobs and tags.
+-define(FILE_MODE, 8#00400).
+
+% How often to check available disk space in ddfs_node.
+-define(DISKSPACE_INTERVAL, (10 * ?SECOND)).
+
+% The maximum size of payloads of HTTP requests to the /ddfs/tag/
+% prefix.
+-define(MAX_TAG_BODY_SIZE, (512 * ?MB)).
+
+% Tag attribute names and values have a limited size, and there
+% can be only a limited number of them.
+-define(MAX_TAG_ATTRIB_NAME_SIZE, 1024).
+-define(MAX_TAG_ATTRIB_VALUE_SIZE, 1024).
+-define(MAX_NUM_TAG_ATTRIBS, 1000).
+
+% How long HTTP requests that perform tag updates should wait to
+% finish (a long time).
+-define(TAG_UPDATE_TIMEOUT, ?DAY).
+
+% Timeout for re-replicating a single blob over HTTP PUT. This
+% depends on the largest blobs hosted by DDFS, and the speed of the
+% cluster network.
+-define(GC_PUT_TIMEOUT, (180 * ?MINUTE)).
+
+% Delete !partial files after this many milliseconds.
+-define(PARTIAL_EXPIRES, ?DAY).
+
+% When orphaned blob can be deleted. This should be large enough that
+% you can upload all the new blobs of a tag and perform the tag update
+% within this time.
+-define(ORPHANED_BLOB_EXPIRES, (5 * ?DAY)).
+
+% When orphaned tag can be deleted.
+-define(ORPHANED_TAG_EXPIRES, (5 * ?DAY)).
+
+% How long a tag has to stay on the deleted list before
+% we can permanently forget it, after all known instances
+% of the tag object have been removed. This quarantine period
+% ensures that a node that was temporarily unavailable
+% and reactivates can't resurrect deleted tags. You
+% must ensure that all temporarily inactive nodes
+% are reactivated (or cleaned) within the ?DELETED_TAG_EXPIRES
+% time frame.
+%
+% This value _must_ be larger than the other time-related DDFS
+% parameters listed in this file. In particular, it must be larger
+% than ORPHANED_TAG_EXPIRES.
+-define(DELETED_TAG_EXPIRES, (30 * ?DAY)).
+
+% How many times a tag operation should be retried before aborting.
+-define(MAX_TAG_OP_RETRIES, 3).
+
+% How long to wait before timing out a tag retrieval. This should be
+% large enough to read a large tag object off the disk and send it
+% over the network.
+-define(GET_TAG_TIMEOUT, (5 * ?MINUTE)).
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs.hrl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs.hrl
new file mode 100644
index 0000000000..e43ec23fe1
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs.hrl
@@ -0,0 +1,9 @@
+-type volume_name() :: nonempty_string().
+
+% Diskinfo is {FreeSpace, UsedSpace}.
+-type diskinfo() :: {non_neg_integer(), non_neg_integer()}.
+-type volume() :: {diskinfo(), volume_name()}.
+
+-type object_type() :: 'blob' | 'tag'.
+-type object_name() :: binary().
+-type taginfo() :: {erlang:timestamp(), volume_name()}.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_gc.hrl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_gc.hrl
new file mode 100644
index 0000000000..dc43f7586b
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_gc.hrl
@@ -0,0 +1,17 @@
+-type local_object() :: {object_name(), node()}.
+-type phase() :: 'start' | 'build_map' | 'map_wait' | 'gc'
+ | 'rr_blobs' | 'rr_blobs_wait' | 'rr_tags'.
+-type protocol_msg() :: {'check_blob', object_name()} | 'start_gc' | 'end_rr'.
+
+-type blob_update() :: {object_name(), 'filter' | [url()]}.
+
+-type check_blob_result() :: 'false' | {'true', volume_name()}.
+
+% GC statistics
+
+% {Files, Bytes}
+-type gc_stat() :: {non_neg_integer(), non_neg_integer()}.
+% {Kept, Deleted}
+-type obj_stats() :: {gc_stat(), gc_stat()}.
+% {Tags, Blobs}.
+-type gc_run_stats() :: {obj_stats(), obj_stats()}.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_master.erl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_master.erl
new file mode 100644
index 0000000000..2be2773dc5
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_master.erl
@@ -0,0 +1,531 @@
+-module(ddfs_master).
+-behaviour(gen_server).
+
+-export([start_link/0]).
+-export([get_tags/1, get_tags/3,
+ get_nodeinfo/1,
+ get_read_nodes/0,
+ get_hosted_tags/1,
+ gc_blacklist/0, gc_blacklist/1,
+ gc_stats/0,
+ choose_write_nodes/3,
+ new_blob/4, new_blob/5,
+ safe_gc_blacklist/0, safe_gc_blacklist/1,
+ refresh_tag_cache/0,
+ tag_notify/2,
+ tag_operation/2, tag_operation/3,
+ update_gc_stats/1,
+ update_nodes/1
+ ]).
+-export([init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3]).
+
+-define(WEB_PORT, 8011).
+
+-compile(nowarn_deprecated_type).
+
+-include("common_types.hrl").
+-include("gs_util.hrl").
+-include("config.hrl").
+-include("ddfs.hrl").
+-include("ddfs_tag.hrl").
+-include("ddfs_gc.hrl").
+
+-type node_info() :: {node(), {non_neg_integer(), non_neg_integer()}}.
+-type gc_stats() :: none | gc_run_stats().
+
+-record(state, {tags = gb_trees:empty() :: gb_trees:tree(),
+ tag_cache = false :: false | gb_sets:set(),
+ cache_refresher :: pid(),
+
+ nodes = [] :: [node_info()],
+ write_blacklist = [] :: [node()],
+ read_blacklist = [] :: [node()],
+ gc_blacklist = [] :: [node()],
+ safe_gc_blacklist = gb_sets:empty() :: gb_sets:set(),
+ gc_stats = none :: none | {gc_stats(), erlang:timestamp()}}).
+-type state() :: #state{}.
+-type replyto() :: {pid(), reference()}.
+
+-export_type([gc_stats/0, node_info/0]).
+
+%% ===================================================================
+%% API functions
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ lager:info("DDFS master starts"),
+ case gen_server:start_link({local, ?MODULE}, ?MODULE, [], []) of
+ {ok, Server} -> {ok, Server};
+ {error, {already_started, Server}} -> {ok, Server}
+ end.
+
+-spec tag_operation(term(), tagname()) -> term().
+tag_operation(Op, Tag) ->
+ gen_server:call(?MODULE, {tag, Op, Tag}).
+-spec tag_operation(term(), tagname(), non_neg_integer() | infinity) ->
+ term().
+tag_operation(Op, Tag, Timeout) ->
+ gen_server:call(?MODULE, {tag, Op, Tag}, Timeout).
+
+-spec tag_notify(term(), tagname()) -> ok.
+tag_notify(Op, Tag) ->
+ gen_server:cast(?MODULE, {tag_notify, Op, Tag}).
+
+-spec get_nodeinfo(all) -> {ok, [node_info()]}.
+get_nodeinfo(all) ->
+ gen_server:call(?MODULE, {get_nodeinfo, all}).
+
+-spec get_read_nodes() -> {ok, [node()], non_neg_integer()} | {error, term()}.
+get_read_nodes() ->
+ gen_server:call(?MODULE, get_read_nodes, infinity).
+
+-spec gc_blacklist() -> {ok, [node()]}.
+gc_blacklist() ->
+ gen_server:call(?MODULE, gc_blacklist).
+
+-spec gc_blacklist([node()]) -> ok.
+gc_blacklist(Nodes) ->
+ gen_server:cast(?MODULE, {gc_blacklist, Nodes}).
+
+-spec gc_stats() -> {ok, none | {gc_stats(), erlang:timestamp()}} | {error, term()}.
+gc_stats() ->
+ gen_server:call(?MODULE, gc_stats).
+
+-spec get_hosted_tags(host()) -> {ok, [tagname()]} | {error, term()}.
+get_hosted_tags(Host) ->
+ gen_server:call(?MODULE, {get_hosted_tags, Host}).
+
+-spec choose_write_nodes(non_neg_integer(), [node()], [node()]) -> {ok, [node()]}.
+choose_write_nodes(K, Include, Exclude) ->
+ gen_server:call(?MODULE, {choose_write_nodes, K, Include, Exclude}).
+
+-spec get_tags(gc) -> {ok, [tagname()], [node()]} | too_many_failed_nodes;
+ (safe) -> {ok, [binary()]} | too_many_failed_nodes.
+get_tags(Mode) ->
+ get_tags(?MODULE, Mode, ?GET_TAG_TIMEOUT).
+
+-spec get_tags(server(), gc, non_neg_integer()) ->
+ {ok, [tagname()], [node()]} | too_many_failed_nodes;
+ (server(), safe, non_neg_integer()) ->
+ {ok, [binary()]} | too_many_failed_nodes.
+get_tags(Server, Mode, Timeout) ->
+ disco_profile:timed_run(
+ fun() -> gen_server:call(Server, {get_tags, Mode}, Timeout) end,
+ get_tags).
+
+-spec new_blob(string()|object_name(), non_neg_integer(), [node()], [node()]) ->
+ too_many_replicas | {ok, [nonempty_string()]}.
+new_blob(Obj, K, Include, Exclude) ->
+ gen_server:call(?MODULE, {new_blob, Obj, K, Include, Exclude}, infinity).
+
+-spec new_blob(server(), string()|object_name(), non_neg_integer(), [node()], [node()]) ->
+ too_many_replicas | {ok, [nonempty_string()]}.
+new_blob(Master, Obj, K, Include, Exclude) ->
+ gen_server:call(Master, {new_blob, Obj, K, Include, Exclude}, infinity).
+
+-spec safe_gc_blacklist() -> {ok, [node()]} | {error, term()}.
+safe_gc_blacklist() ->
+ gen_server:call(?MODULE, safe_gc_blacklist).
+
+-spec safe_gc_blacklist(gb_sets:set()) -> ok.
+safe_gc_blacklist(SafeGCBlacklist) ->
+ gen_server:cast(?MODULE, {safe_gc_blacklist, SafeGCBlacklist}).
+
+-spec update_gc_stats(gc_run_stats()) -> ok.
+update_gc_stats(Stats) ->
+ gen_server:cast(?MODULE, {update_gc_stats, Stats}).
+
+-type nodes_update() :: [{node(), boolean(), boolean()}].
+-spec update_nodes(nodes_update()) -> ok.
+update_nodes(DDFSNodes) ->
+ gen_server:cast(?MODULE, {update_nodes, DDFSNodes}).
+
+-spec update_nodestats(gb_trees:tree()) -> ok.
+update_nodestats(NewNodes) ->
+ gen_server:cast(?MODULE, {update_nodestats, NewNodes}).
+
+-spec update_tag_cache(gb_sets:set()) -> ok.
+update_tag_cache(TagCache) ->
+ gen_server:cast(?MODULE, {update_tag_cache, TagCache}).
+
+-spec refresh_tag_cache() -> ok.
+refresh_tag_cache() ->
+ gen_server:cast(?MODULE, refresh_tag_cache).
+
+%% ===================================================================
+%% gen_server callbacks
+
+-spec init(_) -> gs_init().
+init(_Args) ->
+ _ = [disco_profile:new_histogram(Name)
+ || Name <- [get_tags, do_get_tags_all, do_get_tags_filter,
+ do_get_tags_safe, do_get_tags_gc]],
+ spawn_link(fun() -> monitor_diskspace() end),
+ spawn_link(fun() -> ddfs_gc:start_gc(disco:get_setting("DDFS_DATA")) end),
+ Refresher = spawn_link(fun() -> refresh_tag_cache_proc() end),
+ put(put_port, disco:get_setting("DDFS_PUT_PORT")),
+ {ok, #state{cache_refresher = Refresher}}.
+
+-type choose_write_nodes_msg() :: {choose_write_nodes, non_neg_integer(), [node()], [node()]}.
+-type new_blob_msg() :: {new_blob, string() | object_name(), non_neg_integer(), [node()]}.
+-type tag_msg() :: {tag, ddfs_tag:call_msg(), tagname()}.
+-spec handle_call(dbg_state_msg(), from(), state()) ->
+ gs_reply(state());
+ ({get_nodeinfo, all}, from(), state()) ->
+ gs_reply({ok, [node_info()]});
+ (get_read_nodes, from(), state()) ->
+ gs_reply({ok, [node()], non_neg_integer});
+ (gc_blacklist, from(), state()) ->
+ gs_reply({ok, [node()]});
+ (gc_stats, from(), state()) ->
+ gs_reply({ok, gc_stats(), erlang:timestamp()});
+ (choose_write_nodes_msg(), from(), state()) ->
+ gs_reply({ok, [node()]});
+ (new_blob_msg(), from(), state()) ->
+ gs_reply(new_blob_result());
+ (tag_msg(), from(), state()) ->
+ gs_reply({error, nonodes}) | gs_noreply();
+ ({get_tags, gc | safe}, from(), state()) ->
+ gs_noreply();
+ ({get_hosted_tags, host()}, from(), state()) ->
+ gs_noreply();
+ (safe_gc_blacklist, from(), state()) ->
+ gs_reply({ok, [node()]}).
+handle_call(dbg_get_state, _, S) ->
+ {reply, S, S};
+
+handle_call({get_nodeinfo, all}, _From, #state{nodes = Nodes} = S) ->
+ {reply, {ok, Nodes}, S};
+
+handle_call(get_read_nodes, _F, #state{nodes = Nodes, read_blacklist = RB} = S) ->
+ {reply, do_get_readable_nodes(Nodes, RB), S};
+
+handle_call(gc_blacklist, _F, #state{gc_blacklist = Nodes} = S) ->
+ {reply, {ok, Nodes}, S};
+
+handle_call(gc_stats, _F, #state{gc_stats = Stats} = S) ->
+ {reply, {ok, Stats}, S};
+
+handle_call({choose_write_nodes, K, Include, Exclude}, _,
+ #state{nodes = N, write_blacklist = WBL, gc_blacklist = GBL} = S) ->
+ BL = lists:umerge(WBL, GBL),
+ {reply, do_choose_write_nodes(N, K, Include, Exclude, BL), S};
+
+handle_call({new_blob, Obj, K, Include, Exclude}, _,
+ #state{nodes = N, gc_blacklist = GBL, write_blacklist = WBL} = S) ->
+ BL = lists:umerge(WBL, GBL),
+ {reply, do_new_blob(Obj, K, Include, Exclude, BL, N), S};
+
+handle_call({tag, _M, _Tag}, _From, #state{nodes = []} = S) ->
+ {reply, {error, no_nodes}, S};
+
+handle_call({tag, M, Tag}, From, S) ->
+ {noreply, do_tag_request(M, Tag, From, S)};
+
+handle_call({get_tags, Mode}, From, #state{nodes = Nodes} = S) ->
+ spawn(fun() ->
+ gen_server:reply(From, do_get_tags(Mode, [N || {N, _} <- Nodes]))
+ end),
+ {noreply, S};
+
+handle_call({get_hosted_tags, Host}, From, S) ->
+ spawn(fun() -> gen_server:reply(From, ddfs_gc:hosted_tags(Host)) end),
+ {noreply, S};
+
+handle_call(safe_gc_blacklist, _From, #state{safe_gc_blacklist = SBL} = S) ->
+ {reply, {ok, gb_sets:to_list(SBL)}, S}.
+
+-spec handle_cast({tag_notify, ddfs_tag:cast_msg(), tagname()}
+ | {gc_blacklist, [node()]}
+ | {safe_gc_blacklist, gb_sets:set()}
+ | {update_gc_stats, gc_stats()}
+ | {update_tag_cache, gb_sets:set()}
+ | refresh_tag_cache
+ | {update_nodes, nodes_update()}
+ | {update_nodestats, gb_trees:tree()},
+ state()) -> gs_noreply().
+handle_cast({tag_notify, M, Tag}, S) ->
+ {noreply, do_tag_notify(M, Tag, S)};
+
+handle_cast({gc_blacklist, Nodes}, #state{safe_gc_blacklist = SBL} = S) ->
+ BLSet = gb_sets:from_list(Nodes),
+ NewSBL = gb_sets:intersection(BLSet, SBL),
+ {noreply, S#state{gc_blacklist = gb_sets:to_list(BLSet),
+ safe_gc_blacklist = NewSBL}};
+
+handle_cast({safe_gc_blacklist, SafeBlacklist}, #state{gc_blacklist = BL} = S) ->
+ SBL = gb_sets:intersection(SafeBlacklist, gb_sets:from_list(BL)),
+ {noreply, S#state{safe_gc_blacklist = SBL}};
+
+handle_cast({update_gc_stats, Stats}, S) ->
+ {noreply, S#state{gc_stats = {Stats, now()}}};
+
+handle_cast({update_tag_cache, TagCache}, S) ->
+ {noreply, S#state{tag_cache = TagCache}};
+
+handle_cast(refresh_tag_cache, #state{cache_refresher = Refresher} = S) ->
+ Refresher ! refresh,
+ {noreply, S};
+
+handle_cast({update_nodes, NewNodes}, S) ->
+ {noreply, do_update_nodes(NewNodes, S)};
+
+handle_cast({update_nodestats, NewNodes}, S) ->
+ {noreply, do_update_nodestats(NewNodes, S)}.
+
+-spec handle_info({'DOWN', _, _, pid(), _}, state()) -> gs_noreply().
+handle_info({'DOWN', _, _, Pid, _}, S) ->
+ {noreply, do_tag_exit(Pid, S)}.
+
+%% ===================================================================
+%% gen_server callback stubs
+
+-spec terminate(term(), state()) -> ok.
+terminate(Reason, _State) ->
+ lager:warning("DDFS master died: ~p", [Reason]).
+
+-spec code_change(term(), state(), term()) -> {ok, state()}.
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%% ===================================================================
+%% internal functions
+
+-spec do_get_readable_nodes([node_info()], [node()]) ->
+ {ok, [node()], non_neg_integer()}.
+do_get_readable_nodes(Nodes, ReadBlacklist) ->
+ NodeSet = gb_sets:from_ordset(lists:sort([Node || {Node, _} <- Nodes])),
+ BlackSet = gb_sets:from_ordset(ReadBlacklist),
+ ReadableNodeSet = gb_sets:subtract(NodeSet, BlackSet),
+ {ok, gb_sets:to_list(ReadableNodeSet), gb_sets:size(BlackSet)}.
+
+-spec do_choose_write_nodes([node_info()], non_neg_integer(), [node()], [node()], [node()]) ->
+ {ok, [node()]}.
+do_choose_write_nodes(Nodes, K, Include, Exclude, BlackList) ->
+ % Include is the list of nodes that must be included
+ %
+ % Node selection algorithm:
+ % 1. try to choose K nodes randomly from all the nodes which have
+ % more than ?MIN_FREE_SPACE bytes free space available and which
+ % are not excluded or blacklisted.
+ % 2. if K nodes cannot be found this way, choose the K emptiest
+ % nodes which are not excluded or blacklisted.
+ Primary = ([N || {N, {Free, _Total}} <- Nodes, Free > ?MIN_FREE_SPACE / 1024]
+ -- (Exclude ++ BlackList)),
+ if length(Primary) >= K ->
+ {ok, Include ++ disco_util:choose_random(Primary -- Include , K - length(Include))};
+ true ->
+ Preferred = [N || {N, _} <- lists:reverse(lists:keysort(2, Nodes))],
+ Secondary = Include ++ lists:sublist(Preferred -- (Include ++ Exclude ++ BlackList),
+ K - length(Include)),
+ {ok, Secondary}
+ end.
+
+-type new_blob_result() :: too_many_replicas | {ok, [nonempty_string()]}.
+-spec do_new_blob(string()|object_name(), non_neg_integer(), [node()], [node()], [node()], [node_info()]) ->
+ new_blob_result().
+do_new_blob(_Obj, K, _Include, _Exclude, _BlackList, Nodes) when K > length(Nodes) ->
+ too_many_replicas;
+do_new_blob(Obj, K, Include, Exclude, BlackList, Nodes) ->
+ {ok, WriteNodes} = do_choose_write_nodes(Nodes, K, Include, Exclude, BlackList),
+ Urls = [["http://", disco:host(N), ":", get(put_port), "/ddfs/", Obj]
+ || N <- WriteNodes],
+ {ok, Urls}.
+
+% Tag request: Start a new tag server if one doesn't exist already. Forward
+% the request to the tag server.
+
+-spec get_tag_pid(tagname(), gb_trees:tree(), false | gb_sets:set()) ->
+ {pid(), gb_trees:tree()}.
+get_tag_pid(Tag, Tags, Cache) ->
+ case gb_trees:lookup(Tag, Tags) of
+ none ->
+ NotFound = (Cache =/= false
+ andalso not gb_sets:is_element(Tag, Cache)),
+ {ok, Server} = ddfs_tag:start(Tag, NotFound),
+ erlang:monitor(process, Server),
+ {Server, gb_trees:insert(Tag, Server, Tags)};
+ {value, P} ->
+ {P, Tags}
+ end.
+
+-spec do_tag_request(term(), tagname(), replyto(), state()) ->
+ state().
+do_tag_request(M, Tag, From, #state{tags = Tags, tag_cache = Cache} = S) ->
+ {Pid, TagsN} = get_tag_pid(Tag, Tags, Cache),
+ gen_server:cast(Pid, {M, From}),
+ S#state{tags = TagsN,
+ tag_cache = Cache =/= false andalso gb_sets:add(Tag, Cache)}.
+
+-spec do_tag_notify(term(), tagname(), state()) -> state().
+do_tag_notify(M, Tag, #state{tags = Tags, tag_cache = Cache} = S) ->
+ {Pid, TagsN} = get_tag_pid(Tag, Tags, Cache),
+ gen_server:cast(Pid, {notify, M}),
+ S#state{tags = TagsN,
+ tag_cache = Cache =/= false andalso gb_sets:add(Tag, Cache)}.
+
+-spec do_update_nodes(nodes_update(), state()) -> state().
+do_update_nodes(NewNodes, #state{nodes = Nodes, tags = Tags} = S) ->
+ WriteBlacklist = lists:sort([Node || {Node, false, _} <- NewNodes]),
+ ReadBlacklist = lists:sort([Node || {Node, _, false} <- NewNodes]),
+ OldNodes = gb_trees:from_orddict(Nodes),
+ UpdatedNodes = lists:keysort(1, [case gb_trees:lookup(Node, OldNodes) of
+ none ->
+ {Node, {0, 0}};
+ {value, OldStats} ->
+ {Node, OldStats}
+ end || {Node, _WB, _RB} <- NewNodes]),
+ if
+ UpdatedNodes =/= Nodes ->
+ _ = [gen_server:cast(Pid, {die, none}) || Pid <- gb_trees:values(Tags)],
+ spawn(fun() ->
+ {ok, ReadableNodes, RBSize} =
+ do_get_readable_nodes(UpdatedNodes, ReadBlacklist),
+ refresh_tag_cache(ReadableNodes, RBSize)
+ end),
+ S#state{nodes = UpdatedNodes,
+ write_blacklist = WriteBlacklist,
+ read_blacklist = ReadBlacklist,
+ tag_cache = false,
+ tags = gb_trees:empty()};
+ true ->
+ S#state{write_blacklist = WriteBlacklist,
+ read_blacklist = ReadBlacklist}
+ end.
+
+-spec do_update_nodestats(gb_trees:tree(), state()) -> state().
+do_update_nodestats(NewNodes, #state{nodes = Nodes} = S) ->
+ UpdatedNodes = [case gb_trees:lookup(Node, NewNodes) of
+ none ->
+ {Node, Stats};
+ {value, NewStats} ->
+ {Node, NewStats}
+ end || {Node, Stats} <- Nodes],
+ S#state{nodes = UpdatedNodes}.
+
+-spec do_tag_exit(pid(), state()) -> state().
+do_tag_exit(Pid, S) ->
+ NewTags = [X || {_, V} = X <- gb_trees:to_list(S#state.tags), V =/= Pid],
+ S#state{tags = gb_trees:from_orddict(NewTags)}.
+
+-spec do_get_tags(all | filter, [node()]) -> {[node()], [node()], [binary()]};
+ (safe, [node()]) -> {ok, [binary()]} | too_many_failed_nodes;
+ (gc, [node()]) -> {ok, [binary()], [node()]} | too_many_failed_nodes.
+do_get_tags(all, Nodes) ->
+ disco_profile:timed_run(
+ fun() ->
+ {Replies, Failed} =
+ gen_server:multi_call(Nodes, ddfs_node, get_tags, ?NODE_TIMEOUT),
+ {OkNodes, Tags} = lists:unzip(Replies),
+ {OkNodes, Failed, lists:usort(lists:flatten(Tags))}
+ end, do_get_tags_all);
+
+do_get_tags(filter, Nodes) ->
+ disco_profile:timed_run(
+ fun() ->
+ {OkNodes, Failed, Tags} = do_get_tags(all, Nodes),
+ case tag_operation(get_tagnames, <<"+deleted">>, ?NODEOP_TIMEOUT) of
+ {ok, Deleted} ->
+ TagSet = gb_sets:from_ordset(Tags),
+ DelSet = gb_sets:insert(<<"+deleted">>, Deleted),
+ NotDeleted = gb_sets:to_list(gb_sets:subtract(TagSet, DelSet)),
+ {OkNodes, Failed, NotDeleted};
+ E ->
+ E
+ end
+ end, do_get_tags_filter);
+
+do_get_tags(safe, Nodes) ->
+ disco_profile:timed_run(
+ fun() ->
+ TagMinK = list_to_integer(disco:get_setting("DDFS_TAG_MIN_REPLICAS")),
+ case do_get_tags(filter, Nodes) of
+ {_OkNodes, Failed, Tags} when length(Failed) < TagMinK ->
+ {ok, Tags};
+ _ ->
+ too_many_failed_nodes
+ end
+ end, do_get_tags_safe);
+
+% The returned tag list may include +deleted.
+do_get_tags(gc, Nodes) ->
+ disco_profile:timed_run(
+ fun() ->
+ {OkNodes, Failed, Tags} = do_get_tags(all, Nodes),
+ TagMinK = list_to_integer(disco:get_setting("DDFS_TAG_MIN_REPLICAS")),
+ case length(Failed) < TagMinK of
+ false ->
+ too_many_failed_nodes;
+ true ->
+ case tag_operation(get_tagnames, <<"+deleted">>, ?NODEOP_TIMEOUT) of
+ {ok, Deleted} ->
+ TagSet = gb_sets:from_ordset(Tags),
+ NotDeleted = gb_sets:subtract(TagSet, Deleted),
+ {ok, gb_sets:to_list(NotDeleted), OkNodes};
+ E ->
+ E
+ end
+ end
+ end, do_get_tags_gc).
+
+% Timeouts in this call by the below processes can cause ddfs_master
+% itself to crash, since the processes are linked to it.
+-spec safe_get_read_nodes() -> {ok, [node()], non_neg_integer()} | error.
+safe_get_read_nodes() ->
+ try get_read_nodes() of
+ {ok, _ReadableNodes, _RBSize} = RN ->
+ RN;
+ E ->
+ lager:error("unexpected response retrieving readable nodes: ~p", [E]),
+ error
+ catch
+ K:E ->
+ lager:error("error retrieving readable nodes: ~p:~p", [K, E]),
+ error
+ end.
+
+-spec monitor_diskspace() -> no_return().
+monitor_diskspace() ->
+ case safe_get_read_nodes() of
+ {ok, ReadableNodes, _RBSize} ->
+ {Space, _F} = gen_server:multi_call(ReadableNodes,
+ ddfs_node,
+ get_diskspace,
+ ?NODE_TIMEOUT),
+ update_nodestats(gb_trees:from_orddict(lists:keysort(1, Space)));
+ error ->
+ ok
+ end,
+ timer:sleep(?DISKSPACE_INTERVAL),
+ monitor_diskspace().
+
+-spec refresh_tag_cache_proc() -> no_return().
+refresh_tag_cache_proc() ->
+ case safe_get_read_nodes() of
+ {ok, ReadableNodes, RBSize} ->
+ refresh_tag_cache(ReadableNodes, RBSize);
+ error ->
+ ok
+ end,
+ receive
+ refresh ->
+ ok
+ after ?TAG_CACHE_INTERVAL ->
+ ok
+ end,
+ refresh_tag_cache_proc().
+
+-spec refresh_tag_cache([node()], non_neg_integer()) -> ok.
+refresh_tag_cache(Nodes, BLSize) ->
+ TagMinK = list_to_integer(disco:get_setting("DDFS_TAG_MIN_REPLICAS")),
+ {Replies, Failed} =
+ gen_server:multi_call(Nodes, ddfs_node, get_tags, ?NODE_TIMEOUT),
+ if Nodes =/= [], length(Failed) + BLSize < TagMinK ->
+ {_OkNodes, Tags} = lists:unzip(Replies),
+ update_tag_cache(gb_sets:from_list(lists:flatten(Tags)));
+ true -> ok
+ end.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_tag.hrl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_tag.hrl
new file mode 100644
index 0000000000..2920b67fc5
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/ddfs_tag.hrl
@@ -0,0 +1,19 @@
+
+-type tokentype() :: 'read' | 'write'.
+-type user_attr() :: [{binary(), binary()}].
+% An 'internal' token is also used by internal consumers, but never stored.
+-type token() :: 'null' | binary().
+
+-type tagname() :: binary().
+-type tagid() :: binary().
+
+-type attrib() :: 'urls' | 'read_token' | 'write_token' | {'user', binary()}.
+
+-record(tagcontent, {id :: tagid(),
+ last_modified :: binary(),
+ read_token = null :: token(),
+ write_token = null :: token(),
+ urls = [] :: [[binary()]],
+ user = [] :: user_attr()}).
+
+-type tagcontent() :: #tagcontent{}.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/gs_util.hrl b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/gs_util.hrl
new file mode 100644
index 0000000000..d579e9a7d7
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/ddfs_master/gs_util.hrl
@@ -0,0 +1,16 @@
+% This is a set of type utilities to be used when spec-cing the
+% callbacks of a gen_server implementation. It should be included in
+% the impl module, which needs to define the state() type.
+
+-type gs_init() :: {ok, state()}.
+-type gs_reply(T) :: {reply, (T), state()}.
+-type gs_noreply() :: {noreply, state()}.
+-type gs_noreply_t() :: {noreply, state(), non_neg_integer()}.
+-type gs_stop(T) :: {stop, (T), state()}.
+
+% Generic utilities.
+
+-type server() :: pid() | atom() | {atom(), node()}.
+-type from() :: {pid(), term()}.
+
+-type dbg_state_msg() :: dbg_get_state.
diff --git a/lib/hipe/cerl/erl_types.erl b/lib/hipe/cerl/erl_types.erl
index 47b8dc766a..6065b79664 100644
--- a/lib/hipe/cerl/erl_types.erl
+++ b/lib/hipe/cerl/erl_types.erl
@@ -2985,16 +2985,19 @@ inf_union(U1, U2, Opaques) ->
List = [A,B,F,I,L,N,T,M,Map],
inf_union_collect(List, Opaque, InfFun, [], [])
end,
- O1 = OpaqueFun(U1, U2, fun(E, Opaque) -> t_inf(Opaque, E, Opaques) end),
- O2 = OpaqueFun(U2, U1, fun(E, Opaque) -> t_inf(E, Opaque, Opaques) end),
- Union = inf_union(U1, U2, 0, [], Opaques),
- t_sup([O1, O2, Union]).
+ {O1, ThrowList1} =
+ OpaqueFun(U1, U2, fun(E, Opaque) -> t_inf(Opaque, E, Opaques) end),
+ {O2, ThrowList2}
+ = OpaqueFun(U2, U1, fun(E, Opaque) -> t_inf(E, Opaque, Opaques) end),
+ {Union, ThrowList3} = inf_union(U1, U2, 0, [], [], Opaques),
+ ThrowList = lists:merge3(ThrowList1, ThrowList2, ThrowList3),
+ case t_sup([O1, O2, Union]) of
+ ?none when ThrowList =/= [] -> throw(hd(ThrowList));
+ Sup -> Sup
+ end.
inf_union_collect([], _Opaque, _InfFun, InfList, ThrowList) ->
- case t_sup(InfList) of
- ?none when ThrowList =/= [] -> throw(hd(lists:flatten(ThrowList)));
- Sup -> Sup
- end;
+ {t_sup(InfList), lists:usort(ThrowList)};
inf_union_collect([?none|L], Opaque, InfFun, InfList, ThrowList) ->
inf_union_collect(L, Opaque, InfFun, [?none|InfList], ThrowList);
inf_union_collect([E|L], Opaque, InfFun, InfList, ThrowList) ->
@@ -3005,19 +3008,21 @@ inf_union_collect([E|L], Opaque, InfFun, InfList, ThrowList) ->
inf_union_collect(L, Opaque, InfFun, InfList, [N|ThrowList])
end.
-inf_union([?none|Left1], [?none|Left2], N, Acc, Opaques) ->
- inf_union(Left1, Left2, N, [?none|Acc], Opaques);
-inf_union([T1|Left1], [T2|Left2], N, Acc, Opaques) ->
- case t_inf(T1, T2, Opaques) of
- ?none -> inf_union(Left1, Left2, N, [?none|Acc], Opaques);
- T -> inf_union(Left1, Left2, N+1, [T|Acc], Opaques)
+inf_union([?none|Left1], [?none|Left2], N, Acc, ThrowList, Opaques) ->
+ inf_union(Left1, Left2, N, [?none|Acc], ThrowList, Opaques);
+inf_union([T1|Left1], [T2|Left2], N, Acc, ThrowList, Opaques) ->
+ try t_inf(T1, T2, Opaques) of
+ ?none -> inf_union(Left1, Left2, N, [?none|Acc], ThrowList, Opaques);
+ T -> inf_union(Left1, Left2, N+1, [T|Acc], ThrowList, Opaques)
+ catch throw:N when is_integer(N) ->
+ inf_union(Left1, Left2, N, [?none|Acc], [N|ThrowList], Opaques)
end;
-inf_union([], [], N, Acc, _Opaques) ->
- if N =:= 0 -> ?none;
+inf_union([], [], N, Acc, ThrowList, _Opaques) ->
+ if N =:= 0 -> {?none, ThrowList};
N =:= 1 ->
[Type] = [T || T <- Acc, T =/= ?none],
- Type;
- N >= 2 -> ?union(lists:reverse(Acc))
+ {Type, ThrowList};
+ N >= 2 -> {?union(lists:reverse(Acc)), ThrowList}
end.
inf_bitstr(U1, B1, U2, B2) ->
diff --git a/lib/inets/test/inets_sup_SUITE.erl b/lib/inets/test/inets_sup_SUITE.erl
index cf28f5a245..60979278fc 100644
--- a/lib/inets/test/inets_sup_SUITE.erl
+++ b/lib/inets/test/inets_sup_SUITE.erl
@@ -77,75 +77,32 @@ end_per_suite(_) ->
%% variable, but should NOT alter/remove any existing entries.
%%--------------------------------------------------------------------
init_per_testcase(httpd_subtree, Config) ->
- io:format("init_per_testcase(httpd_subtree) -> entry with"
- "~n Config: ~p"
- "~n", [Config]),
Dog = test_server:timetrap(?t:minutes(1)),
NewConfig = lists:keydelete(watchdog, 1, Config),
-
- DataDir = ?config(data_dir, Config),
PrivDir = ?config(priv_dir, Config),
- ServerROOT = filename:join(PrivDir, "server_root"),
- DocROOT = filename:join(PrivDir, "htdocs"),
- ConfDir = filename:join(ServerROOT, "conf"),
-
- io:format("init_per_testcase(httpd_subtree) -> create dir(s)"
- "~n", []),
- file:make_dir(ServerROOT), %% until http_test is cleaned up!
- ok = file:make_dir(DocROOT),
- ok = file:make_dir(ConfDir),
-
- io:format("init_per_testcase(httpd_subtree) -> copy file(s)"
- "~n", []),
- {ok, _} = inets_test_lib:copy_file("simple.conf", DataDir, PrivDir),
- {ok, _} = inets_test_lib:copy_file("mime.types", DataDir, ConfDir),
-
- io:format("init_per_testcase(httpd_subtree) -> write file(s)"
- "~n", []),
- ConfFile = filename:join(PrivDir, "simple.conf"),
- {ok, Fd} = file:open(ConfFile, [append]),
- ok = file:write(Fd, "ServerRoot " ++ ServerROOT ++ "\n"),
- ok = file:write(Fd, "DocumentRoot " ++ DocROOT ++ "\n"),
- ok = file:close(Fd),
-
- %% To make sure application:set_env is not overwritten by any
- %% app-file settings.
- io:format("init_per_testcase(httpd_subtree) -> load inets app"
- "~n", []),
- application:load(inets),
- io:format("init_per_testcase(httpd_subtree) -> update inets env"
- "~n", []),
- ok = application:set_env(inets, services, [{httpd, ConfFile}]),
-
+
+ SimpleConfig = [{port, 0},
+ {server_name,"www.test"},
+ {modules, [mod_get]},
+ {server_root, PrivDir},
+ {document_root, PrivDir},
+ {bind_address, any},
+ {ipfamily, inet}],
try
- io:format("init_per_testcase(httpd_subtree) -> start inets app"
- "~n", []),
- ok = inets:start(),
- io:format("init_per_testcase(httpd_subtree) -> done"
- "~n", []),
- [{watchdog, Dog}, {server_root, ServerROOT}, {doc_root, DocROOT},
- {conf_dir, ConfDir}| NewConfig]
+ inets:start(),
+ inets:start(httpd, SimpleConfig),
+ [{watchdog, Dog} | NewConfig]
catch
_:Reason ->
- io:format("init_per_testcase(httpd_subtree) -> "
- "failed starting inets - cleanup"
- "~n Reason: ~p"
- "~n", [Reason]),
- application:unset_env(inets, services),
- application:unload(inets),
+ inets:stop(),
exit({failed_starting_inets, Reason})
end;
-init_per_testcase(Case, Config) ->
- io:format("init_per_testcase(~p) -> entry with"
- "~n Config: ~p"
- "~n", [Case, Config]),
+init_per_testcase(_Case, Config) ->
Dog = test_server:timetrap(?t:minutes(5)),
NewConfig = lists:keydelete(watchdog, 1, Config),
- Stop = inets:stop(),
- io:format("init_per_testcase(~p) -> Stop: ~p"
- "~n", [Case, Stop]),
+ inets:stop(),
ok = inets:start(),
[{watchdog, Dog} | NewConfig].
@@ -280,30 +237,21 @@ httpd_subtree(doc) ->
httpd_subtree(suite) ->
[];
httpd_subtree(Config) when is_list(Config) ->
- io:format("httpd_subtree -> entry with"
- "~n Config: ~p"
- "~n", [Config]),
-
%% Check that we have the httpd top supervisor
- io:format("httpd_subtree -> verify inets~n", []),
{ok, _} = verify_child(inets_sup, httpd_sup, supervisor),
%% Check that we have the httpd instance supervisor
- io:format("httpd_subtree -> verify httpd~n", []),
{ok, Id} = verify_child(httpd_sup, httpd_instance_sup, supervisor),
{httpd_instance_sup, Addr, Port} = Id,
Instance = httpd_util:make_name("httpd_instance_sup", Addr, Port),
%% Check that we have the expected httpd instance children
- io:format("httpd_subtree -> verify httpd instance children "
- "(acceptor, misc and manager)~n", []),
{ok, _} = verify_child(Instance, httpd_connection_sup, supervisor),
{ok, _} = verify_child(Instance, httpd_acceptor_sup, supervisor),
{ok, _} = verify_child(Instance, httpd_misc_sup, supervisor),
{ok, _} = verify_child(Instance, httpd_manager, worker),
%% Check that the httpd instance acc supervisor has children
- io:format("httpd_subtree -> verify acc~n", []),
InstanceAcc = httpd_util:make_name("httpd_acceptor_sup", Addr, Port),
case supervisor:which_children(InstanceAcc) of
[_ | _] ->
@@ -328,15 +276,7 @@ httpd_subtree(Config) when is_list(Config) ->
verify_child(Parent, Child, Type) ->
-%% io:format("verify_child -> entry with"
-%% "~n Parent: ~p"
-%% "~n Child: ~p"
-%% "~n Type: ~p"
-%% "~n", [Parent, Child, Type]),
Children = supervisor:which_children(Parent),
-%% io:format("verify_child -> which children"
-%% "~n Children: ~p"
-%% "~n", [Children]),
verify_child(Children, Parent, Child, Type).
verify_child([], Parent, Child, _Type) ->
@@ -344,21 +284,12 @@ verify_child([], Parent, Child, _Type) ->
verify_child([{Id, _Pid, Type2, Mods}|Children], Parent, Child, Type) ->
case lists:member(Child, Mods) of
true when (Type2 =:= Type) ->
-%% io:format("verify_child -> found with expected type"
-%% "~n Id: ~p"
-%% "~n", [Id]),
{ok, Id};
true when (Type2 =/= Type) ->
-%% io:format("verify_child -> found with unexpected type"
-%% "~n Type2: ~p"
-%% "~n Id: ~p"
-%% "~n", [Type2, Id]),
{error, {wrong_type, Type2, Child, Parent}};
false ->
verify_child(Children, Parent, Child, Type)
end.
-
-
%%-------------------------------------------------------------------------
%% httpc_subtree
@@ -368,40 +299,19 @@ httpc_subtree(doc) ->
httpc_subtree(suite) ->
[];
httpc_subtree(Config) when is_list(Config) ->
- tsp("httpc_subtree -> entry with"
- "~n Config: ~p", [Config]),
+ {ok, Foo} = inets:start(httpc, [{profile, foo}]),
- tsp("httpc_subtree -> start inets service httpc with profile foo"),
- {ok, _Foo} = inets:start(httpc, [{profile, foo}]),
+ {ok, Bar} = inets:start(httpc, [{profile, bar}], stand_alone),
- tsp("httpc_subtree -> "
- "start stand-alone inets service httpc with profile bar"),
- {ok, _Bar} = inets:start(httpc, [{profile, bar}], stand_alone),
-
- tsp("httpc_subtree -> retreive list of httpc instances"),
HttpcChildren = supervisor:which_children(httpc_profile_sup),
- tsp("httpc_subtree -> HttpcChildren: ~n~p", [HttpcChildren]),
-
- tsp("httpc_subtree -> verify httpc stand-alone instances"),
+
{value, {httpc_manager, _, worker, [httpc_manager]}} =
lists:keysearch(httpc_manager, 1, HttpcChildren),
- tsp("httpc_subtree -> verify httpc (named) instances"),
- {value,{{httpc,foo}, Pid, worker, [httpc_manager]}} =
+ {value,{{httpc,foo}, _Pid, worker, [httpc_manager]}} =
lists:keysearch({httpc, foo}, 1, HttpcChildren),
false = lists:keysearch({httpc, bar}, 1, HttpcChildren),
- tsp("httpc_subtree -> stop inets"),
- inets:stop(httpc, Pid),
-
- tsp("httpc_subtree -> done"),
- ok.
-
-tsp(F) ->
- tsp(F, []).
-tsp(F, A) ->
- test_server:format("~p ~p:" ++ F ++ "~n", [self(), ?MODULE | A]).
-
-tsf(Reason) ->
- test_server:fail(Reason).
+ inets:stop(httpc, Foo),
+ exit(Bar, normal).
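The property-list configuration used above replaces the Apache-style config file this suite previously generated; the same style works directly against the inets API. A minimal sketch (the /tmp paths are placeholders, and port 0 lets the server pick a free port):

    ok = inets:start(),
    {ok, Httpd} = inets:start(httpd, [{port, 0},
                                      {server_name, "www.test"},
                                      {server_root, "/tmp"},
                                      {document_root, "/tmp"},
                                      {bind_address, any},
                                      {modules, [mod_get]}]),
    %% Read back the port the server actually picked.
    [{port, _Port}] = httpd:info(Httpd, [port]).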
diff --git a/lib/inets/test/inets_sup_SUITE_data/mime.types b/lib/inets/test/inets_sup_SUITE_data/mime.types
deleted file mode 100644
index e52d345ff7..0000000000
--- a/lib/inets/test/inets_sup_SUITE_data/mime.types
+++ /dev/null
@@ -1,3 +0,0 @@
-# MIME type Extension
-text/html html htm
-text/plain asc txt
diff --git a/lib/inets/test/inets_sup_SUITE_data/simple.conf b/lib/inets/test/inets_sup_SUITE_data/simple.conf
deleted file mode 100644
index e1429b4a28..0000000000
--- a/lib/inets/test/inets_sup_SUITE_data/simple.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-Port 8888
-ServerName www.test
-SocketType ip_comm
-Modules mod_get
-ServerAdmin [email protected]
-
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 49a93d2c70..00c6bc33d6 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -4,7 +4,7 @@
<appref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2014</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -35,7 +35,7 @@
Erlang/OTP consists of Kernel and STDLIB. The Kernel application
contains the following services:</p>
<list type="bulleted">
- <item>application controller, see <c>application(3)</c></item>
+ <item>application controller, see <seealso marker="application">application(3)</seealso></item>
<item><c>code</c></item>
<item><c>disk_log</c></item>
<item><c>dist_ac</c>, distributed application controller</item>
@@ -66,8 +66,8 @@
<section>
<title>Configuration</title>
<p>The following configuration parameters are defined for the Kernel
- application. See <c>app(3)</c> for more information about
- configuration parameters.</p>
+ application. See <seealso marker="app">app(4)</seealso> for more
+ information about configuration parameters.</p>
<taglist>
<tag><c>browser_cmd = string() | {M,F,A}</c></tag>
<item>
@@ -93,7 +93,8 @@
<item><c>Time = integer()>0</c></item>
<item><c>Nodes = [node() | {node(),...,node()}]</c></item>
</list>
- <p>The parameter is described in <c>application(3)</c>, function
+ <p>The parameter is described in
+ <seealso marker="application">application(3)</seealso>, function
<c>load/2</c>.</p>
</item>
<tag><c>dist_auto_connect = Value</c></tag>
@@ -105,11 +106,13 @@
<taglist>
<tag><c>never</c></tag>
<item>Connections are never automatically established, they
- must be explicitly connected. See <c>net_kernel(3)</c>.</item>
+ must be explicitly connected. See
+ <seealso marker="net_kernel">net_kernel(3)</seealso>.</item>
<tag><c>once</c></tag>
<item>Connections will be established automatically, but only
once per node. If a node goes down, it must thereafter be
- explicitly connected. See <c>net_kernel(3)</c>.</item>
+ explicitly connected. See
+ <seealso marker="net_kernel">net_kernel(3)</seealso>.</item>
</taglist>
</item>
<tag><c>permissions = [Perm]</c></tag>
@@ -121,7 +124,8 @@
<item><c>ApplName = atom()</c></item>
<item><c>Bool = boolean()</c></item>
</list>
- <p>Permissions are described in <c>application(3)</c>, function
+ <p>Permissions are described in
+ <seealso marker="application">application(3)</seealso>, function
<c>permit/2</c>.</p>
</item>
<tag><c>error_logger = Value</c></tag>
@@ -149,7 +153,8 @@
</item>
<tag><c>global_groups = [GroupTuple]</c></tag>
<item>
- <p>Defines global groups, see <c>global_group(3)</c>.</p>
+ <p>Defines global groups, see
+ <seealso marker="global_group">global_group(3)</seealso>.</p>
<list type="bulleted">
<item><c>GroupTuple = {GroupName, [Node]} | {GroupName, PublishType, [Node]}</c></item>
<item><c>GroupName = atom()</c></item>
@@ -160,18 +165,19 @@
<tag><c>inet_default_connect_options = [{Opt, Val}]</c></tag>
<item>
<p>Specifies default options for <c>connect</c> sockets,
- see <c>inet(3)</c>.</p>
+ see <seealso marker="inet">inet(3)</seealso>.</p>
</item>
<tag><c>inet_default_listen_options = [{Opt, Val}]</c></tag>
<item>
<p>Specifies default options for <c>listen</c> (and
- <c>accept</c>) sockets, see <c>inet(3)</c>.</p>
+ <c>accept</c>) sockets, see <seealso marker="inet">inet(3)</seealso>.</p>
</item>
<tag><c>{inet_dist_use_interface, ip_address()}</c></tag>
<item>
<p>If the host of an Erlang node has several network interfaces,
this parameter specifies which one to listen on. See
- <c>inet(3)</c> for the type definition of <c>ip_address()</c>.</p>
+ <seealso marker="inet">inet(3)</seealso> for the type definition
+ of <c>ip_address()</c>.</p>
</item>
<tag><c>{inet_dist_listen_min, First}</c></tag>
<item>
@@ -276,7 +282,8 @@ MaxT = TickTime + TickTime / 4</code>
<tag><c>start_boot_server = true | false</c></tag>
<item>
<p>Starts the <c>boot_server</c> if the parameter is <c>true</c>
- (see <c>erl_boot_server(3)</c>). This parameter should be
+ (see <seealso marker="erl_boot_server">erl_boot_server(3)</seealso>).
+ This parameter should be
set to <c>true</c> in an embedded system which uses this
service.</p>
<p>The default value is <c>false</c>.</p>
@@ -296,13 +303,15 @@ MaxT = TickTime + TickTime / 4</code>
<tag><c>start_disk_log = true | false</c></tag>
<item>
<p>Starts the <c>disk_log_server</c> if the parameter is
- <c>true</c> (see <c>disk_log(3)</c>). This parameter should be
+ <c>true</c> (see <seealso marker="disk_log">disk_log(3)</seealso>).
+ This parameter should be
set to true in an embedded system which uses this service.</p>
<p>The default value is <c>false</c>.</p>
</item>
<tag><c>start_pg2 = true | false</c></tag>
<item>
- <p>Starts the <c>pg2</c> server (see <c>pg2(3)</c>) if
+ <p>Starts the <c>pg2</c> server (see
+ <seealso marker="pg2">pg2(3)</seealso>) if
the parameter is <c>true</c>. This parameter should be set to
<c>true</c> in an embedded system which uses this service.</p>
<p>The default value is <c>false</c>.</p>
@@ -310,7 +319,8 @@ MaxT = TickTime + TickTime / 4</code>
<tag><c>start_timer = true | false</c></tag>
<item>
<p>Starts the <c>timer_server</c> if the parameter is
- <c>true</c> (see <c>timer(3)</c>). This parameter should be
+ <c>true</c> (see <seealso marker="stdlib:timer">timer(3)</seealso>).
+ This parameter should be
set to <c>true</c> in an embedded system which uses this
service.</p>
<p>The default value is <c>false</c>.</p>
@@ -351,6 +361,7 @@ MaxT = TickTime + TickTime / 4</code>
<seealso marker="pg2">pg2(3)</seealso>,
<seealso marker="rpc">rpc(3)</seealso>,
<seealso marker="seq_trace">seq_trace(3)</seealso>,
+ <seealso marker="stdlib:timer">timer(3)</seealso>,
<seealso marker="user">user(3)</seealso></p>
</section>
</appref>
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index ed13035104..daad45b6c2 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -829,12 +829,12 @@ handle_call({change_application_data, Applications, Config}, _From, S) ->
{reply, Error, S};
{'EXIT', R} ->
{reply, {error, R}, S};
- NewAppls ->
+ {NewAppls, NewConfig} ->
lists:foreach(fun(Appl) ->
ets:insert(ac_tab, {{loaded, Appl#appl.name},
Appl})
end, NewAppls),
- {reply, ok, S#state{conf_data = Config}}
+ {reply, ok, S#state{conf_data = NewConfig}}
end;
handle_call(prep_config_change, _From, S) ->
@@ -1550,18 +1550,19 @@ do_change_apps(Applications, Config, OldAppls) ->
end,
Errors),
- map(fun(Appl) ->
- AppName = Appl#appl.name,
- case is_loaded_app(AppName, Applications) of
- {true, Application} ->
- do_change_appl(make_appl(Application),
- Appl, SysConfig);
-
- %% ignored removed apps - handled elsewhere
- false ->
- Appl
- end
- end, OldAppls).
+ {map(fun(Appl) ->
+ AppName = Appl#appl.name,
+ case is_loaded_app(AppName, Applications) of
+ {true, Application} ->
+ do_change_appl(make_appl(Application),
+ Appl, SysConfig);
+
+ %% ignored removed apps - handled elsewhere
+ false ->
+ Appl
+ end
+ end, OldAppls),
+ SysConfig}.
is_loaded_app(AppName, [{application, AppName, App} | _]) ->
{true, {application, AppName, App}};
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index c6cbd1a0ef..036e238c85 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1949,14 +1949,22 @@ config_change(Conf) when is_list(Conf) ->
%% Find out application data from boot script
Boot = filename:join([code:root_dir(), "bin", "start.boot"]),
{ok, Bin} = file:read_file(Boot),
- Appls = get_appls(binary_to_term(Bin)),
+ Appls0 = get_appls(binary_to_term(Bin)),
+
+ %% And add app1 in order to test OTP-11864 - included config files
+ %% not read for new (not already loaded) applications
+ Appls = [app1() | Appls0],
%% Simulate contents of "sys.config"
Config = [{stdlib, [{par1,sys},{par2,sys}]},
"t1",
"t2.config",
filename:join([DataDir, "subdir", "t3"]),
- {stdlib, [{par6,sys}]}],
+ {stdlib, [{par6,sys}]},
+ "t4.config"],
+
+ %% Check that app1 is not loaded
+ false = lists:keymember(app1,1,application:loaded_applications()),
%% Order application_controller to update configuration
ok = application_controller:change_application_data(Appls,
@@ -1971,6 +1979,13 @@ config_change(Conf) when is_list(Conf) ->
{value, {par5,t3}} = lists:keysearch(par5, 1, Env),
{value, {par6,sys}} = lists:keysearch(par6, 1, Env),
+ %% Check that app1 parameters are correctly set after loading
+ [] = application:get_all_env(app1),
+ application:load(app1()),
+ App1Env = application:get_all_env(app1),
+ {value, {par1,t4}} = lists:keysearch(par1, 1, App1Env),
+ application:unload(app1),
+
ok = file:set_cwd(CWD).
%% This function is stolen from SASL module release_handler, OTP R10B
@@ -1989,6 +2004,7 @@ get_appls([_ | T], Res) ->
get_appls([], Res) ->
Res.
+
persistent_env(suite) ->
[];
persistent_env(doc) ->
diff --git a/lib/kernel/test/application_SUITE_data/t4.config b/lib/kernel/test/application_SUITE_data/t4.config
new file mode 100644
index 0000000000..8b2bc52c01
--- /dev/null
+++ b/lib/kernel/test/application_SUITE_data/t4.config
@@ -0,0 +1 @@
+[{app1, [{par1,t4}]}]. \ No newline at end of file
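The new t4.config entry mirrors how included configuration files appear in a sys.config: file names given as strings are read and merged with the plain {App, Env} tuples, and after this fix that also happens for applications that are not yet loaded. A hypothetical configuration of that shape:

    %% sys.config (sketch): "t4.config" is resolved relative to this file,
    %% and app1 does not have to be loaded when the configuration changes.
    [{stdlib, [{par1, sys}]},
     "t4.config",
     {app1, [{par2, example}]}].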
diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl
index 2c741232c4..123e849ccb 100644
--- a/lib/kernel/test/sendfile_SUITE.erl
+++ b/lib/kernel/test/sendfile_SUITE.erl
@@ -72,7 +72,12 @@ end_per_suite(Config) ->
file:delete(proplists:get_value(big_file, Config)).
init_per_group(async_threads,Config) ->
- [{sendfile_opts,[{use_threads,true}]}|Config];
+ case erlang:system_info(thread_pool_size) of
+ 0 ->
+ {skip,"No async threads"};
+ _ ->
+ [{sendfile_opts,[{use_threads,true}]}|Config]
+ end;
init_per_group(no_async_threads,Config) ->
[{sendfile_opts,[{use_threads,false}]}|Config].
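The new guard keys off the async-thread pool, whose size is fixed when the emulator boots (the +A flag) and can be inspected at run time. For a node started with, say, erl +A 10:

    1> erlang:system_info(thread_pool_size).
    10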
diff --git a/lib/reltool/test/reltool_server_SUITE.erl b/lib/reltool/test/reltool_server_SUITE.erl
index bfe5d39d53..b3b7afd1a9 100644
--- a/lib/reltool/test/reltool_server_SUITE.erl
+++ b/lib/reltool/test/reltool_server_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -1205,14 +1205,9 @@ create_slim(Config) ->
RootDir = code:root_dir(),
Erl = filename:join([RootDir, "bin", "erl"]),
- EscapedQuote =
- case os:type() of
- {win32,_} -> "\\\"";
- _ -> "\""
- end,
Args = ["-boot_var", "RELTOOL_EXT_LIB", TargetLibDir,
"-boot", filename:join(TargetRelVsnDir,RelName),
- "-sasl", "releases_dir", EscapedQuote++TargetRelDir++EscapedQuote],
+ "-sasl", "releases_dir", "\""++TargetRelDir++"\""],
{ok, Node} = ?msym({ok, _}, start_node(?NODE_NAME, Erl, Args)),
?msym(RootDir, rpc:call(Node, code, root_dir, [])),
wait_for_app(Node,sasl,50),
diff --git a/lib/reltool/test/reltool_test_lib.erl b/lib/reltool/test/reltool_test_lib.erl
index 530d0a9985..fa12f19aa7 100644
--- a/lib/reltool/test/reltool_test_lib.erl
+++ b/lib/reltool/test/reltool_test_lib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -20,12 +20,13 @@
-compile(export_all).
-include("reltool_test_lib.hrl").
+-define(timeout, 20). % minutes
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
init_per_suite(Config) when is_list(Config)->
global:register_name(reltool_global_logger, group_leader()),
- incr_timetrap(Config, 10).
+ incr_timetrap(Config, ?timeout).
end_per_suite(Config) when is_list(Config)->
global:unregister_name(reltool_global_logger),
@@ -51,7 +52,7 @@ set_kill_timer(Config) ->
Time =
case lookup_config(tc_timeout, Config) of
[] ->
- timer:minutes(10);
+ timer:minutes(?timeout);
ConfigTime when is_integer(ConfigTime) ->
ConfigTime
end,
diff --git a/lib/sasl/test/release_handler_SUITE.erl b/lib/sasl/test/release_handler_SUITE.erl
index 1d8bf45289..8a635796b7 100644
--- a/lib/sasl/test/release_handler_SUITE.erl
+++ b/lib/sasl/test/release_handler_SUITE.erl
@@ -1410,18 +1410,41 @@ upgrade_supervisor_fail(Conf) when is_list(Conf) ->
{error,{code_change_failed,_Pid,a_sup,_Vsn,
{error,{invalid_shutdown,brutal_kil}}}} =
- rpc:call(Node, release_handler, install_release, [RelVsn2]),
-
- %% Check that the upgrade is terminated - normally this would mean
- %% rollback, but since this testcase is very simplified the node
- %% is not started with heart supervision and will therefore not be
- %% restarted. So we just check that the node goes down.
+ rpc:call(Node, release_handler, install_release,
+ [RelVsn2, [{error_action,reboot}]]),
+
+ %% Check that the upgrade is terminated - normally this would be a
+ %% rollback, but
+ %%
+ %% 1. Default rollback is done with init:restart(), which does not
+ %% reboot the emulator, it only restarts the system inside the
+ %% running erlang node.
+ %%
+ %% 2. This does not work well on a slave node since, if timing is
+ %% right (bad), the slave node will get the nodedown from its
+ %% master (because distribution is terminated as part of
+ %% init:restart()) and then it will do halt() and thus never be
+ %% restarted (see slave:wloop/1)
+ %%
+ %% 3. Sometimes, though, init:restart() will manage to finish its
+ %% job before the nodedown is received, making the node
+ %% actually restart - in which case it might very well confuse
+ %% the next test case.
+ %%
+ %% 4. So, to avoid instability we use {error_action,reboot} above,
+ %% to ensure that the node is actually stopped. Of course, in a
+ %% real system this must be used together with heart
+ %% supervision, and then the node will be restarted anyway. But
+ %% here in this simple test case we are satisfied to see that
+ %% the node terminates.
receive {nodedown,Node} -> ok
after 10000 -> ct:fail(failed_upgrade_never_restarted_node)
end,
ok.
+upgrade_supervisor_fail(cleanup,_Condf) ->
+ stop_node(node_name(upgrade_supervisor_fail)).
%% Test upgrade and downgrade of applications
eval_appup(Conf) when is_list(Conf) ->
diff --git a/lib/sasl/test/systools_SUITE.erl b/lib/sasl/test/systools_SUITE.erl
index 1d3a71e94e..49a4303e0b 100644
--- a/lib/sasl/test/systools_SUITE.erl
+++ b/lib/sasl/test/systools_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2012-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2012-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -1615,9 +1615,19 @@ no_sasl_relup(Config) when is_list(Config) ->
%% make_relup: Check that application start type is used in relup
app_start_type_relup(Config) when is_list(Config) ->
+ %% This might fail if some applications are not available; if so,
+ %% skip the test case.
+ try create_script(latest_app_start_type2,Config) of
+ {Dir2,Name2} ->
+ app_start_type_relup(Dir2,Name2,Config)
+ catch throw:{error,Reason} ->
+ {skip,Reason}
+ end.
+
+app_start_type_relup(Dir2,Name2,Config) ->
PrivDir = ?config(priv_dir, Config),
{Dir1,Name1} = create_script(latest_app_start_type1,Config),
- {Dir2,Name2} = create_script(latest_app_start_type2,Config),
+
Release1 = filename:join(Dir1,Name1),
Release2 = filename:join(Dir2,Name2),
@@ -2242,9 +2252,13 @@ app_vsns(AppVsns) ->
[{App,app_vsn(App,Vsn)} || {App,Vsn} <- AppVsns] ++
[{App,app_vsn(App,Vsn),Type} || {App,Vsn,Type} <- AppVsns].
app_vsn(App,current) ->
- application:load(App),
- {ok,Vsn} = application:get_key(App,vsn),
- Vsn;
+ case application:load(App) of
+ Ok when Ok==ok; Ok=={error,{already_loaded,App}} ->
+ {ok,Vsn} = application:get_key(App,vsn),
+ Vsn;
+ Error ->
+ throw(Error)
+ end;
app_vsn(_App,Vsn) ->
Vsn.
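The already_loaded-tolerant pattern above is generally useful when a helper cannot know whether an application was loaded earlier; a small sketch (ensure_loaded/1 is not an OTP function, just an illustration of the same case expression):

    %% Treat "already loaded" as success; propagate any other load error.
    ensure_loaded(App) ->
        case application:load(App) of
            ok -> ok;
            {error, {already_loaded, App}} -> ok;
            {error, _Reason} = Error -> Error
        end.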
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 7fbd70c87e..5a141ced3c 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -307,18 +307,31 @@
<tag><c><![CDATA[{negotiation_timeout, integer()}]]></c></tag>
<item>
- <p>Max time in milliseconds for the authentication negotiation. The default value is 2 minutes.
+ <p>Max time in milliseconds for the authentication negotiation. The default value is 2 minutes. If the client fails to log in within this time, the connection is closed.
+ </p>
+ </item>
+
+ <tag><c><![CDATA[{max_sessions, pos_integer()}]]></c></tag>
+ <item>
+ <p>The maximum number of simultaneous sessions that are accepted at any time for this daemon. This includes sessions that are being authorized. So if set to <c>N</c>, and <c>N</c> clients have connected but not started the login process, the <c>N+1</c>st connection attempt will be aborted. If <c>N</c> connections are authenticated and still logged in, no more logins will be accepted until one of the existing ones logs out.
+ </p>
+ <p>The counter is per listening port, so if two daemons are started, one with <c>{max_sessions,N}</c> and the other with <c>{max_sessions,M}</c>, a total of <c>N+M</c> connections will be accepted for the whole ssh application.
+ </p>
+ <p>Note that if <c>parallel_login</c> is <c>false</c>, only one client at a time may be in the authentication phase.
+ </p>
+ <p>By default, the option is not set, which means that the number of sessions is not limited.
</p>
</item>
<tag><c><![CDATA[{parallel_login, boolean()}]]></c></tag>
<item>
- <p>If set to false (the default value), only one login is handled a time. If set to true, an unlimited logins will be allowed simultanously. Note that this affects only the connections with authentication in progress, not the already authenticated connections.
+ <p>If set to false (the default value), only one login is handled at a time. If set to true, an unlimited number of login attempts will be allowed simultaneously.
+ </p>
+ <p>If the <c>max_sessions</c> option is set to <c>N</c> and <c>parallel_login</c> is set to <c>true</c>, the max number of simultaneous login attempts at any time is limited to <c>N-K</c> where <c>K</c> is the number of authenticated connections present at this daemon.
</p>
<warning>
- <p>Do not enable parallel_logins without protecting the server by other means like a firewall. If set to true, there is no protection against dos attacs.</p>
+ <p>Do not enable <c>parallel_login</c> without protecting the server by other means, for example the <c>max_sessions</c> option or a firewall configuration. If set to <c>true</c>, there is no protection against DoS attacks.</p>
</warning>
-
</item>
<tag><c><![CDATA[{key_cb, atom()}]]></c></tag>
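Taken together, max_sessions and parallel_login could be combined when starting a daemon. A minimal sketch (the port, system_dir path and credentials are placeholders):

    ok = ssh:start(),
    {ok, _Daemon} =
        ssh:daemon(8022, [{system_dir, "/tmp/ssh_daemon"},        %% host key directory (placeholder)
                          {user_passwords, [{"carni", "meat"}]},  %% placeholder credentials
                          {parallel_login, true},                 %% allow concurrent login attempts,
                          {max_sessions, 2}]).                    %% but never more than two sessions

With these options a third simultaneous session is refused and an info report is logged, as exercised by the max_sessions test case later in this patch.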
diff --git a/lib/ssh/src/ssh.erl b/lib/ssh/src/ssh.erl
index de6e8cc421..75081b7a61 100644
--- a/lib/ssh/src/ssh.erl
+++ b/lib/ssh/src/ssh.erl
@@ -332,6 +332,8 @@ handle_option([{idle_time, _} = Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([{rekey_limit, _} = Opt|Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
+handle_option([{max_sessions, _} = Opt|Rest], SocketOptions, SshOptions) ->
+ handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([{negotiation_timeout, _} = Opt|Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([{parallel_login, _} = Opt|Rest], SocketOptions, SshOptions) ->
@@ -366,6 +368,8 @@ handle_ssh_option({pref_public_key_algs, Value} = Opt) when is_list(Value), leng
end;
handle_ssh_option({connect_timeout, Value} = Opt) when is_integer(Value); Value == infinity ->
Opt;
+handle_ssh_option({max_sessions, Value} = Opt) when is_integer(Value), Value>0 ->
+ Opt;
handle_ssh_option({negotiation_timeout, Value} = Opt) when is_integer(Value); Value == infinity ->
Opt;
handle_ssh_option({parallel_login, Value} = Opt) when Value==true ; Value==false ->
diff --git a/lib/ssh/src/ssh_acceptor.erl b/lib/ssh/src/ssh_acceptor.erl
index e57b07cee8..7302196674 100644
--- a/lib/ssh/src/ssh_acceptor.erl
+++ b/lib/ssh/src/ssh_acceptor.erl
@@ -80,18 +80,36 @@ acceptor_loop(Callback, Port, Address, Opts, ListenSocket, AcceptTimeout) ->
ListenSocket, AcceptTimeout)
end.
-handle_connection(_Callback, Address, Port, Options, Socket) ->
+handle_connection(Callback, Address, Port, Options, Socket) ->
SystemSup = ssh_system_sup:system_supervisor(Address, Port),
- {ok, SubSysSup} = ssh_system_sup:start_subsystem(SystemSup, Options),
- ConnectionSup = ssh_subsystem_sup:connection_supervisor(SubSysSup),
- Timeout = proplists:get_value(negotiation_timeout,
- proplists:get_value(ssh_opts, Options, []),
- 2*60*1000),
- ssh_connection_handler:start_connection(server, Socket,
- [{supervisors, [{system_sup, SystemSup},
- {subsystem_sup, SubSysSup},
- {connection_sup, ConnectionSup}]}
- | Options], Timeout).
+ SSHopts = proplists:get_value(ssh_opts, Options, []),
+ MaxSessions = proplists:get_value(max_sessions,SSHopts,infinity),
+ case number_of_connections(SystemSup) < MaxSessions of
+ true ->
+ {ok, SubSysSup} = ssh_system_sup:start_subsystem(SystemSup, Options),
+ ConnectionSup = ssh_subsystem_sup:connection_supervisor(SubSysSup),
+ Timeout = proplists:get_value(negotiation_timeout, SSHopts, 2*60*1000),
+ ssh_connection_handler:start_connection(server, Socket,
+ [{supervisors, [{system_sup, SystemSup},
+ {subsystem_sup, SubSysSup},
+ {connection_sup, ConnectionSup}]}
+ | Options], Timeout);
+ false ->
+ Callback:close(Socket),
+ IPstr = if is_tuple(Address) -> inet:ntoa(Address);
+ true -> Address
+ end,
+ Str = try io_lib:format('~s:~p',[IPstr,Port])
+ catch _:_ -> "port "++integer_to_list(Port)
+ end,
+ error_logger:info_report("Ssh login attempt to "++Str++" denied due to the option "
+ "max_sessions, which limits this daemon to "++ io_lib:write(MaxSessions) ++
+ " simultaneous sessions."
+ ),
+ {error,max_sessions}
+ end.
+
+
handle_error(timeout) ->
ok;
@@ -117,3 +135,10 @@ handle_error(Reason) ->
String = lists:flatten(io_lib:format("Accept error: ~p", [Reason])),
error_logger:error_report(String),
exit({accept_failed, String}).
+
+
+number_of_connections(SystemSup) ->
+ length([X ||
+ {R,X,supervisor,[ssh_subsystem_sup]} <- supervisor:which_children(SystemSup),
+ is_reference(R)
+ ]).
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 322da50f21..06866392da 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -1482,8 +1482,7 @@ ssh_channel_info([ _ | Rest], Channel, Acc) ->
log_error(Reason) ->
Report = io_lib:format("Erlang ssh connection handler failed with reason: "
- "~p ~n, Stacktace: ~p ~n"
- "please report this to [email protected] \n",
+ "~p ~n, Stacktrace: ~p ~n",
[Reason, erlang:get_stacktrace()]),
error_logger:error_report(Report),
"Internal error".
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index d2e52379fa..a8b64b1425 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -47,21 +47,26 @@ all() ->
daemon_already_started,
server_password_option,
server_userpassword_option,
- double_close].
+ double_close,
+ {group, hardening_tests}
+ ].
groups() ->
[{dsa_key, [], basic_tests()},
{rsa_key, [], basic_tests()},
{dsa_pass_key, [], [pass_phrase]},
{rsa_pass_key, [], [pass_phrase]},
- {internal_error, [], [internal_error]}
+ {internal_error, [], [internal_error]},
+ {hardening_tests, [], [max_sessions]}
].
+
basic_tests() ->
[send, close, peername_sockname,
exec, exec_compressed, shell, cli, known_hosts,
idle_time, rekey, openssh_zlib_basic_test].
+
%%--------------------------------------------------------------------
init_per_suite(Config) ->
case catch crypto:start() of
@@ -74,6 +79,8 @@ end_per_suite(_Config) ->
ssh:stop(),
crypto:stop().
%%--------------------------------------------------------------------
+init_per_group(hardening_tests, Config) ->
+ init_per_group(dsa_key, Config);
init_per_group(dsa_key, Config) ->
DataDir = ?config(data_dir, Config),
PrivDir = ?config(priv_dir, Config),
@@ -103,6 +110,8 @@ init_per_group(internal_error, Config) ->
init_per_group(_, Config) ->
Config.
+end_per_group(hardening_tests, Config) ->
+ end_per_group(dsa_key, Config);
end_per_group(dsa_key, Config) ->
PrivDir = ?config(priv_dir, Config),
ssh_test_lib:clean_dsa(PrivDir),
@@ -639,6 +648,49 @@ openssh_zlib_basic_test(Config) ->
ssh:stop_daemon(Pid).
%%--------------------------------------------------------------------
+
+max_sessions(Config) ->
+ SystemDir = filename:join(?config(priv_dir, Config), system),
+ UserDir = ?config(priv_dir, Config),
+ MaxSessions = 2,
+ {Pid, Host, Port} = ssh_test_lib:daemon([{system_dir, SystemDir},
+ {user_dir, UserDir},
+ {user_passwords, [{"carni", "meat"}]},
+ {parallel_login, true},
+ {max_sessions, MaxSessions}
+ ]),
+
+ Connect = fun() ->
+ R=ssh_test_lib:connect(Host, Port, [{silently_accept_hosts, true},
+ {user_dir, UserDir},
+ {user_interaction, false},
+ {user, "carni"},
+ {password, "meat"}
+ ]),
+ ct:log("Connection ~p up",[R])
+ end,
+
+ try [Connect() || _ <- lists:seq(1,MaxSessions)]
+ of
+ _ ->
+ ct:pal("Expect Info Report:",[]),
+ try Connect()
+ of
+ _ConnectionRef ->
+ ssh:stop_daemon(Pid),
+ {fail,"Too many connections accepted"}
+ catch
+ error:{badmatch,{error,"Connection closed"}} ->
+ ssh:stop_daemon(Pid),
+ ok
+ end
+ catch
+ error:{badmatch,{error,"Connection closed"}} ->
+ ssh:stop_daemon(Pid),
+ {fail,"Too few connections accepted"}
+ end.
+
+%%--------------------------------------------------------------------
%% Internal functions ------------------------------------------------
%%--------------------------------------------------------------------
diff --git a/lib/ssl/internal_doc/ssl-implementation.txt b/lib/ssl/internal_doc/ssl-implementation.txt
deleted file mode 100644
index e5d6ac8cd0..0000000000
--- a/lib/ssl/internal_doc/ssl-implementation.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-
-Important modules:
-
- module behaviour children
- ------ ---------
- ssl_app application ssl_sup
- ssl_sup supervisor ssl_server, ssl_broker_sup
- ssl_server gen_server -
- ssl_broker_sup supervisor ssl_broker
- ssl_broker gen_server -
-
-The ssl_server controls a port program that implements the SSL functionality.
-That port program uses the OpenSSL package.
-
-Each socket has a corresponding broker (listen, accept or connect). A broker
-is created and supervised by the ssl_broker_sup.
-
-All communication is between a user and a broker. The broker communicates
-with the ssl_server, that sends its commands to the port program and handles
-the port program responses, that are distributed to users through the
-brokers.
-
-There is a distinction between commands and data flow between the ssl_server
-and the port program. Each established connection between the user and the
-outside world consists of a local erlang socket (owned by the broker) that
-is read from and written to by the broker. At the other end of the local
-connection is a local socket in the port program.
-
-The "real" socket that connects to the outside world is in the port program
-(including listen sockets). The main purpose of the port program is to
-shuffle data between local sockets and outside world sockets, and detect and
-propagate read and write errors (including detection of closed sockets) to
-the ssl_server.
-
-There is documentation in the ssl_broker.erl module.
-
-There is also documentation in the esock.c and esock_openssl.c files.
-
-The ssl_pem.erl, ssl_pkix.erl and ssl_base64.erl modules are support
-modules for reading SSL certificates. Modules for parsing certificates
-are generated from ASN.1 modules in the `pkix' directory.
-
-The `examples' directory contains functions for generating certificates.
-Those certificates are used in the test suites.
-
-
-
-
-
-
-
-
diff --git a/lib/ssl/src/dtls_connection.erl b/lib/ssl/src/dtls_connection.erl
index 57f8dd86d3..508983ddac 100644
--- a/lib/ssl/src/dtls_connection.erl
+++ b/lib/ssl/src/dtls_connection.erl
@@ -202,13 +202,14 @@ hello(Hello = #client_hello{client_version = ClientVersion,
session_cache = Cache,
session_cache_cb = CacheCb,
ssl_options = SslOpts}) ->
- HashSign = ssl_handshake:select_hashsign(HashSigns, Cert),
case dtls_handshake:hello(Hello, SslOpts, {Port, Session0, Cache, CacheCb,
ConnectionStates0, Cert}, Renegotiation) of
{Version, {Type, Session},
ConnectionStates,
#hello_extensions{ec_point_formats = EcPointFormats,
elliptic_curves = EllipticCurves} = ServerHelloExt} ->
+ HashSign = ssl_handshake:select_hashsign(HashSigns, Cert,
+ dtls_v1:corresponding_tls_version(Version)),
ssl_connection:hello({common_client_hello, Type, ServerHelloExt, HashSign},
State#state{connection_states = ConnectionStates,
negotiated_version = Version,
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index 743753bf7d..866312f332 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -357,11 +357,7 @@ cipher_suites(openssl) ->
[ssl_cipher:openssl_suite_name(S) || S <- ssl_cipher:suites(Version)];
cipher_suites(all) ->
Version = tls_record:highest_protocol_version([]),
- Supported = ssl_cipher:suites(Version)
- ++ ssl_cipher:anonymous_suites()
- ++ ssl_cipher:psk_suites(Version)
- ++ ssl_cipher:srp_suites(),
- [suite_definition(S) || S <- Supported].
+ [suite_definition(S) || S <- ssl_cipher:all_suites(Version)].
%%--------------------------------------------------------------------
-spec getopts(#sslsocket{}, [gen_tcp:option_name()]) ->
@@ -953,7 +949,7 @@ handle_cipher_option(Value, Version) when is_list(Value) ->
error:_->
throw({error, {options, {ciphers, Value}}})
end.
-binary_cipher_suites(Version, []) -> %% Defaults to all supported suits
+binary_cipher_suites(Version, []) -> % Defaults to all supported suites
ssl_cipher:suites(Version);
binary_cipher_suites(Version, [{_,_,_,_}| _] = Ciphers0) -> %% Backwards compatibility
Ciphers = [{KeyExchange, Cipher, Hash} || {KeyExchange, Cipher, Hash, _} <- Ciphers0],
diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl
index 78a328ace8..a3ec419c2a 100644
--- a/lib/ssl/src/ssl_cipher.erl
+++ b/lib/ssl/src/ssl_cipher.erl
@@ -34,7 +34,8 @@
-export([security_parameters/2, security_parameters/3, suite_definition/1,
decipher/5, cipher/5,
- suite/1, suites/1, ec_keyed_suites/0, anonymous_suites/0, psk_suites/1, srp_suites/0,
+ suite/1, suites/1, all_suites/1,
+ ec_keyed_suites/0, anonymous_suites/0, psk_suites/1, srp_suites/0,
openssl_suite/1, openssl_suite_name/1, filter/2, filter_suites/1,
hash_algorithm/1, sign_algorithm/1, is_acceptable_hash/2]).
@@ -224,6 +225,11 @@ suites({3, 0}) ->
suites({3, N}) ->
tls_v1:suites(N).
+all_suites(Version) ->
+ suites(Version)
+ ++ ssl_cipher:anonymous_suites()
+ ++ ssl_cipher:psk_suites(Version)
+ ++ ssl_cipher:srp_suites().
%%--------------------------------------------------------------------
-spec anonymous_suites() -> [cipher_suite()].
%%
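The difference is also visible at the API level: ssl:cipher_suites/0 keeps returning only the default suites, while ssl:cipher_suites(all) now goes through ssl_cipher:all_suites/1 and therefore also lists the anonymous, PSK and SRP suites. A sketch of what to expect (the exact lists depend on the underlying crypto library):

    ok = ssl:start(),
    true = length(ssl:cipher_suites(all)) > length(ssl:cipher_suites()).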
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index c2810a199f..1eda926bcb 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -290,12 +290,11 @@ hello(#hello_request{}, #state{role = client} = State0, Connection) ->
{Record, State} = Connection:next_record(State0),
Connection:next_state(hello, hello, Record, State);
-hello({common_client_hello, Type, ServerHelloExt, HashSign},
- #state{session = #session{cipher_suite = CipherSuite},
- negotiated_version = Version} = State, Connection) ->
- {KeyAlg, _, _, _} = ssl_cipher:suite_definition(CipherSuite),
- NegotiatedHashSign = negotiated_hashsign(HashSign, KeyAlg, Version),
+hello({common_client_hello, Type, ServerHelloExt, NegotiatedHashSign},
+ State, Connection) ->
do_server_hello(Type, ServerHelloExt,
+ %% Note NegotiatedHashSign is only negotiated for real if
+ %% the TLS version is at least TLS-1.2
State#state{hashsign_algorithm = NegotiatedHashSign}, Connection);
hello(timeout, State, _) ->
@@ -432,7 +431,8 @@ certify(#server_key_exchange{exchange_keys = Keys},
calculate_secret(Params#server_key_params.params,
State#state{hashsign_algorithm = HashSign}, Connection);
false ->
- ?ALERT_REC(?FATAL, ?DECRYPT_ERROR)
+ Connection:handle_own_alert(?ALERT_REC(?FATAL, ?DECRYPT_ERROR),
+ Version, certify, State)
end
end;
@@ -441,8 +441,9 @@ certify(#server_key_exchange{} = Msg,
Connection:handle_unexpected_message(Msg, certify_server_keyexchange, State);
certify(#certificate_request{hashsign_algorithms = HashSigns},
- #state{session = #session{own_certificate = Cert}} = State0, Connection) ->
- HashSign = ssl_handshake:select_hashsign(HashSigns, Cert),
+ #state{session = #session{own_certificate = Cert},
+ negotiated_version = Version} = State0, Connection) ->
+ HashSign = ssl_handshake:select_hashsign(HashSigns, Cert, Version),
{Record, State} = Connection:next_record(State0#state{client_certificate_requested = true}),
Connection:next_state(certify, certify, Record,
State#state{cert_hashsign_algorithm = HashSign});
@@ -559,7 +560,7 @@ cipher(#certificate_verify{signature = Signature, hashsign_algorithm = CertHashS
tls_handshake_history = Handshake
} = State0, Connection) ->
- HashSign = ssl_handshake:select_cert_hashsign(CertHashSign, Algo, Version),
+ HashSign = ssl_handshake:select_hashsign_algs(CertHashSign, Algo, Version),
case ssl_handshake:certificate_verify(Signature, PublicKeyInfo,
Version, HashSign, MasterSecret, Handshake) of
valid ->
@@ -696,7 +697,11 @@ handle_sync_event({shutdown, How0}, _, StateName,
Error ->
{stop, normal, Error, State}
end;
-
+
+handle_sync_event({recv, _N, _Timeout}, _RecvFrom, StateName,
+ #state{socket_options = #socket_options{active = Active}} = State) when Active =/= false ->
+ {reply, {error, einval}, StateName, State, get_timeout(State)};
+
handle_sync_event({recv, N, Timeout}, RecvFrom, connection = StateName,
#state{protocol_cb = Connection} = State0) ->
Timer = start_or_recv_cancel_timer(Timeout, RecvFrom),
@@ -1559,60 +1564,6 @@ cipher_role(server, Data, Session, #state{connection_states = ConnectionStates0
session = Session}, cipher, Connection),
Connection:next_state_connection(cipher, ack_connection(State#state{session = Session})).
-negotiated_hashsign(undefined, Algo, Version) ->
- default_hashsign(Version, Algo);
-negotiated_hashsign(HashSign = {_, _}, _, _) ->
- HashSign.
-
-%% RFC 5246, Sect. 7.4.1.4.1. Signature Algorithms
-%% If the client does not send the signature_algorithms extension, the
-%% server MUST do the following:
-%%
-%% - If the negotiated key exchange algorithm is one of (RSA, DHE_RSA,
-%% DH_RSA, RSA_PSK, ECDH_RSA, ECDHE_RSA), behave as if client had
-%% sent the value {sha1,rsa}.
-%%
-%% - If the negotiated key exchange algorithm is one of (DHE_DSS,
-%% DH_DSS), behave as if the client had sent the value {sha1,dsa}.
-%%
-%% - If the negotiated key exchange algorithm is one of (ECDH_ECDSA,
-%% ECDHE_ECDSA), behave as if the client had sent value {sha1,ecdsa}.
-
-default_hashsign(_Version = {Major, Minor}, KeyExchange)
- when Major >= 3 andalso Minor >= 3 andalso
- (KeyExchange == rsa orelse
- KeyExchange == dhe_rsa orelse
- KeyExchange == dh_rsa orelse
- KeyExchange == ecdhe_rsa orelse
- KeyExchange == ecdh_rsa orelse
- KeyExchange == srp_rsa) ->
- {sha, rsa};
-default_hashsign(_Version, KeyExchange)
- when KeyExchange == rsa;
- KeyExchange == dhe_rsa;
- KeyExchange == dh_rsa;
- KeyExchange == ecdhe_rsa;
- KeyExchange == ecdh_rsa;
- KeyExchange == srp_rsa ->
- {md5sha, rsa};
-default_hashsign(_Version, KeyExchange)
- when KeyExchange == ecdhe_ecdsa;
- KeyExchange == ecdh_ecdsa ->
- {sha, ecdsa};
-default_hashsign(_Version, KeyExchange)
- when KeyExchange == dhe_dss;
- KeyExchange == dh_dss;
- KeyExchange == srp_dss ->
- {sha, dsa};
-default_hashsign(_Version, KeyExchange)
- when KeyExchange == dh_anon;
- KeyExchange == ecdh_anon;
- KeyExchange == psk;
- KeyExchange == dhe_psk;
- KeyExchange == rsa_psk;
- KeyExchange == srp_anon ->
- {null, anon}.
-
select_curve(#state{client_ecc = {[Curve|_], _}}) ->
{namedCurve, Curve};
select_curve(_) ->
@@ -1884,3 +1835,15 @@ new_ssl_options([undefined | Rest0], [Head1| Rest1], Acc) ->
new_ssl_options(Rest0, Rest1, [Head1 | Acc]);
new_ssl_options([Head0 | Rest0], [_| Rest1], Acc) ->
new_ssl_options(Rest0, Rest1, [Head0 | Acc]).
+
+negotiated_hashsign(undefined, Alg, Version) ->
+ %% Not negotiated, choose default
+ case is_anonymous(Alg) of
+ true ->
+ {null, anon};
+ false ->
+ ssl_handshake:select_hashsign_algs(Alg, Version)
+ end;
+negotiated_hashsign(HashSign = {_, _}, _, _) ->
+ HashSign.
+
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index 1108edcf48..fc67d2c28d 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -73,7 +73,8 @@
]).
%% MISC
--export([select_version/3, prf/5, select_hashsign/2, select_cert_hashsign/3,
+-export([select_version/3, prf/5, select_hashsign/3,
+ select_hashsign_algs/2, select_hashsign_algs/3,
premaster_secret/2, premaster_secret/3, premaster_secret/4]).
%%====================================================================
@@ -590,23 +591,25 @@ prf({3,1}, Secret, Label, Seed, WantedLength) ->
{ok, tls_v1:prf(?MD5SHA, Secret, Label, Seed, WantedLength)};
prf({3,_N}, Secret, Label, Seed, WantedLength) ->
{ok, tls_v1:prf(?SHA256, Secret, Label, Seed, WantedLength)}.
+
+
%%--------------------------------------------------------------------
--spec select_hashsign(#hash_sign_algos{}| undefined, undefined | binary()) ->
- [{atom(), atom()}] | undefined.
+-spec select_hashsign(#hash_sign_algos{}| undefined, undefined | binary(), ssl_record:ssl_version()) ->
+ {atom(), atom()} | undefined.
%%
%% Description:
%%--------------------------------------------------------------------
-select_hashsign(_, undefined) ->
+select_hashsign(_, undefined, _Version) ->
{null, anon};
-select_hashsign(undefined, Cert) ->
+select_hashsign(undefined, Cert, Version) ->
#'OTPCertificate'{tbsCertificate = TBSCert} = public_key:pkix_decode_cert(Cert, otp),
#'OTPSubjectPublicKeyInfo'{algorithm = {_,Algo, _}} = TBSCert#'OTPTBSCertificate'.subjectPublicKeyInfo,
- select_cert_hashsign(undefined, Algo, {undefined, undefined});
-select_hashsign(#hash_sign_algos{hash_sign_algos = HashSigns}, Cert) ->
+ select_hashsign_algs(undefined, Algo, Version);
+select_hashsign(#hash_sign_algos{hash_sign_algos = HashSigns}, Cert, Version) ->
#'OTPCertificate'{tbsCertificate = TBSCert} =public_key:pkix_decode_cert(Cert, otp),
#'OTPSubjectPublicKeyInfo'{algorithm = {_,Algo, _}} = TBSCert#'OTPTBSCertificate'.subjectPublicKeyInfo,
- DefaultHashSign = {_, Sign} = select_cert_hashsign(undefined, Algo, {undefined, undefined}),
+ DefaultHashSign = {_, Sign} = select_hashsign_algs(undefined, Algo, Version),
case lists:filter(fun({sha, dsa}) ->
true;
({_, dsa}) ->
@@ -622,26 +625,59 @@ select_hashsign(#hash_sign_algos{hash_sign_algos = HashSigns}, Cert) ->
[HashSign| _] ->
HashSign
end.
+
%%--------------------------------------------------------------------
--spec select_cert_hashsign(#hash_sign_algos{}| undefined, oid(), ssl_record:ssl_version() | {undefined, undefined}) ->
+-spec select_hashsign_algs(#hash_sign_algos{}| undefined, oid(), ssl_record:ssl_version()) ->
{atom(), atom()}.
+%% Description: For TLS 1.2, hash function and signature algorithm pairs can be
+%% negotiated with the signature_algorithms extension,
+%% for previous versions always use appropriate defaults.
+%% RFC 5246, Sect. 7.4.1.4.1. Signature Algorithms
+%% If the client does not send the signature_algorithms extension, the
+%% server MUST do the following: (i.e. defaults for TLS 1.2)
+%%
+%% - If the negotiated key exchange algorithm is one of (RSA, DHE_RSA,
+%% DH_RSA, RSA_PSK, ECDH_RSA, ECDHE_RSA), behave as if client had
+%% sent the value {sha1,rsa}.
+%%
+%% - If the negotiated key exchange algorithm is one of (DHE_DSS,
+%% DH_DSS), behave as if the client had sent the value {sha1,dsa}.
%%
-%% Description: For TLS 1.2 selected cert_hash_sign will be recived
-%% in the handshake message, for previous versions use appropriate defaults.
-%% This function is also used by select_hashsign to extract
-%% the alogrithm of the server cert key.
+%% - If the negotiated key exchange algorithm is one of (ECDH_ECDSA,
+%% ECDHE_ECDSA), behave as if the client had sent value {sha1,ecdsa}.
+
%%--------------------------------------------------------------------
-select_cert_hashsign(HashSign, _, {Major, Minor}) when HashSign =/= undefined andalso
+select_hashsign_algs(HashSign, _, {Major, Minor}) when HashSign =/= undefined andalso
Major >= 3 andalso Minor >= 3 ->
HashSign;
-select_cert_hashsign(undefined,?'id-ecPublicKey', _) ->
+select_hashsign_algs(undefined, ?rsaEncryption, {Major, Minor}) when Major >= 3 andalso Minor >= 3 ->
+ {sha, rsa};
+select_hashsign_algs(undefined,?'id-ecPublicKey', _) ->
{sha, ecdsa};
-select_cert_hashsign(undefined, ?rsaEncryption, _) ->
+select_hashsign_algs(undefined, ?rsaEncryption, _) ->
{md5sha, rsa};
-select_cert_hashsign(undefined, ?'id-dsa', _) ->
+select_hashsign_algs(undefined, ?'id-dsa', _) ->
{sha, dsa}.
+-spec select_hashsign_algs(atom(), ssl_record:ssl_version()) -> {atom(), atom()}.
+%% Wrap function to keep the knowledge of the default values in
+%% one place only
+select_hashsign_algs(Alg, Version) when (Alg == rsa orelse
+ Alg == dhe_rsa orelse
+ Alg == dh_rsa orelse
+ Alg == ecdhe_rsa orelse
+ Alg == ecdh_rsa orelse
+ Alg == srp_rsa) ->
+ select_hashsign_algs(undefined, ?rsaEncryption, Version);
+select_hashsign_algs(Alg, Version) when (Alg == dhe_dss orelse
+ Alg == dh_dss orelse
+ Alg == srp_dss) ->
+ select_hashsign_algs(undefined, ?'id-dsa', Version);
+select_hashsign_algs(Alg, Version) when (Alg == ecdhe_ecdsa orelse
+ Alg == ecdh_ecdsa) ->
+ select_hashsign_algs(undefined, ?'id-ecPublicKey', Version).
+
%%--------------------------------------------------------------------
-spec master_secret(atom(), ssl_record:ssl_version(), #session{} | binary(), #connection_states{},
client | server) -> {binary(), #connection_states{}} | #alert{}.
@@ -1017,12 +1053,9 @@ decode_suites('3_bytes', Dec) ->
%%-------------Cipeher suite handling --------------------------------
available_suites(UserSuites, Version) ->
- case UserSuites of
- [] ->
- ssl_cipher:suites(Version);
- _ ->
- UserSuites
- end.
+ lists:filtermap(fun(Suite) ->
+ lists:member(Suite, ssl_cipher:all_suites(Version))
+ end, UserSuites).
available_suites(ServerCert, UserSuites, Version, Curve) ->
ssl_cipher:filter(ServerCert, available_suites(UserSuites, Version))
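For concreteness, the defaults spelled out above give the following results (a sketch of calls as they would be written inside the module, where the OID macros such as ?rsaEncryption come from the public_key include files; {3,3} is TLS 1.2 and {3,2} is TLS 1.1):

    %% No signature_algorithms extension received, RSA certificate:
    {sha, rsa}    = ssl_handshake:select_hashsign_algs(undefined, ?rsaEncryption, {3,3}),
    {md5sha, rsa} = ssl_handshake:select_hashsign_algs(undefined, ?rsaEncryption, {3,2}),
    %% Key-exchange based wrapper, used when only the algorithm atom is known:
    {sha, dsa}    = ssl_handshake:select_hashsign_algs(dhe_dss, {3,3}),
    {sha, ecdsa}  = ssl_handshake:select_hashsign_algs(ecdhe_ecdsa, {3,3}).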
diff --git a/lib/ssl/src/ssl_record.erl b/lib/ssl/src/ssl_record.erl
index b0e9943e6d..7337225bc4 100644
--- a/lib/ssl/src/ssl_record.erl
+++ b/lib/ssl/src/ssl_record.erl
@@ -377,7 +377,7 @@ cipher(Version, Fragment,
ssl_cipher:cipher(BulkCipherAlgo, CipherS0, MacHash, Fragment, Version),
{CipherFragment, WriteState0#connection_state{cipher_state = CipherS1}}.
%%--------------------------------------------------------------------
--spec decipher(ssl_version(), binary(), #connection_state{}) -> {binary(), binary(), #connection_state{}}.
+-spec decipher(ssl_version(), binary(), #connection_state{}) -> {binary(), binary(), #connection_state{}} | #alert{}.
%%
%% Description: Payload decryption
%%--------------------------------------------------------------------
diff --git a/lib/ssl/src/tls_connection.erl b/lib/ssl/src/tls_connection.erl
index ffa04ee8ba..930706cde6 100644
--- a/lib/ssl/src/tls_connection.erl
+++ b/lib/ssl/src/tls_connection.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -208,11 +208,11 @@ hello(Hello = #client_hello{client_version = ClientVersion,
session_cache = Cache,
session_cache_cb = CacheCb,
ssl_options = SslOpts}) ->
- HashSign = ssl_handshake:select_hashsign(HashSigns, Cert),
case tls_handshake:hello(Hello, SslOpts, {Port, Session0, Cache, CacheCb,
ConnectionStates0, Cert}, Renegotiation) of
{Version, {Type, Session},
ConnectionStates, ServerHelloExt} ->
+ HashSign = ssl_handshake:select_hashsign(HashSigns, Cert, Version),
ssl_connection:hello({common_client_hello, Type, ServerHelloExt, HashSign},
State#state{connection_states = ConnectionStates,
negotiated_version = Version,
@@ -751,7 +751,11 @@ handle_tls_handshake(Handle, StateName,
handle_tls_handshake(Handle, NextStateName, State);
{stop, _,_} = Stop ->
Stop
- end.
+ end;
+
+handle_tls_handshake(_Handle, _StateName, #state{}) ->
+ throw(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE)).
+
write_application_data(Data0, From,
#state{socket = Socket,
negotiated_version = Version,
@@ -859,7 +863,8 @@ handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert,
{Record, State} = next_record(State0),
next_state(StateName, connection, Record, State);
-handle_alert(#alert{level = ?WARNING, description = ?USER_CANCELED} = Alert, StateName,
+%% Gracefully log and ignore all other warning alerts
+handle_alert(#alert{level = ?WARNING} = Alert, StateName,
#state{ssl_options = SslOpts} = State0) ->
log_alert(SslOpts#ssl_options.log_alert, StateName, Alert),
{Record, State} = next_record(State0),
diff --git a/lib/ssl/src/tls_record.erl b/lib/ssl/src/tls_record.erl
index 4da08e9c51..f50ea22f39 100644
--- a/lib/ssl/src/tls_record.erl
+++ b/lib/ssl/src/tls_record.erl
@@ -154,21 +154,24 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
sequence_number = Seq,
security_parameters = SecParams} = ReadState0,
CompressAlg = SecParams#security_parameters.compression_algorithm,
- {PlainFragment, Mac, ReadState1} = ssl_record:decipher(Version, CipherFragment, ReadState0),
- MacHash = calc_mac_hash(Type, Version, PlainFragment, ReadState1),
- case ssl_record:is_correct_mac(Mac, MacHash) of
- true ->
- {Plain, CompressionS1} = ssl_record:uncompress(CompressAlg,
- PlainFragment, CompressionS0),
- ConnnectionStates = ConnnectionStates0#connection_states{
- current_read = ReadState1#connection_state{
- sequence_number = Seq + 1,
- compression_state = CompressionS1}},
- {CipherText#ssl_tls{fragment = Plain}, ConnnectionStates};
- false ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC)
- end.
-
+ case ssl_record:decipher(Version, CipherFragment, ReadState0) of
+ {PlainFragment, Mac, ReadState1} ->
+ MacHash = calc_mac_hash(Type, Version, PlainFragment, ReadState1),
+ case ssl_record:is_correct_mac(Mac, MacHash) of
+ true ->
+ {Plain, CompressionS1} = ssl_record:uncompress(CompressAlg,
+ PlainFragment, CompressionS0),
+ ConnnectionStates = ConnnectionStates0#connection_states{
+ current_read = ReadState1#connection_state{
+ sequence_number = Seq + 1,
+ compression_state = CompressionS1}},
+ {CipherText#ssl_tls{fragment = Plain}, ConnnectionStates};
+ false ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC)
+ end;
+ #alert{} = Alert ->
+ Alert
+ end.
%%--------------------------------------------------------------------
-spec protocol_version(tls_atom_version() | tls_version()) ->
tls_version() | tls_atom_version().
diff --git a/lib/ssl/test/make_certs.erl b/lib/ssl/test/make_certs.erl
index 0947657ca7..15a7e118ff 100644
--- a/lib/ssl/test/make_certs.erl
+++ b/lib/ssl/test/make_certs.erl
@@ -32,6 +32,7 @@
v2_crls = true,
ecc_certs = false,
issuing_distribution_point = false,
+ crl_port = 8000,
openssl_cmd = "openssl"}).
@@ -57,6 +58,8 @@ make_config([{default_bits, Bits}|T], C) when is_integer(Bits) ->
make_config(T, C#config{default_bits = Bits});
make_config([{v2_crls, Bool}|T], C) when is_boolean(Bool) ->
make_config(T, C#config{v2_crls = Bool});
+make_config([{crl_port, Port}|T], C) when is_integer(Port) ->
+ make_config(T, C#config{crl_port = Port});
make_config([{ecc_certs, Bool}|T], C) when is_boolean(Bool) ->
make_config(T, C#config{ecc_certs = Bool});
make_config([{issuing_distribution_point, Bool}|T], C) when is_boolean(Bool) ->
@@ -423,7 +426,7 @@ ca_cnf(C) ->
"[crl_section]\n"
%% intentionally invalid
"URI.1=http://localhost/",C#config.commonName,"/crl.pem\n"
- "URI.2=http://localhost:8000/",C#config.commonName,"/crl.pem\n"
+ "URI.2=http://localhost:",integer_to_list(C#config.crl_port),"/",C#config.commonName,"/crl.pem\n"
"\n"
"[user_cert_digital_signature_only]\n"
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 8e3d2e4b80..406be65c3b 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -115,7 +115,8 @@ options_tests() ->
reuseaddr,
tcp_reuseaddr,
honor_server_cipher_order,
- honor_client_cipher_order
+ honor_client_cipher_order,
+ ciphersuite_vs_version
].
api_tests() ->
@@ -187,7 +188,10 @@ error_handling_tests()->
tcp_error_propagation_in_active_mode,
tcp_connect,
tcp_connect_big,
- close_transport_accept
+ close_transport_accept,
+ recv_active,
+ recv_active_once,
+ dont_crash_on_handshake_garbage
].
rizzo_tests() ->
@@ -1154,6 +1158,57 @@ close_transport_accept(Config) when is_list(Config) ->
Other ->
exit({?LINE, Other})
end.
+%%--------------------------------------------------------------------
+recv_active() ->
+ [{doc,"Test recv on active socket"}].
+
+recv_active(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server =
+ ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, try_recv_active, []}},
+ {options, [{active, true} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client =
+ ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, try_recv_active, []}},
+ {options, [{active, true} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+recv_active_once() ->
+ [{doc,"Test recv on active once socket"}].
+
+recv_active_once(Config) when is_list(Config) ->
+ ClientOpts = ?config(client_opts, Config),
+ ServerOpts = ?config(server_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server =
+ ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, try_recv_active_once, []}},
+ {options, [{active, once} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client =
+ ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, try_recv_active_once, []}},
+ {options, [{active, once} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
%%--------------------------------------------------------------------
dh_params() ->
@@ -2559,6 +2614,81 @@ honor_cipher_order(Config, Honor, ServerCiphers, ClientCiphers, Expected) ->
ssl_test_lib:close(Client).
%%--------------------------------------------------------------------
+ciphersuite_vs_version(Config) when is_list(Config) ->
+
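+ %% Offer an SSL 3.0 client hello that lists both a SHA-256 based suite (0,61)
+ %% and a plain SHA based suite (0,57); the server hello must pick the suite
+ %% that is actually usable with SSL 3.0.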
+ {_ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ ServerOpts = ?config(server_opts, Config),
+
+ Server = ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ {ok, Socket} = gen_tcp:connect(Hostname, Port, [binary, {active, false}]),
+ ok = gen_tcp:send(Socket,
+ <<22, 3,0, 49:16, % handshake, SSL 3.0, length
+ 1, 45:24, % client_hello, length
+ 3,0, % SSL 3.0
+ 16#deadbeef:256, % 32 'random' bytes = 256 bits
+ 0, % no session ID
+ %% three cipher suites: the empty renegotiation info SCSV (0,255), one with a SHA-256 MAC (0,61) and one with a SHA MAC (0,57)
+ 6:16, 0,255, 0,61, 0,57,
+ 1, 0 % no compression
+ >>),
+ {ok, <<22, RecMajor:8, RecMinor:8, _RecLen:16, 2, HelloLen:24>>} = gen_tcp:recv(Socket, 9, 10000),
+ {ok, <<HelloBin:HelloLen/binary>>} = gen_tcp:recv(Socket, HelloLen, 5000),
+ ServerHello = tls_handshake:decode_handshake({RecMajor, RecMinor}, 2, HelloBin),
+ case ServerHello of
+ #server_hello{server_version = {3,0}, cipher_suite = <<0,57>>} ->
+ ok;
+ _ ->
+ ct:fail({unexpected_server_hello, ServerHello})
+ end.
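The hand-crafted bytes above follow the 5-byte TLS record header (content type, version major/minor, length) followed by a 4-byte handshake header; a tiny parser of the record framing, shown only to make it explicit (not part of the suite):

    %% Split one TLS record off the front of a byte stream.
    parse_record(<<Type:8, Major:8, Minor:8, Len:16, Fragment:Len/binary, Rest/binary>>) ->
        {Type, {Major, Minor}, Fragment, Rest}.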
+
+%%--------------------------------------------------------------------
+
+dont_crash_on_handshake_garbage() ->
+ [{doc, "Ensure the SSL server worker throws an alert on garbage during the handshake "
+ "instead of crashing and exposing state to user code"}].
+
+dont_crash_on_handshake_garbage(Config) ->
+ ServerOpts = ?config(server_opts, Config),
+
+ {_ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {ssl_test_lib, send_recv_result_active, []}},
+ {options, ServerOpts}]),
+ unlink(Server), monitor(process, Server),
+ Port = ssl_test_lib:inet_port(Server),
+
+ {ok, Socket} = gen_tcp:connect(Hostname, Port, [binary, {active, false}]),
+
+ % Send hello and garbage record
+ ok = gen_tcp:send(Socket,
+ [<<22, 3,3, 49:16, 1, 45:24, 3,3, % client_hello
+ 16#deadbeef:256, % 32 'random' bytes = 256 bits
+ 0, 6:16, 0,255, 0,61, 0,57, 1, 0 >>, % some hello values
+
+ <<22, 3,3, 5:16, 92,64,37,228,209>> % garbage
+ ]),
+ % Send unexpected change_cipher_spec
+ ok = gen_tcp:send(Socket, <<20, 0,0,12, 111,40,244,7,137,224,16,109,197,110,249,152>>),
+
+ % Ensure we receive an alert rather than a sudden disconnect
+ {ok, <<21, _/binary>>} = drop_handshakes(Socket, 1000).
+
+drop_handshakes(Socket, Timeout) ->
+ {ok, <<RecType:8, _RecMajor:8, _RecMinor:8, RecLen:16>> = Header} = gen_tcp:recv(Socket, 5, Timeout),
+ {ok, <<Frag:RecLen/binary>>} = gen_tcp:recv(Socket, RecLen, Timeout),
+ case RecType of
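+ %% 22 = handshake record: keep reading; anything else (the test expects
+ %% 21 = alert) is returned to the caller together with its header.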
+ 22 -> drop_handshakes(Socket, Timeout);
+ _ -> {ok, <<Header/binary, Frag/binary>>}
+ end.
+
+
+%%--------------------------------------------------------------------
hibernate() ->
[{doc,"Check that an SSL connection that is started with option "
@@ -3582,3 +3712,11 @@ version_option_test(Config, Version) ->
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
+
+try_recv_active(Socket) ->
+ ssl:send(Socket, "Hello world"),
+ {error, einval} = ssl:recv(Socket, 11),
+ ok.
+try_recv_active_once(Socket) ->
+ {error, einval} = ssl:recv(Socket, 11),
+ ok.
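Both helpers rely on ssl:recv being rejected with {error, einval} while the socket is in an active mode; on a passive socket the same call is the normal way to read. A short sketch for contrast (Host, Port and Opts are placeholders, not part of the suite):

    {ok, Sock} = ssl:connect(Host, Port, [{active, false} | Opts]),
    ok = ssl:send(Sock, "Hello world"),
    {ok, Data} = ssl:recv(Sock, 11).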
diff --git a/lib/ssl/test/ssl_crl_SUITE.erl b/lib/ssl/test/ssl_crl_SUITE.erl
index 4eacf3adfc..bad0949ec4 100644
--- a/lib/ssl/test/ssl_crl_SUITE.erl
+++ b/lib/ssl/test/ssl_crl_SUITE.erl
@@ -48,8 +48,8 @@ all() ->
].
groups() ->
- [{basic, [], basic_tests()},
- {v1_crl, [], v1_crl_tests()},
+ [{basic, [], basic_tests()},
+ {v1_crl, [], v1_crl_tests()},
{idp_crl, [], idp_crl_tests()}].
basic_tests() ->
@@ -72,8 +72,8 @@ init_per_suite(Config0) ->
_ ->
TLSVersion = ?config(tls_version, Config0),
OpenSSL_version = (catch os:cmd("openssl version")),
- ct:log("TLS version: ~p~nOpenSSL version: ~p~n~n~p:module_info(): ~p~n~nssh:module_info(): ~p~n",
- [TLSVersion, OpenSSL_version, ?MODULE, ?MODULE:module_info(), ssh:module_info()]),
+ ct:log("TLS version: ~p~nOpenSSL version: ~p~n~n~p:module_info(): ~p~n~nssl:module_info(): ~p~n",
+ [TLSVersion, OpenSSL_version, ?MODULE, ?MODULE:module_info(), ssl:module_info()]),
case ssl_test_lib:enough_openssl_crl_support(OpenSSL_version) of
false ->
{skip, io_lib:format("Bad openssl version: ~p",[OpenSSL_version])};
@@ -82,7 +82,13 @@ init_per_suite(Config0) ->
try crypto:start() of
ok ->
ssl:start(),
- [{watchdog, Dog}, {openssl_version,OpenSSL_version} | Config0]
+ {ok, Hostname0} = inet:gethostname(),
+ IPfamily =
+ case lists:member(list_to_atom(Hostname0), ct:get_config(ipv6_hosts,[])) of
+ true -> inet6;
+ false -> inet
+ end,
+ [{ipfamily,IPfamily}, {watchdog, Dog}, {openssl_version,OpenSSL_version} | Config0]
catch _C:_E ->
ct:log("crypto:start() caught ~p:~p",[_C,_E]),
{skip, "Crypto did not start"}
@@ -98,21 +104,23 @@ end_per_suite(_Config) ->
%%% Group init/end
init_per_group(Group, Config) ->
- ct:log("~p:~p~nlisteners to port 8000:~n~p~n)",[?MODULE,?LINE,os:cmd("netstat -tln|grep ':8000'")]),
ssl:start(),
inets:start(),
CertDir = filename:join(?config(priv_dir, Config), Group),
DataDir = ?config(data_dir, Config),
ServerRoot = make_dir_path([?config(priv_dir,Config), Group, tmp]),
- Result = make_certs:all(DataDir, CertDir, cert_opts(Group)),
- ct:log("~p:~p~nmake_certs:all(~n DataDir=~p,~n CertDir=~p,~n ServerRoot=~p~n Opts=~p~n) returned ~p~n", [?MODULE,?LINE,DataDir, CertDir, ServerRoot, cert_opts(Group), Result]),
%% start a HTTP server to serve the CRLs
- {ok, Httpd} = inets:start(httpd, [{server_name, "localhost"}, {port, 8000},
+ {ok, Httpd} = inets:start(httpd, [{ipfamily, ?config(ipfamily,Config)},
+ {server_name, "localhost"}, {port, 0},
{server_root, ServerRoot},
{document_root, CertDir},
{modules, [mod_get]}
]),
- ct:log("~p:~p~nlisteners to port 8000:~n~p~n)",[?MODULE,?LINE,os:cmd("netstat -tln|grep ':8000'")]),
+ [{port,Port}] = httpd:info(Httpd, [port]),
+ ct:log("~p:~p~nHTTPD IP family=~p, port=~p~n", [?MODULE, ?LINE, ?config(ipfamily,Config), Port]),
+ CertOpts = [{crl_port,Port}|cert_opts(Group)],
+ Result = make_certs:all(DataDir, CertDir, CertOpts),
+ ct:log("~p:~p~nmake_certs:all(~n DataDir=~p,~n CertDir=~p,~n ServerRoot=~p~n Opts=~p~n) returned ~p~n", [?MODULE,?LINE,DataDir, CertDir, ServerRoot, CertOpts, Result]),
[{make_cert_result, Result}, {cert_dir, CertDir}, {httpd, Httpd} | Config].
cert_opts(v1_crl) -> [{v2_crls, false}];
@@ -134,7 +142,6 @@ end_per_group(_GroupName, Config) ->
,ct:log("Stopped",[])
end,
inets:stop(),
- ct:log("~p:~p~nlisteners to port 8000:~n~p~n)",[?MODULE,?LINE,os:cmd("netstat -tln|grep ':8000'")]),
Config.
%%%================================================================
@@ -481,7 +488,6 @@ fetch([]) ->
not_available;
fetch([{uniformResourceIdentifier, "http"++_=URL}|Rest]) ->
ct:log("~p:~p~ngetting CRL from ~p~n", [?MODULE,?LINE, URL]),
- ct:log("~p:~p~nlisteners to port 8000:~n~p~n)",[?MODULE,?LINE,os:cmd("netstat -tln|grep ':8000'")]),
case httpc:request(get, {URL, []}, [], [{body_format, binary}]) of
{ok, {_Status, _Headers, Body}} ->
case Body of
diff --git a/lib/ssl/test/ssl_handshake_SUITE.erl b/lib/ssl/test/ssl_handshake_SUITE.erl
index 6d020c472b..5f36842f9e 100644
--- a/lib/ssl/test/ssl_handshake_SUITE.erl
+++ b/lib/ssl/test/ssl_handshake_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -26,6 +26,7 @@
-include_lib("common_test/include/ct.hrl").
-include("ssl_internal.hrl").
-include("tls_handshake.hrl").
+-include_lib("public_key/include/public_key.hrl").
%%--------------------------------------------------------------------
%% Common Test interface functions -----------------------------------
@@ -36,7 +37,8 @@ all() -> [decode_hello_handshake,
decode_single_hello_extension_correctly,
decode_supported_elliptic_curves_hello_extension_correctly,
decode_unknown_hello_extension_correctly,
- encode_single_hello_sni_extension_correctly].
+ encode_single_hello_sni_extension_correctly,
+ select_proper_tls_1_2_rsa_default_hashsign].
%%--------------------------------------------------------------------
%% Test Cases --------------------------------------------------------
@@ -95,3 +97,11 @@ encode_single_hello_sni_extension_correctly(_Config) ->
HelloExt = <<ExtSize:16/unsigned-big-integer, SNI/binary>>,
Encoded = ssl_handshake:encode_hello_extensions(Exts),
HelloExt = Encoded.
+
+select_proper_tls_1_2_rsa_default_hashsign(_Config) ->
+ % RFC 5246 Section 7.4.1.4.1 specifies {sha1,rsa} as the default signature algorithm for RSA key exchange
+ {sha, rsa} = ssl_handshake:select_hashsign_algs(undefined, ?rsaEncryption, {3,3}),
+ % Older versions use the MD5/SHA-1 combination
+ {md5sha, rsa} = ssl_handshake:select_hashsign_algs(undefined, ?rsaEncryption, {3,2}),
+ {md5sha, rsa} = ssl_handshake:select_hashsign_algs(undefined, ?rsaEncryption, {3,0}).
+
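For reference, the {3,N} tuples above are the wire versions; tls_record:protocol_version/1 (whose spec is touched earlier in this patch) converts between them and the ssl version atoms. Assuming the usual mapping ({3,3} = TLS 1.2, {3,2} = TLS 1.1, {3,0} = SSL 3.0):

    {3,3} = tls_record:protocol_version('tlsv1.2'),
    {3,0} = tls_record:protocol_version(sslv3).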
diff --git a/lib/stdlib/src/erl_lint.erl b/lib/stdlib/src/erl_lint.erl
index 7c064ce902..39cc03cf7a 100644
--- a/lib/stdlib/src/erl_lint.erl
+++ b/lib/stdlib/src/erl_lint.erl
@@ -1046,9 +1046,10 @@ check_undefined_types(#lint{usage=Usage,types=Def}=St0) ->
Used = Usage#usage.used_types,
UTAs = dict:fetch_keys(Used),
Undef = [{TA,dict:fetch(TA, Used)} ||
- TA <- UTAs,
+ {T,_}=TA <- UTAs,
not dict:is_key(TA, Def),
- not is_default_type(TA)],
+ not is_default_type(TA),
+ not is_newly_introduced_var_arity_type(T)],
foldl(fun ({TA,L}, St) ->
add_error(L, {undefined_type,TA}, St)
end, St0, Undef).
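The new guard keeps var-arity built-in types that were only just introduced (map() in this release) out of the undefined-type check. The helper itself is outside this hunk; a plausible sketch of it, stated as an assumption rather than the committed code:

    is_newly_introduced_var_arity_type(map) -> true;
    is_newly_introduced_var_arity_type(_) -> false.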
diff --git a/lib/stdlib/test/erl_lint_SUITE.erl b/lib/stdlib/test/erl_lint_SUITE.erl
index d9512c0ef4..ea61b2082b 100644
--- a/lib/stdlib/test/erl_lint_SUITE.erl
+++ b/lib/stdlib/test/erl_lint_SUITE.erl
@@ -52,7 +52,7 @@
guard/1, otp_4886/1, otp_4988/1, otp_5091/1, otp_5276/1, otp_5338/1,
otp_5362/1, otp_5371/1, otp_7227/1, otp_5494/1, otp_5644/1, otp_5878/1,
otp_5917/1, otp_6585/1, otp_6885/1, otp_10436/1, otp_11254/1,
- otp_11772/1, otp_11771/1,
+ otp_11772/1, otp_11771/1, otp_11872/1,
export_all/1,
bif_clash/1,
behaviour_basic/1, behaviour_multiple/1,
@@ -88,7 +88,7 @@ all() ->
otp_4886, otp_4988, otp_5091, otp_5276, otp_5338,
otp_5362, otp_5371, otp_7227, otp_5494, otp_5644,
otp_5878, otp_5917, otp_6585, otp_6885, otp_10436, otp_11254,
- otp_11772, otp_11771, export_all,
+ otp_11772, otp_11771, otp_11872, export_all,
bif_clash, behaviour_basic, behaviour_multiple,
otp_7550, otp_8051, format_warn, {group, on_load},
too_many_arguments, basic_errors, bin_syntax_errors, predef,
@@ -2630,6 +2630,29 @@ otp_11771(Config) when is_list(Config) ->
[]} = run_test2(Config, Ts, []),
ok.
+otp_11872(doc) ->
+ "OTP-11872. The type map() is undefined when exported.";
+otp_11872(suite) -> [];
+otp_11872(Config) when is_list(Config) ->
+ Ts = <<"
+ -module(map).
+
+ -compile(export_all).
+
+ -export_type([map/0, product/0]).
+
+ -opaque map() :: dict().
+
+ -spec t() -> map().
+
+ t() ->
+ 1.
+ ">>,
+ {error,[{6,erl_lint,{undefined_type,{product,0}}}],
+ [{8,erl_lint,{new_var_arity_type,map}}]} =
+ run_test2(Config, Ts, []),
+ ok.
+
export_all(doc) ->
"OTP-7392. Warning for export_all.";
export_all(Config) when is_list(Config) ->
diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl
index 9b05bddf63..70dc7a1441 100644
--- a/lib/test_server/src/test_server.erl
+++ b/lib/test_server/src/test_server.erl
@@ -444,7 +444,7 @@ run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
%% If this process (group leader of the test case) terminates before
%% all messages have been replied back to the io server, the io server
%% hangs. Fixed by the 20 milli timeout check here, and by using monitor in
-%% io.erl (livrem OCH hangslen mao :)
+%% io.erl.
%%
%% A test case is known to have failed if it returns {'EXIT', _} tuple,
%% or sends a message {failed, File, Line} to it's group_leader
@@ -673,7 +673,7 @@ handle_tc_exit({testcase_aborted,{user_timetrap_error,_}=Msg,_}, St) ->
spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
St;
handle_tc_exit(Reason, #st{status={framework,FwMod,FwFunc},
- config=Config,pid=Pid}=St) ->
+ config=Config,pid=Pid}=St) ->
R = case Reason of
{timetrap_timeout,TVal,_} ->
{timetrap,TVal};
diff --git a/lib/typer/Makefile b/lib/typer/Makefile
index 40a82e9bba..d4396abc9d 100644
--- a/lib/typer/Makefile
+++ b/lib/typer/Makefile
@@ -29,7 +29,7 @@ include $(ERL_TOP)/make/$(TARGET)/otp.mk
# Macros
#
-SUB_DIRECTORIES = src
+SUB_DIRECTORIES = src doc/src
include vsn.mk
VSN = $(TYPER_VSN)
diff --git a/lib/typer/doc/Makefile b/lib/typer/doc/Makefile
new file mode 100644
index 0000000000..4ea0137202
--- /dev/null
+++ b/lib/typer/doc/Makefile
@@ -0,0 +1,39 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2006-2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+SHELL=/bin/sh
+
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+clean:
+ -rm -f *.html edoc-info stylesheet.css erlang.png
+
+distclean: clean
+realclean: clean
+
+# ----------------------------------------------------
+# Special Build Targets
+# ----------------------------------------------------
+
+
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
diff --git a/lib/typer/doc/html/.gitignore b/lib/typer/doc/html/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/typer/doc/html/.gitignore
diff --git a/lib/typer/doc/pdf/.gitignore b/lib/typer/doc/pdf/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/typer/doc/pdf/.gitignore
diff --git a/lib/typer/doc/src/Makefile b/lib/typer/doc/src/Makefile
new file mode 100644
index 0000000000..2683c08679
--- /dev/null
+++ b/lib/typer/doc/src/Makefile
@@ -0,0 +1,117 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2006-2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+# ----------------------------------------------------
+# Application version
+# ----------------------------------------------------
+include ../../vsn.mk
+VSN=$(TYPER_VSN)
+APPLICATION=typer
+
+# ----------------------------------------------------
+# Release directory specification
+# ----------------------------------------------------
+RELSYSDIR = $(RELEASE_PATH)/lib/$(APPLICATION)-$(VSN)
+
+# ----------------------------------------------------
+# Target Specs
+# ----------------------------------------------------
+XML_APPLICATION_FILES = ref_man.xml
+XML_REF3_FILES =
+
+XML_PART_FILES = part_notes.xml
+XML_CHAPTER_FILES = notes.xml
+
+BOOK_FILES = book.xml
+
+XML_FILES = \
+ $(BOOK_FILES) $(XML_CHAPTER_FILES) \
+ $(XML_PART_FILES) $(XML_REF3_FILES) $(XML_APPLICATION_FILES)
+
+GIF_FILES =
+
+# ----------------------------------------------------
+
+HTML_FILES = $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%.html) \
+ $(XML_PART_FILES:%.xml=$(HTMLDIR)/%.html)
+
+INFO_FILE = ../../info
+EXTRA_FILES = \
+ $(DEFAULT_GIF_FILES) \
+ $(DEFAULT_HTML_FILES) \
+ $(XML_REF3_FILES:%.xml=$(HTMLDIR)/%.html) \
+ $(XML_CHAPTER_FILES:%.xml=$(HTMLDIR)/%.html)
+
+MAN3_FILES = $(XML_REF3_FILES:%.xml=$(MAN3DIR)/%.3)
+
+HTML_REF_MAN_FILE = $(HTMLDIR)/index.html
+
+TOP_PDF_FILE = $(PDFDIR)/$(APPLICATION)-$(VSN).pdf
+
+# ----------------------------------------------------
+# FLAGS
+# ----------------------------------------------------
+XML_FLAGS +=
+
+# ----------------------------------------------------
+# Targets
+# ----------------------------------------------------
+$(HTMLDIR)/%.gif: %.gif
+ $(INSTALL_DATA) $< $@
+
+docs: pdf html man
+
+$(TOP_PDF_FILE): $(XML_FILES)
+
+pdf: $(TOP_PDF_FILE)
+
+html: gifs $(HTML_REF_MAN_FILE)
+
+man: $(MAN3_FILES)
+
+gifs: $(GIF_FILES:%=$(HTMLDIR)/%)
+
+debug opt:
+
+clean clean_docs:
+ rm -rf $(HTMLDIR)/*
+ rm -f $(MAN3DIR)/*
+ rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
+ rm -f errs core *~
+
+distclean: clean
+realclean: clean
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
+
+release_docs_spec: docs
+ $(INSTALL_DIR) "$(RELSYSDIR)/doc/pdf"
+ $(INSTALL_DATA) $(TOP_PDF_FILE) "$(RELSYSDIR)/doc/pdf"
+ $(INSTALL_DIR) "$(RELSYSDIR)/doc/html"
+ $(INSTALL_DATA) $(HTMLDIR)/* \
+ "$(RELSYSDIR)/doc/html"
+ $(INSTALL_DATA) $(INFO_FILE) "$(RELSYSDIR)"
+
+
+release_spec:
diff --git a/lib/typer/doc/src/book.xml b/lib/typer/doc/src/book.xml
new file mode 100644
index 0000000000..5cc85a3022
--- /dev/null
+++ b/lib/typer/doc/src/book.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE book SYSTEM "book.dtd">
+
+<book xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header titlestyle="normal">
+ <copyright>
+ <year>2006</year><year>2013</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>TypEr</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <pagetext></pagetext>
+ <preamble>
+ </preamble>
+ <pagetext>TypEr</pagetext>
+ <applications>
+ <xi:include href="ref_man.xml"/>
+ </applications>
+ <releasenotes>
+ <xi:include href="notes.xml"/>
+ </releasenotes>
+</book>
+
diff --git a/lib/typer/doc/src/fascicules.xml b/lib/typer/doc/src/fascicules.xml
new file mode 100644
index 0000000000..b15610fa8b
--- /dev/null
+++ b/lib/typer/doc/src/fascicules.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE fascicules SYSTEM "fascicules.dtd">
+
+<fascicules>
+ <fascicule file="part_notes" href="part_notes_frame.html" entry="yes">
+ Release Notes
+ </fascicule>
+ <fascicule file="" href="../../../../doc/print.html" entry="no">
+ Off-Print
+ </fascicule>
+</fascicules>
+
diff --git a/lib/typer/doc/src/notes.xml b/lib/typer/doc/src/notes.xml
new file mode 100644
index 0000000000..53d554820d
--- /dev/null
+++ b/lib/typer/doc/src/notes.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2014</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>TypEr Release Notes</title>
+ <prepared>otp_appnotes</prepared>
+ <docno>nil</docno>
+ <date>nil</date>
+ <rev>nil</rev>
+ <file>notes.xml</file>
+ </header>
+ <p>This document describes the changes made to TypEr.</p>
+
+<section><title>TypEr 0.9.7</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Added initial documentation framework for TypEr.</p>
+ <p>
+ Own Id: OTP-11860</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+
+
+</chapter>
+
diff --git a/lib/typer/doc/src/part_notes.xml b/lib/typer/doc/src/part_notes.xml
new file mode 100644
index 0000000000..b4ccd3ed77
--- /dev/null
+++ b/lib/typer/doc/src/part_notes.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>2006</year><year>2013</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>TypEr Release Notes</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <description>
+ <p><em>TypEr</em></p>
+ </description>
+ <xi:include href="notes.xml"/>
+</part>
+
diff --git a/lib/typer/doc/src/ref_man.xml b/lib/typer/doc/src/ref_man.xml
new file mode 100644
index 0000000000..b54a5f5947
--- /dev/null
+++ b/lib/typer/doc/src/ref_man.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE application SYSTEM "application.dtd">
+
+<application xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>2014</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>TypEr</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>ref_man.xml</file>
+ </header>
+ <description>
+ </description>
+ <xi:include href="typer_app.xml"/>
+</application>
+
diff --git a/lib/typer/doc/src/typer_app.xml b/lib/typer/doc/src/typer_app.xml
new file mode 100644
index 0000000000..469a9be108
--- /dev/null
+++ b/lib/typer/doc/src/typer_app.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE appref SYSTEM "appref.dtd">
+
+<appref>
+ <header>
+ <copyright>
+ <year>2014</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>TypEr</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>typer.xml</file>
+ </header>
+ <app>TypEr</app>
+ <appsummary>The TypEr Application</appsummary>
+ <description>
+ <p>An Erlang/OTP application that shows type information
+ for Erlang modules to the user. Additionally, it can
+ annotate the code of files with such type information.</p>
+ </description>
+
+</appref>
+
diff --git a/lib/typer/info b/lib/typer/info
new file mode 100644
index 0000000000..5145fbcfff
--- /dev/null
+++ b/lib/typer/info
@@ -0,0 +1,2 @@
+group: tools
+short: TypEr
diff --git a/lib/typer/src/Makefile b/lib/typer/src/Makefile
index 13af466755..a7059de971 100644
--- a/lib/typer/src/Makefile
+++ b/lib/typer/src/Makefile
@@ -63,7 +63,7 @@ APPUP_TARGET= $(EBIN)/$(APPUP_FILE)
# ----------------------------------------------------
# FLAGS
# ----------------------------------------------------
-ERL_COMPILE_FLAGS += +warn_exported_vars +warn_untyped_record +warn_missing_spec
+ERL_COMPILE_FLAGS += +warn_export_vars +warn_untyped_record +warn_missing_spec
# ----------------------------------------------------
# Targets
diff --git a/lib/typer/vsn.mk b/lib/typer/vsn.mk
index 49fdda756e..9cc044c621 100644
--- a/lib/typer/vsn.mk
+++ b/lib/typer/vsn.mk
@@ -1 +1 @@
-TYPER_VSN = 0.9.6
+TYPER_VSN = 0.9.7