author     Björn Gustavsson <[email protected]>  2010-09-01 10:12:59 +0200
committer  Björn Gustavsson <[email protected]>  2010-09-01 10:32:51 +0200
commit     d10b3698655328fc19a34fb6c7d37319547dc945 (patch)
tree       77c2d66726ac0869bc22f5249cd551728bfb9fe0  /lib/asn1/src/asn1ct_tok.erl
parent     8537e5489707c8961c1a6f045f635b7a064f811c (diff)
parent     96db19557ac894cffcaa18e8f1191d387a5b05ea (diff)
Merge branch 'maint-r13' into dev
* maint-r13:
    asn1 tests: Remove duplicated SUITE data directories
    Update of vsn.mk, relnotes and appup for patch in R13B04 track
    Add support for Extension addition group

Conflicts:
    lib/asn1/test/Makefile
    lib/asn1/test/asn1_SUITE.erl.src
    lib/asn1/vsn.mk

OTP-8598
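The refactoring below changes the tokeniser's calling convention: the old
tokenise/2 built its token list by direct recursion, one input line at a
time, while the new tokenise/4 threads the input stream through every
clause and collects tokens in an accumulator, returning {NewLno,Tokens}.
This is what allows a multiline comment to consume further lines from the
stream itself. A minimal, self-contained sketch of the accumulator
pattern (a hypothetical module, not part of this commit):

    -module(tok_sketch).
    -export([tokenise/1]).

    %% Collect digit tokens into an accumulator R while tracking the
    %% line number, and reverse R once at the end -- the same shape as
    %% the new tokenise/4 below, which returns {NewLno,Tokens}.
    tokenise(Chars) ->
        tokenise(Chars, 1, []).

    tokenise([H|T], Lno, R) when $0 =< H, H =< $9 ->
        tokenise(T, Lno, [{number,Lno,H - $0}|R]);
    tokenise([$\n|T], Lno, R) ->
        tokenise(T, Lno + 1, R);
    tokenise([_|T], Lno, R) ->
        tokenise(T, Lno, R);
    tokenise([], Lno, R) ->
        {Lno, lists:reverse(R)}.

For example, tok_sketch:tokenise("1\n2") returns
{2,[{number,1,1},{number,2,2}]}.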
Diffstat (limited to 'lib/asn1/src/asn1ct_tok.erl')
-rw-r--r--  lib/asn1/src/asn1ct_tok.erl | 184
1 file changed, 92 insertions(+), 92 deletions(-)
diff --git a/lib/asn1/src/asn1ct_tok.erl b/lib/asn1/src/asn1ct_tok.erl
index f0ce31bdb2..85199c65ec 100644
--- a/lib/asn1/src/asn1ct_tok.erl
+++ b/lib/asn1/src/asn1ct_tok.erl
@@ -21,7 +21,7 @@
%% Tokenize ASN.1 code (input to parser generated with yecc)
--export([get_name/2,tokenise/2, file/1]).
+-export([get_name/2,tokenise/4, file/1]).
file(File) ->
@@ -29,12 +29,9 @@ file(File) ->
{error, Reason} ->
{error,{File,file:format_error(Reason)}};
{ok,Stream} ->
- process0(Stream)
+ process(Stream,0,[])
end.
-process0(Stream) ->
- process(Stream,0,[]).
-
process(Stream,Lno,R) ->
process(io:get_line(Stream, ''), Stream,Lno+1,R).
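%% Hypothetical usage of the entry point above; the shape of a
%% successful return is assumed from the eof clause of process/4,
%% which this hunk does not show:
%%
%%   case asn1ct_tok:file("MyModule.asn1") of
%%       {error,{File,Reason}} ->
%%           io:format("~s: ~s~n", [File,Reason]);
%%       Tokens ->
%%           Tokens          %% input to the yecc-generated parser
%%   end.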
@@ -45,131 +42,128 @@ process(eof, Stream,Lno,R) ->
process(L, Stream,Lno,R) when is_list(L) ->
%%io:format('read:~s',[L]),
- case catch tokenise(L,Lno) of
+ case catch tokenise(Stream,L,Lno,[]) of
{'ERR',Reason} ->
io:format("Tokeniser error on line: ~w ~w~n",[Lno,Reason]),
exit(0);
- {multiline_comment,NestingLevel} ->
- {RestL,Lno2} = process_skip_multiline_comment(Stream,Lno,NestingLevel),
- process(RestL,Stream,Lno2,R);
- T ->
+ {NewLno,T} ->
%%io:format('toks:~w~n',[T]),
- process(Stream,Lno,[T|R])
+ process(Stream,NewLno,[T|R])
end.
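%% Each call to tokenise/4 now returns {NewLno,T}, and NewLno can be
%% larger than Lno when a multiline comment consumed additional lines
%% from the stream; the loop above resumes counting from NewLno.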
-process_skip_multiline_comment(Stream,Lno,NestingLevel) ->
- process_skip_multiline_comment(io:get_line(Stream, ''),
- Stream, Lno + 1, NestingLevel).
-process_skip_multiline_comment(eof,_Stream,Lno,_NestingLevel) ->
- io:format("Tokeniser error on line: ~w, premature end of multiline comment~n",[Lno]),
- exit(0);
-process_skip_multiline_comment(Line,Stream,Lno,NestingLevel) ->
- case catch skip_multiline_comment(Line,NestingLevel) of
- {multiline_comment,NestingLevel2} ->
- process_skip_multiline_comment(Stream,Lno,NestingLevel2);
- T ->
- {T,Lno}
- end.
-
-tokenise([H|T],Lno) when $a =< H , H =< $z ->
+tokenise(Stream,[H|T],Lno,R) when $a =< H , H =< $z ->
{X, T1} = get_name(T, [H]),
- [{identifier,Lno, list_to_atom(X)}|tokenise(T1,Lno)];
+ tokenise(Stream,T1,Lno,[{identifier,Lno, list_to_atom(X)}|R]);
-tokenise([$&,H|T],Lno) when $A =< H , H =< $Z ->
+tokenise(Stream,[$&,H|T],Lno,R) when $A =< H , H =< $Z ->
{Y, T1} = get_name(T, [H]),
X = list_to_atom(Y),
- [{typefieldreference, Lno, X} | tokenise(T1, Lno)];
+ tokenise(Stream,T1,Lno,[{typefieldreference, Lno, X} | R]);
-tokenise([$&,H|T],Lno) when $a =< H , H =< $z ->
+tokenise(Stream,[$&,H|T],Lno,R) when $a =< H , H =< $z ->
{Y, T1} = get_name(T, [H]),
X = list_to_atom(Y),
- [{valuefieldreference, Lno, X} | tokenise(T1, Lno)];
+ tokenise(Stream,T1,Lno,[{valuefieldreference, Lno, X} | R]);
-tokenise([H|T],Lno) when $A =< H , H =< $Z ->
+tokenise(Stream,[H|T],Lno,R) when $A =< H , H =< $Z ->
{Y, T1} = get_name(T, [H]),
X = list_to_atom(Y),
case reserved_word(X) of
true ->
- [{X,Lno}|tokenise(T1,Lno)];
+ tokenise(Stream,T1,Lno,[{X,Lno}|R]);
false ->
- [{typereference,Lno,X}|tokenise(T1,Lno)];
+ tokenise(Stream,T1,Lno,[{typereference,Lno,X}|R]);
rstrtype ->
- [{restrictedcharacterstringtype,Lno,X}|tokenise(T1,Lno)]
+ tokenise(Stream,T1,Lno,[{restrictedcharacterstringtype,Lno,X}|R])
end;
-tokenise([$-,H|T],Lno) when $0 =< H , H =< $9 ->
+tokenise(Stream,[$-,H|T],Lno,R) when $0 =< H , H =< $9 ->
{X, T1} = get_number(T, [H]),
- [{number,Lno,-1 * list_to_integer(X)}|tokenise(T1,Lno)];
+ tokenise(Stream,T1,Lno,[{number,Lno,-1 * list_to_integer(X)}|R]);
-tokenise([H|T],Lno) when $0 =< H , H =< $9 ->
+tokenise(Stream,[H|T],Lno,R) when $0 =< H , H =< $9 ->
{X, T1} = get_number(T, [H]),
- [{number,Lno,list_to_integer(X)}|tokenise(T1,Lno)];
+ tokenise(Stream,T1,Lno,[{number,Lno,list_to_integer(X)}|R]);
-tokenise([$-,$-|T],Lno) ->
- tokenise(skip_comment(T),Lno);
+tokenise(Stream,[$-,$-|T],Lno,R) ->
+ tokenise(Stream,skip_comment(T),Lno,R);
-tokenise([$/,$*|T],Lno) ->
- tokenise(skip_multiline_comment(T,0),Lno);
+tokenise(Stream,[$/,$*|T],Lno,R) ->
+ {NewLno,T1} = skip_multiline_comment(Stream,T,Lno,0),
+ tokenise(Stream,T1,NewLno,R);
-tokenise([$:,$:,$=|T],Lno) ->
- [{'::=',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$:,$:,$=|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'::=',Lno}|R]);
-tokenise([$'|T],Lno) ->
+tokenise(Stream,[$'|T],Lno,R) ->
case catch collect_quoted(T,Lno,[]) of
{'ERR',_} ->
throw({'ERR','bad_quote'});
{Thing, T1} ->
- [Thing|tokenise(T1,Lno)]
+ tokenise(Stream,T1,Lno,[Thing|R])
end;
-tokenise([$"|T],Lno) ->
- collect_string(T,Lno);
+tokenise(Stream,[$"|T],Lno,R) ->
+ {Str,T1} = collect_string(T,Lno),
+ tokenise(Stream,T1,Lno,[Str|R]);
+
+tokenise(Stream,[${|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'{',Lno}|R]);
+
+tokenise(Stream,[$}|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'}',Lno}|R]);
-tokenise([${|T],Lno) ->
- [{'{',Lno}|tokenise(T,Lno)];
+%% tokenise(Stream,[$],$]|T],Lno,R) ->
+%% tokenise(Stream,T,Lno,[{']]',Lno}|R]);
-tokenise([$}|T],Lno) ->
- [{'}',Lno}|tokenise(T,Lno)];
+%% Even though X.680 specifies '[[' and ']]' as lexical items, it does
+%% not work to have them as such, since single '[' and ']' can be used
+%% beside each other in the SYNTAX OF in X.681. The solution chosen
+%% here, i.e. to have them as separate lexical items, will not detect
+%% the cases where there is white space between them, which would be
+%% an error when used in ExtensionAdditionGroups.
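+%%
+%% A hypothetical ExtensionAdditionGroup in which the pairs occur,
+%% each bracket being emitted as its own token:
+%%
+%%   Ax ::= SEQUENCE {
+%%       a INTEGER,
+%%       ...,
+%%       [[ b BOOLEAN,
+%%          c OCTET STRING ]]
+%%   }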
-tokenise([$]|T],Lno) ->
- [{']',Lno}|tokenise(T,Lno)];
+%% tokenise(Stream,[$[,$[|T],Lno,R) ->
+%% tokenise(Stream,T,Lno,[{'[[',Lno}|R]);
-tokenise([$[|T],Lno) ->
- [{'[',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$]|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{']',Lno}|R]);
-tokenise([$,|T],Lno) ->
- [{',',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$[|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'[',Lno}|R]);
-tokenise([$(|T],Lno) ->
- [{'(',Lno}|tokenise(T,Lno)];
-tokenise([$)|T],Lno) ->
- [{')',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$,|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{',',Lno}|R]);
-tokenise([$.,$.,$.|T],Lno) ->
- [{'...',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$(|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'(',Lno}|R]);
+tokenise(Stream,[$)|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{')',Lno}|R]);
-tokenise([$.,$.|T],Lno) ->
- [{'..',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$.,$.,$.|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'...',Lno}|R]);
-tokenise([$.|T],Lno) ->
- [{'.',Lno}|tokenise(T,Lno)];
-tokenise([$^|T],Lno) ->
- [{'^',Lno}|tokenise(T,Lno)];
-tokenise([$!|T],Lno) ->
- [{'!',Lno}|tokenise(T,Lno)];
-tokenise([$||T],Lno) ->
- [{'|',Lno}|tokenise(T,Lno)];
+tokenise(Stream,[$.,$.|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'..',Lno}|R]);
+tokenise(Stream,[$.|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'.',Lno}|R]);
+tokenise(Stream,[$^|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'^',Lno}|R]);
+tokenise(Stream,[$!|T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'!',Lno}|R]);
+tokenise(Stream,[$||T],Lno,R) ->
+ tokenise(Stream,T,Lno,[{'|',Lno}|R]);
-tokenise([H|T],Lno) ->
+tokenise(Stream,[H|T],Lno,R) ->
case white_space(H) of
true ->
- tokenise(T,Lno);
+ tokenise(Stream,T,Lno,R);
false ->
- [{list_to_atom([H]),Lno}|tokenise(T,Lno)]
+ tokenise(Stream,T,Lno,[{list_to_atom([H]),Lno}|R])
end;
-tokenise([],_) ->
- [].
+tokenise(_Stream,[],Lno,R) ->
+ {Lno,lists:reverse(R)}.
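+
+%% Hypothetical single-line run showing the new return shape:
+%%   tokenise(S, "Age ::= INTEGER", 1, []) yields
+%%   {1,[{typereference,1,'Age'},{'::=',1},{'INTEGER',1}]}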
collect_string(L,Lno) ->
@@ -181,7 +175,7 @@ collect_string([],_,_) ->
collect_string([H|T],Lno,Str) ->
case H of
$" ->
- [{cstring,1,lists:reverse(Str)}|tokenise(T,Lno)];
+ {{cstring,1,lists:reverse(Str)},T};
Ch ->
collect_string(T,Lno,[Ch|Str])
end.
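%% collect_string/3 now hands the token and the rest of the line back to
%% the caller instead of recursing into the tokeniser; hypothetically:
%%   collect_string("abc\" tail", 1, []) -> {{cstring,1,"abc"}," tail"}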
@@ -252,17 +246,23 @@ skip_comment([_|T]) ->
skip_comment(T).
-skip_multiline_comment([],L) ->
- throw({multiline_comment,L});
-skip_multiline_comment([$*,$/|T],0) ->
- T;
-skip_multiline_comment([$*,$/|T],Level) ->
- skip_multiline_comment(T,Level - 1);
-skip_multiline_comment([$/,$*|T],Level) ->
- skip_multiline_comment(T,Level + 1);
-skip_multiline_comment([_|T],Level) ->
- skip_multiline_comment(T,Level).
-
+skip_multiline_comment(Stream,[],Lno,Level) ->
+ case io:get_line(Stream,'') of
+ eof ->
+ io:format("Tokeniser error on line: ~w~n"
+ "premature end of multiline comment~n",[Lno]),
+ exit(0);
+ Line ->
+ skip_multiline_comment(Stream,Line,Lno+1,Level)
+ end;
+skip_multiline_comment(_Stream,[$*,$/|T],Lno,0) ->
+ {Lno,T};
+skip_multiline_comment(Stream,[$*,$/|T],Lno,Level) ->
+ skip_multiline_comment(Stream,T,Lno,Level - 1);
+skip_multiline_comment(Stream,[$/,$*|T],Lno,Level) ->
+ skip_multiline_comment(Stream,T,Lno,Level + 1);
+skip_multiline_comment(Stream,[_|T],Lno,Level) ->
+ skip_multiline_comment(Stream,T,Lno,Level).
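+
+%% Nesting illustration (hypothetical input): once tokenise/4 has seen
+%% the opening "/*", a rest of " a /* b */ c */ INTEGER" is consumed up
+%% to the matching "*/" via the Level counter, and {Lno," INTEGER"} is
+%% returned so tokenising resumes after the comment.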
collect_quoted([$',$B|T],Lno, L) ->
case check_bin(L) of