Diffstat (limited to 'lib/compiler/src/v3_codegen.erl')
-rw-r--r--  lib/compiler/src/v3_codegen.erl  237
1 file changed, 152 insertions(+), 85 deletions(-)
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index 34c67b16ca..47c1567f10 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -69,6 +69,10 @@
stk=[], %Stack table
res=[]}). %Reserved regs: [{reserved,I,V}]
+-type life_module() :: {module(),_,_,[_]}.
+
+-spec module(life_module(), [compile:option()]) -> {'ok',beam_asm:module_code()}.
+
module({Mod,Exp,Attr,Forms}, _Options) ->
{Fs,St} = functions(Forms, {atom,Mod}),
{ok,{Mod,Exp,Attr,Fs,St#cg.lcount}}.
@@ -151,6 +155,8 @@ cg({bif,Bif,As,Rs}, Le, Vdb, Bef, St) ->
bif_cg(Bif, As, Rs, Le, Vdb, Bef, St);
cg({gc_bif,Bif,As,Rs}, Le, Vdb, Bef, St) ->
gc_bif_cg(Bif, As, Rs, Le, Vdb, Bef, St);
+cg({internal,Bif,As,Rs}, Le, Vdb, Bef, St) ->
+ internal_cg(Bif, As, Rs, Le, Vdb, Bef, St);
cg({receive_loop,Te,Rvar,Rm,Tes,Rs}, Le, Vdb, Bef, St) ->
recv_loop_cg(Te, Rvar, Rm, Tes, Rs, Le, Vdb, Bef, St);
cg(receive_next, Le, Vdb, Bef, St) ->
@@ -208,15 +214,10 @@ need_heap_1(#l{ke={set,_,Val}}, H) ->
{tuple,Es} -> 1 + length(Es);
_Other -> 0
end};
-need_heap_1(#l{ke={bif,dsetelement,_As,_Rs},i=I}, H) ->
- {need_heap_need(I, H),0};
-need_heap_1(#l{ke={bif,{make_fun,_,_,_,_},_As,_Rs},i=I}, H) ->
- {need_heap_need(I, H),0};
-need_heap_1(#l{ke={bif,bs_init_writable,_As,_Rs},i=I}, H) ->
- {need_heap_need(I, H),0};
need_heap_1(#l{ke={bif,_Bif,_As,_Rs}}, H) ->
{[],H};
need_heap_1(#l{i=I}, H) ->
+ %% Call or call-like instruction such as set_tuple_element/3.
{need_heap_need(I, H),0}.
need_heap_need(_I, 0) -> [];
@@ -366,7 +367,7 @@ bsm_rename_ctx(#l{ke={match,Ms0,Rs}}=L, Old, New, InProt) ->
bsm_rename_ctx(#l{ke={guard_match,Ms0,Rs}}=L, Old, New, InProt) ->
Ms = bsm_rename_ctx(Ms0, Old, New, InProt),
L#l{ke={guard_match,Ms,Rs}};
-bsm_rename_ctx(#l{ke={test,_,_}}=L, _, _, _) -> L;
+bsm_rename_ctx(#l{ke={test,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={bif,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={gc_bif,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={set,_,_}}=L, _, _, _) -> L;
@@ -827,21 +828,24 @@ select_extract_bin([{var,Hd},{var,Tl}], Size0, Unit, Type, Flags, Vf,
{bs_save2,CtxReg,{Ctx,Tl}}],Int1}
end,
{Es,clear_dead(Aft, I, Vdb),St};
-select_extract_bin([{var,Hd}], Size0, Unit, binary, Flags, Vf,
+select_extract_bin([{var,Hd}], Size, Unit, binary, Flags, Vf,
I, Vdb, Bef, Ctx, Body, St) ->
- SizeReg = get_bin_size_reg(Size0, Bef),
+ %% Match the last segment of a binary. We KNOW that the size
+ %% must be 'all'.
+ Size = {atom,all}, %Assertion.
{Es,Aft} =
case vdb_find(Hd, Vdb) of
{_,_,Lhd} when Lhd =< I ->
+ %% The result will not be used. Furthermore, since we
+ %% are at the end of the binary, the position will
+ %% not be used again; thus, it is safe to do a cheaper
+ %% test of the unit.
CtxReg = fetch_var(Ctx, Bef),
- {case SizeReg =:= {atom,all} andalso is_context_unused(Body) of
- true when Unit =:= 1 ->
+ {case Unit of
+ 1 ->
[];
- true ->
- [{test,bs_test_unit,{f,Vf},[CtxReg,Unit]}];
- false ->
- [{test,bs_skip_bits2,{f,Vf},
- [CtxReg,SizeReg,Unit,{field_flags,Flags}]}]
+ _ ->
+ [{test,bs_test_unit,{f,Vf},[CtxReg,Unit]}]
end,Bef};
{_,_,_} ->
case is_context_unused(Body) of
@@ -853,7 +857,7 @@ select_extract_bin([{var,Hd}], Size0, Unit, binary, Flags, Vf,
Name = bs_get_binary2,
Live = max_reg(Bef#sr.reg),
{[{test,Name,{f,Vf},Live,
- [CtxReg,SizeReg,Unit,{field_flags,Flags}],Rhd}],
+ [CtxReg,Size,Unit,{field_flags,Flags}],Rhd}],
Int1};
true ->
%% Since the matching context will not be used again,
@@ -868,7 +872,7 @@ select_extract_bin([{var,Hd}], Size0, Unit, binary, Flags, Vf,
Name = bs_get_binary2,
Live = max_reg(Int1#sr.reg),
{[{test,Name,{f,Vf},Live,
- [CtxReg,SizeReg,Unit,{field_flags,Flags}],CtxReg}],
+ [CtxReg,Size,Unit,{field_flags,Flags}],CtxReg}],
Int1}
end
end,
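
To illustrate the Lhd =< I branch in the first select_extract_bin clause above, here is a rough source-level sketch with hypothetical functions (the tail variable is unused, so its lifetime ends at the match):

    f(<<X:8,_Tail/bits>>)   -> X.   %% bitstring tail: Unit = 1, no extra instruction
    g(<<X:8,_Tail/binary>>) -> X.   %% binary tail: Unit = 8, a bs_test_unit test is kept
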
@@ -1051,8 +1055,15 @@ guard_cg(#l{ke={protected,Ts,Rs},i=I,vdb=Pdb}, Fail, _Vdb, Bef, St) ->
protected_cg(Ts, Rs, Fail, I, Pdb, Bef, St);
guard_cg(#l{ke={block,Ts},i=I,vdb=Bdb}, Fail, _Vdb, Bef, St) ->
guard_cg_list(Ts, Fail, I, Bdb, Bef, St);
-guard_cg(#l{ke={test,Test,As},i=I,vdb=_Tdb}, Fail, Vdb, Bef, St) ->
- test_cg(Test, As, Fail, I, Vdb, Bef, St);
+guard_cg(#l{ke={test,Test,As,Inverted},i=I,vdb=_Tdb}, Fail, Vdb, Bef, St0) ->
+ case Inverted of
+ false ->
+ test_cg(Test, As, Fail, I, Vdb, Bef, St0);
+ true ->
+ {Psucc,St1} = new_label(St0),
+ {Is,Aft,St2} = test_cg(Test, As, Psucc, I, Vdb, Bef, St1),
+ {Is++[{jump,{f,Fail}},{label,Psucc}],Aft,St2}
+ end;
guard_cg(G, _Fail, Vdb, Bef, St) ->
%%ok = io:fwrite("cg ~w: ~p~n", [?LINE,{G,Fail,Vdb,Bef}]),
{Gis,Aft,St1} = cg(G, Vdb, Bef, St),
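
A minimal sketch of the code shape produced by the Inverted =:= true branch above, using a hypothetical is_atom test and made-up label numbers (Psucc = 5, Fail = 4): the test branches to Psucc when it fails, while the fall-through path, where the underlying test succeeded, jumps to the real failure label.

    [{test,is_atom,{f,5},[Arg]},  %% is_atom failed: inverted test succeeds at label 5
     {jump,{f,4}},                %% is_atom succeeded: inverted test fails
     {label,5}]
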
@@ -1086,6 +1097,30 @@ protected_cg(Ts, Rs, _Fail, I, Vdb, Bef, St0) ->
%% test_cg(TestName, Args, Fail, I, Vdb, Bef, St) -> {[Ainstr],Aft,St}.
%% Generate test instruction. Use explicit fail label here.
+test_cg(is_map, [A], Fail, I, Vdb, Bef, St) ->
+ %% We must avoid creating code like this:
+ %%
+ %% move x(0) y(0)
+ %% is_map Fail [x(0)]
+ %% make_fun => x(0) %% Overwrite x(0)
+ %% put_map_assoc y(0) ...
+ %%
+ %% The code is safe, but beam_validator does not understand that.
+ %% Extending beam_validator to handle such (rare) code as the
+ %% above would make it slower for all programs. Instead, change
+ %% the code generator to always prefer the Y register for is_map()
+ %% and put_map_assoc() instructions, ensuring that they use the
+ %% same register.
+ Arg = cg_reg_arg_prefer_y(A, Bef),
+ Aft = clear_dead(Bef, I, Vdb),
+ {[{test,is_map,{f,Fail},[Arg]}],Aft,St};
+test_cg(is_boolean, [{atom,Val}], Fail, I, Vdb, Bef, St) ->
+ Aft = clear_dead(Bef, I, Vdb),
+ Is = case is_boolean(Val) of
+ true -> [];
+ false -> [{jump,{f,Fail}}]
+ end,
+ {Is,Aft,St};
test_cg(Test, As, Fail, I, Vdb, Bef, St) ->
Args = cg_reg_args(As, Bef),
Aft = clear_dead(Bef, I, Vdb),
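
The literal-argument is_boolean clause above folds the test at compile time; a sketch with hypothetical values:

    %% test_cg(is_boolean, [{atom,true}], Fail, ...) emits []                 (always true)
    %% test_cg(is_boolean, [{atom,ok}],   Fail, ...) emits [{jump,{f,Fail}}]  (always false)
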
@@ -1152,19 +1187,15 @@ call_cg(Func, As, Rs, Le, Vdb, Bef, St0) ->
%% Inside a guard. The only allowed function call is to
%% erlang:error/1,2. We will generate the following code:
%%
- %% jump FailureLabel
%% move {atom,ok} DestReg
- %%
- %% The 'move' instruction will never be executed, but we
- %% generate it anyway in case the beam_validator is run
- %% on unoptimized code.
+ %% jump FailureLabel
{remote,{atom,erlang},{atom,error}} = Func, %Assertion.
[{var,DestVar}] = Rs,
Int0 = clear_dead(Bef, Le#l.i, Vdb),
Reg = put_reg(DestVar, Int0#sr.reg),
Int = Int0#sr{reg=Reg},
Dst = fetch_reg(DestVar, Reg),
- {[{jump,{f,Fail}},{move,{atom,ok},Dst}],
+ {[{move,{atom,ok},Dst},{jump,{f,Fail}}],
clear_dead(Int, Le#l.i, Vdb),St0};
#cg{} ->
%% Ordinary function call in a function body.
@@ -1285,10 +1316,10 @@ trap_bif(erlang, group_leader, 2) -> true;
trap_bif(erlang, exit, 2) -> true;
trap_bif(_, _, _) -> false.
-%% bif_cg(Bif, [Arg], [Ret], Le, Vdb, StackReg, State) ->
+%% internal_cg(Bif, [Arg], [Ret], Le, Vdb, StackReg, State) ->
%% {[Ainstr],StackReg,State}.
-bif_cg(bs_context_to_binary=Instr, [Src0], [], Le, Vdb, Bef, St0) ->
+internal_cg(bs_context_to_binary=Instr, [Src0], [], Le, Vdb, Bef, St0) ->
[Src] = cg_reg_args([Src0], Bef),
case is_register(Src) of
false ->
@@ -1296,25 +1327,34 @@ bif_cg(bs_context_to_binary=Instr, [Src0], [], Le, Vdb, Bef, St0) ->
true ->
{[{Instr,Src}],clear_dead(Bef, Le#l.i, Vdb), St0}
end;
-bif_cg(dsetelement, [Index0,Tuple0,New0], _Rs, Le, Vdb, Bef, St0) ->
+internal_cg(dsetelement, [Index0,Tuple0,New0], _Rs, Le, Vdb, Bef, St0) ->
[New,Tuple,{integer,Index1}] = cg_reg_args([New0,Tuple0,Index0], Bef),
Index = Index1-1,
{[{set_tuple_element,New,Tuple,Index}],
clear_dead(Bef, Le#l.i, Vdb), St0};
-bif_cg({make_fun,Func,Arity,Index,Uniq}, As, Rs, Le, Vdb, Bef, St0) ->
+internal_cg(make_fun, [Func0,Arity0|As], Rs, Le, Vdb, Bef, St0) ->
%% This behaves more like a function call.
+ {atom,Func} = Func0,
+ {integer,Arity} = Arity0,
{Sis,Int} = cg_setup_call(As, Bef, Le#l.i, Vdb),
Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
{FuncLbl,St1} = local_func_label(Func, Arity, St0),
- MakeFun = {make_fun2,{f,FuncLbl},Index,Uniq,length(As)},
+ MakeFun = {make_fun2,{f,FuncLbl},0,0,length(As)},
{Sis ++ [MakeFun],
clear_dead(Int#sr{reg=Reg}, Le#l.i, Vdb),
St1};
-bif_cg(bs_init_writable=I, As, Rs, Le, Vdb, Bef, St) ->
+internal_cg(bs_init_writable=I, As, Rs, Le, Vdb, Bef, St) ->
%% This behaves like a function call.
{Sis,Int} = cg_setup_call(As, Bef, Le#l.i, Vdb),
Reg = load_vars(Rs, clear_regs(Int#sr.reg)),
{Sis++[I],clear_dead(Int#sr{reg=Reg}, Le#l.i, Vdb),St};
+internal_cg(raise, As, Rs, Le, Vdb, Bef, St) ->
+ %% raise can be treated like a guard BIF.
+ bif_cg(raise, As, Rs, Le, Vdb, Bef, St).
+
+%% bif_cg(Bif, [Arg], [Ret], Le, Vdb, StackReg, State) ->
+%% {[Ainstr],StackReg,State}.
+
bif_cg(Bif, As, [{var,V}], Le, Vdb, Bef, St0) ->
Ars = cg_reg_args(As, Bef),
@@ -1327,12 +1367,13 @@ bif_cg(Bif, As, [{var,V}], Le, Vdb, Bef, St0) ->
%% that we save any variable that will be live after this BIF call.
MayFail = not erl_bifs:is_safe(erlang, Bif, length(As)),
- {Sis,Int0} = case St0#cg.in_catch andalso
- St0#cg.bfail =:= 0 andalso
- MayFail of
- true -> adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb);
- false -> {[],Bef}
- end,
+ {Sis,Int0} =
+ case MayFail of
+ true ->
+ maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0);
+ false ->
+ {[],Bef}
+ end,
Int1 = clear_dead(Int0, Le#l.i, Vdb),
Reg = put_reg(V, Int1#sr.reg),
Int = Int1#sr{reg=Reg},
@@ -1363,11 +1404,7 @@ gc_bif_cg(Bif, As, [{var,V}], Le, Vdb, Bef, St0) ->
%% Currently, we are somewhat pessimistic in
%% that we save any variable that will be live after this BIF call.
- {Sis,Int0} =
- case St0#cg.in_catch andalso St0#cg.bfail =:= 0 of
- true -> adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb);
- false -> {[],Bef}
- end,
+ {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
Int1 = clear_dead(Int0, Le#l.i, Vdb),
Reg = put_reg(V, Int1#sr.reg),
@@ -1512,8 +1549,7 @@ set_cg([{var,R}], {cons,Es}, Le, Vdb, Bef, St) ->
Int1 = Int0#sr{reg=put_reg(R, Int0#sr.reg)},
Ret = fetch_reg(R, Int1#sr.reg),
{[{put_list,S1,S2,Ret}], Int1, St};
-set_cg([{var,R}], {binary,Segs}, Le, Vdb, Bef,
- #cg{in_catch=InCatch, bfail=Bfail}=St) ->
+set_cg([{var,R}], {binary,Segs}, Le, Vdb, Bef, #cg{bfail=Bfail}=St) ->
%% At run-time, binaries are constructed in three stages:
%% 1) First the size of the binary is calculated.
%% 2) Then the binary is allocated.
@@ -1532,28 +1568,19 @@ set_cg([{var,R}], {binary,Segs}, Le, Vdb, Bef,
%% First generate the code that constructs each field.
Fail = {f,Bfail},
PutCode = cg_bin_put(Segs, Fail, Bef),
- {Sis,Int1} =
- case InCatch of
- true -> adjust_stack(Int0, Le#l.i, Le#l.i+1, Vdb);
- false -> {[],Int0}
- end,
+ {Sis,Int1} = maybe_adjust_stack(Int0, Le#l.i, Le#l.i+1, Vdb, St),
MaxRegs = max_reg(Bef#sr.reg),
Aft = clear_dead(Int1, Le#l.i, Vdb),
%% Now generate the complete code for constructing the binary.
Code = cg_binary(PutCode, Target, Temp, Fail, MaxRegs, Le#l.a),
{Sis++Code,Aft,St};
-% Map single variable key
-set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef,
- #cg{in_catch=InCatch,bfail=Bfail}=St) ->
- Fail = {f,Bfail},
- {Sis,Int0} =
- case InCatch of
- true -> adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb);
- false -> {[],Bef}
- end,
- SrcReg = cg_reg_arg(Map,Int0),
+%% Map: single variable key.
+set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef, St0) ->
+ {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
+
+ SrcReg = cg_reg_arg_prefer_y(Map, Int0),
Line = line(Le#l.a),
List = [cg_reg_arg(K,Int0),cg_reg_arg(V,Int0)],
@@ -1565,26 +1592,17 @@ set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef,
Aft = Aft0#sr{reg=put_reg(R, Aft0#sr.reg)},
Target = fetch_reg(R, Aft#sr.reg),
- I = case Op of
- assoc -> put_map_assoc;
- exact -> put_map_exact
- end,
- {Sis++[Line]++[{I,Fail,SrcReg,Target,Live,{list,List}}],Aft,St};
+ {Is,St1} = set_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
+ {Sis++Is,Aft,St1};
-% Map (possibly) multiple literal keys
-set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef,
- #cg{in_catch=InCatch,bfail=Bfail}=St) ->
+%% Map: (possibly) multiple literal keys.
+set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef, St0) ->
%% assert key literals
[] = [Var||{map_pair,{var,_}=Var,_} <- Es],
- Fail = {f,Bfail},
- {Sis,Int0} =
- case InCatch of
- true -> adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb);
- false -> {[],Bef}
- end,
- SrcReg = cg_reg_arg(Map,Int0),
+ {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
+ SrcReg = cg_reg_arg_prefer_y(Map, Int0),
Line = line(Le#l.a),
%% fetch registers for values to be put into the map
@@ -1598,11 +1616,10 @@ set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef,
Aft = Aft0#sr{reg=put_reg(R, Aft0#sr.reg)},
Target = fetch_reg(R, Aft#sr.reg),
- I = case Op of
- assoc -> put_map_assoc;
- exact -> put_map_exact
- end,
- {Sis++[Line]++[{I,Fail,SrcReg,Target,Live,{list,List}}],Aft,St};
+ {Is,St1} = set_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
+ {Sis++Is,Aft,St1};
+
+%% Everything else.
set_cg([{var,R}], Con, Le, Vdb, Bef, St) ->
%% Find a place for the return register first.
Int = Bef#sr{reg=put_reg(R, Bef#sr.reg)},
@@ -1615,6 +1632,34 @@ set_cg([{var,R}], Con, Le, Vdb, Bef, St) ->
end,
{Ais,clear_dead(Int, Le#l.i, Vdb),St}.
+
+set_cg_map(Line, Op0, SrcReg, Target, Live, List, St0) ->
+ Bfail = St0#cg.bfail,
+ Fail = {f,St0#cg.bfail},
+ Op = case Op0 of
+ assoc -> put_map_assoc;
+ exact -> put_map_exact
+ end,
+ {OkLbl,St1} = new_label(St0),
+ {BadLbl,St2} = new_label(St1),
+ Is = if
+ Bfail =:= 0 orelse Op =:= put_map_assoc ->
+ [Line,{Op,{f,0},SrcReg,Target,Live,{list,List}}];
+ true ->
+ %% Ensure that Target is always set, even if
+ %% the map update operation fails. That is necessary
+ %% because Target may be included in a test_heap
+ %% instruction.
+ [Line,
+ {Op,{f,BadLbl},SrcReg,Target,Live,{list,List}},
+ {jump,{f,OkLbl}},
+ {label,BadLbl},
+ {move,{atom,ok},Target},
+ {jump,Fail},
+ {label,OkLbl}]
+ end,
+ {Is,St2}.
+
%%%
%%% Code generation for constructing binaries.
%%%
@@ -1857,6 +1902,9 @@ cg_reg_args(As, Bef) -> [cg_reg_arg(A, Bef) || A <- As].
cg_reg_arg({var,V}, Bef) -> fetch_var(V, Bef);
cg_reg_arg(Literal, _) -> Literal.
+cg_reg_arg_prefer_y({var,V}, Bef) -> fetch_var_prefer_y(V, Bef);
+cg_reg_arg_prefer_y(Literal, _) -> Literal.
+
%% cg_setup_call([Arg], Bef, Cur, Vdb) -> {[Instr],Aft}.
%% Do the complete setup for a call/enter.
@@ -2038,6 +2086,19 @@ trim_free([R|Rs0]) ->
end;
trim_free([]) -> [].
+%% maybe_adjust_stack(Bef, FirstBefore, LastFrom, Vdb, St) -> {[Ainstr],Aft}.
+%% Adjust the stack, but only if the code is inside a catch and not
+%% inside a guard. Use this function before instructions that may
+%% cause an exception.
+
+maybe_adjust_stack(Bef, Fb, Lf, Vdb, St) ->
+ case St of
+ #cg{in_catch=true,bfail=0} ->
+ adjust_stack(Bef, Fb, Lf, Vdb);
+ #cg{} ->
+ {[],Bef}
+ end.
+
%% adjust_stack(Bef, FirstBefore, LastFrom, Vdb) -> {[Ainstr],Aft}.
%% Do complete stack adjustment by compressing stack and adding
%% variables to be saved. Try to optimise ordering on stack by
@@ -2085,6 +2146,12 @@ fetch_var(V, Sr) ->
error -> fetch_stack(V, Sr#sr.stk)
end.
+fetch_var_prefer_y(V, #sr{reg=Reg,stk=Stk}) ->
+ case find_stack(V, Stk) of
+ {ok,R} -> R;
+ error -> fetch_reg(V, Reg)
+ end.
+
load_vars(Vs, Regs) ->
foldl(fun ({var,V}, Rs) -> put_reg(V, Rs) end, Regs, Vs).
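
A small sketch (with made-up #sr{} contents) of how the new fetch_var_prefer_y/2 differs from fetch_var/2 when a variable V is live both in an X register and in a stack slot:

    %% Assume V is in x(2) and in stack slot 0.
    %%   fetch_var(V, Sr)          -> {x,2}    %% prefers the register
    %%   fetch_var_prefer_y(V, Sr) -> {yy,0}   %% prefers the stack slot
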
@@ -2158,11 +2225,11 @@ fetch_stack(Var, Stk) -> fetch_stack(Var, Stk, 0).
fetch_stack(V, [{V}|_], I) -> {yy,I};
fetch_stack(V, [_|Stk], I) -> fetch_stack(V, Stk, I+1).
-% find_stack(Var, Stk) -> find_stack(Var, Stk, 0).
+find_stack(Var, Stk) -> find_stack(Var, Stk, 0).
-% find_stack(V, [{V}|Stk], I) -> {ok,{yy,I}};
-% find_stack(V, [O|Stk], I) -> find_stack(V, Stk, I+1);
-% find_stack(V, [], I) -> error.
+find_stack(V, [{V}|_], I) -> {ok,{yy,I}};
+find_stack(V, [_|Stk], I) -> find_stack(V, Stk, I+1);
+find_stack(_, [], _) -> error.
on_stack(V, Stk) -> keymember(V, 1, Stk).