File 5231-stdlib-Optimize-gen_server-multi_call.patch of Package erlang

From d527c30df1cfa7c87c82e235ab365ce185112691 Mon Sep 17 00:00:00 2001
From: Rickard Green <rickard@erlang.org>
Date: Fri, 6 Jan 2023 04:00:59 +0100
Subject: [PATCH] [stdlib] Optimize gen_server:multi_call()

Utilize the "new" selective receive optimizations (introduced in OTP 24) and
process aliases (also introduced in OTP 24) to implement gen_server:multi_call()
without the use of a middleman process. Also remove support for communication
with R6B nodes.
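
For reference, the per-node call pattern that the new implementation builds
on boils down to roughly the sketch below. The function name sketch_call/3
and the {ok, _}/{error, _} return shapes are invented for illustration, and
the handling of dead nodes, bad input, timeouts and cleanup done by the real
mc_send/6 and mc_recv/5 is omitted. Note that [[alias|Alias]|Mon] is an
improper list, which is why the real code carries a no_improper_lists
dialyzer suppression.

    %% Illustrative only: call one registered server and wait for either
    %% the reply or the monitor-down message, both routed via a fresh alias.
    sketch_call(Node, Name, Request) ->
        Alias = alias(),
        %% Retag the 'DOWN' message with the alias so that both messages we
        %% wait for match on references created just before the receive,
        %% letting the runtime skip older messages in the queue.
        Mon = erlang:monitor(process, {Name, Node}, [{tag, Alias}]),
        _ = erlang:send({Name, Node},
                        {'$gen_call', {self(), [[alias|Alias]|Mon]}, Request},
                        [noconnect]),
        receive
            {[[alias|Alias]|Mon], Reply} ->
                erlang:demonitor(Mon, [flush]),
                unalias(Alias),
                {ok, Reply};
            {Alias, Mon, process, _, Reason} ->
                unalias(Alias),
                {error, Reason}
        end.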
---
 lib/stdlib/src/gen_server.erl        | 269 ++++++++----------------
 lib/stdlib/test/gen_server_SUITE.erl | 302 ++++++++++++++++++++++++++-
 2 files changed, 387 insertions(+), 184 deletions(-)

diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl
index d920aa9fcd..e999d56057 100644
--- a/lib/stdlib/src/gen_server.erl
+++ b/lib/stdlib/src/gen_server.erl
@@ -665,7 +665,7 @@ do_abcast([], _,_) -> abcast.
 %%
 multi_call(Name, Request)
   when is_atom(Name) ->
-    do_multi_call([node() | nodes()], Name, Request, infinity).
+    multi_call([node() | nodes()], Name, Request, infinity).
 
 -spec multi_call(
         Nodes   :: [node()],
@@ -679,7 +679,7 @@ multi_call(Name, Request)
 %%
 multi_call(Nodes, Name, Request)
   when is_list(Nodes), is_atom(Name) ->
-    do_multi_call(Nodes, Name, Request, infinity).
+    multi_call(Nodes, Name, Request, infinity).
 
 -spec multi_call(
         Nodes   :: [node()],
@@ -694,8 +694,93 @@ multi_call(Nodes, Name, Request)
 %%
 multi_call(Nodes, Name, Request, Timeout)
   when is_list(Nodes), is_atom(Name), ?is_timeout(Timeout) ->
-    do_multi_call(Nodes, Name, Request, Timeout).
+    Alias = alias(),
+    try
+        Timer = if Timeout == infinity -> undefined;
+                   true -> erlang:start_timer(Timeout, self(), Alias)
+                end,
+        Reqs = mc_send(Nodes, Name, Alias, Request, Timer, []),
+        mc_recv(Reqs, Alias, Timer, [], [])
+    after
+        _ = unalias(Alias)
+    end.
+
+-dialyzer({no_improper_lists, mc_send/6}).
+
+mc_send([], _Name, _Alias, _Request, _Timer, Reqs) ->
+    Reqs;
+mc_send([Node|Nodes], Name, Alias, Request, Timer, Reqs) when is_atom(Node) ->
+    NN = {Name, Node},
+    Mon = try
+              erlang:monitor(process, NN, [{tag, Alias}])
+          catch
+              error:badarg ->
+                  %% Node not alive...
+                  M = make_ref(),
+                  Alias ! {Alias, M, process, NN, noconnection},
+                  M
+          end,
+    try
+        %% We use 'noconnect' since there is no point in bringing up a new
+        %% connection if it was not brought up by the monitor signal...
+        _ = erlang:send(NN,
+                        {'$gen_call', {self(), [[alias|Alias]|Mon]}, Request},
+                        [noconnect]),
+        ok
+    catch
+        _:_ ->
+            ok
+    end,
+    mc_send(Nodes, Name, Alias, Request, Timer, [[Node|Mon]|Reqs]);
+mc_send(_BadNodes, _Name, Alias, _Request, Timer, Reqs) ->
+    %% Cleanup then fail...
+    unalias(Alias),
+    mc_cancel_timer(Timer, Alias),
+    _ = mc_recv_tmo(Reqs, Alias, [], []),
+    error(badarg).
+
+mc_recv([], Alias, Timer, Replies, BadNodes) ->
+    mc_cancel_timer(Timer, Alias),
+    unalias(Alias),
+    {Replies, BadNodes};
+mc_recv([[Node|Mon] | RestReqs] = Reqs, Alias, Timer, Replies, BadNodes) ->
+    receive
+        {[[alias|Alias]|Mon], Reply} ->
+            erlang:demonitor(Mon, [flush]),
+            mc_recv(RestReqs, Alias, Timer, [{Node,Reply}|Replies], BadNodes);
+        {Alias, Mon, process, _, _} ->
+            mc_recv(RestReqs, Alias, Timer, Replies, [Node|BadNodes]);
+        {timeout, Timer, Alias} ->
+            unalias(Alias),
+            mc_recv_tmo(Reqs, Alias, Replies, BadNodes)
+    end.
+
+mc_recv_tmo([], _Alias, Replies, BadNodes) ->
+    {Replies, BadNodes};
+mc_recv_tmo([[Node|Mon] | RestReqs], Alias, Replies, BadNodes) ->
+    erlang:demonitor(Mon),
+    receive
+        {[[alias|Alias]|Mon], Reply} ->
+            mc_recv_tmo(RestReqs, Alias, [{Node,Reply}|Replies], BadNodes);
+        {Alias, Mon, process, _, _} ->
+            mc_recv_tmo(RestReqs, Alias, Replies, [Node|BadNodes])
+    after
+        0 ->
+            mc_recv_tmo(RestReqs, Alias, Replies, [Node|BadNodes])
+    end.
 
+mc_cancel_timer(undefined, _Alias) ->
+    ok;
+mc_cancel_timer(Timer, Alias) ->
+    case erlang:cancel_timer(Timer) of
+        false ->
+            receive
+                {timeout, Timer, Alias} ->
+                    ok
+            end;
+        _ ->
+            ok
+    end.
 
 %%-----------------------------------------------------------------
 %% enter_loop(Mod, Options, State, <ServerName>, <TimeOut>) ->_ 
@@ -953,184 +1038,6 @@ do_send(Dest, Msg) ->
     end,
     ok.
 
-do_multi_call([Node], Name, Req, infinity) when Node =:= node() ->
-    % Special case when multi_call is used with local node only.
-    % In that case we can leverage the benefit of recv_mark optimisation
-    % existing in simple gen:call.
-    try gen:call(Name, '$gen_call', Req, infinity) of
-        {ok, Res} -> {[{Node, Res}],[]}
-    catch exit:_ ->
-        {[], [Node]}
-    end;
-do_multi_call(Nodes, Name, Req, infinity) ->
-    Tag = make_ref(),
-    Monitors = send_nodes(Nodes, Name, Tag, Req),
-    rec_nodes(Tag, Monitors, Name, undefined);
-do_multi_call(Nodes, Name, Req, Timeout) ->
-    Tag = make_ref(),
-    Caller = self(),
-    Receiver =
-	spawn(
-	  fun() ->
-		  %% Middleman process. Should be unsensitive to regular
-		  %% exit signals. The sychronization is needed in case
-		  %% the receiver would exit before the caller started
-		  %% the monitor.
-		  process_flag(trap_exit, true),
-		  Mref = erlang:monitor(process, Caller),
-		  receive
-		      {Caller,Tag} ->
-			  Monitors = send_nodes(Nodes, Name, Tag, Req),
-			  TimerId = erlang:start_timer(Timeout, self(), ok),
-			  Result = rec_nodes(Tag, Monitors, Name, TimerId),
-			  exit({self(),Tag,Result});
-		      {'DOWN',Mref,_,_,_} ->
-			  %% Caller died before sending us the go-ahead.
-			  %% Give up silently.
-			  exit(normal)
-		  end
-	  end),
-    Mref = erlang:monitor(process, Receiver),
-    Receiver ! {self(),Tag},
-    receive
-	{'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
-	    Result;
-	{'DOWN',Mref,_,_,Reason} ->
-	    %% The middleman code failed. Or someone did 
-	    %% exit(_, kill) on the middleman process => Reason==killed
-	    exit(Reason)
-    end.
-
-send_nodes(Nodes, Name, Tag, Req) ->
-    send_nodes(Nodes, Name, Tag, Req, []).
-
-send_nodes([Node|Tail], Name, Tag, Req, Monitors)
-  when is_atom(Node) ->
-    Monitor = start_monitor(Node, Name),
-    %% Handle non-existing names in rec_nodes.
-    catch {Name, Node} ! {'$gen_call', {self(), {Tag, Node}}, Req},
-    send_nodes(Tail, Name, Tag, Req, [Monitor | Monitors]);
-send_nodes([_Node|Tail], Name, Tag, Req, Monitors) ->
-    %% Skip non-atom Node
-    send_nodes(Tail, Name, Tag, Req, Monitors);
-send_nodes([], _Name, _Tag, _Req, Monitors) -> 
-    Monitors.
-
-%% Against old nodes:
-%% If no reply has been delivered within 2 secs. (per node) check that
-%% the server really exists and wait for ever for the answer.
-%%
-%% Against contemporary nodes:
-%% Wait for reply, server 'DOWN', or timeout from TimerId.
-
-rec_nodes(Tag, Nodes, Name, TimerId) -> 
-    rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId).
-
-rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) ->
-    receive
-	{'DOWN', R, _, _, _} ->
-	    rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId);
-	{{Tag, N}, Reply} ->  %% Tag is bound !!!
-	    erlang:demonitor(R, [flush]),
-	    rec_nodes(Tag, Tail, Name, Badnodes, 
-		      [{N,Reply}|Replies], Time, TimerId);
-	{timeout, TimerId, _} ->	
-	    erlang:demonitor(R, [flush]),
-	    %% Collect all replies that already have arrived
-	    rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
-    end;
-rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) ->
-    %% R6 node
-    receive
-	{nodedown, N} ->
-	    monitor_node(N, false),
-	    rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId);
-	{{Tag, N}, Reply} ->  %% Tag is bound !!!
-	    receive {nodedown, N} -> ok after 0 -> ok end,
-	    monitor_node(N, false),
-	    rec_nodes(Tag, Tail, Name, Badnodes,
-		      [{N,Reply}|Replies], 2000, TimerId);
-	{timeout, TimerId, _} ->	
-	    receive {nodedown, N} -> ok after 0 -> ok end,
-	    monitor_node(N, false),
-	    %% Collect all replies that already have arrived
-	    rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies)
-    after Time ->
-	    case rpc:call(N, erlang, whereis, [Name]) of
-		Pid when is_pid(Pid) -> % It exists try again.
-		    rec_nodes(Tag, [N|Tail], Name, Badnodes,
-			      Replies, infinity, TimerId);
-		_ -> % badnode
-		    receive {nodedown, N} -> ok after 0 -> ok end,
-		    monitor_node(N, false),
-		    rec_nodes(Tag, Tail, Name, [N|Badnodes],
-			      Replies, 2000, TimerId)
-	    end
-    end;
-rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) ->
-    case catch erlang:cancel_timer(TimerId) of
-	false ->  % It has already sent it's message
-	    receive
-		{timeout, TimerId, _} -> ok
-	    after 0 ->
-		    ok
-	    end;
-	_ -> % Timer was cancelled, or TimerId was 'undefined'
-	    ok
-    end,
-    {Replies, Badnodes}.
-
-%% Collect all replies that already have arrived
-rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) ->
-    receive
-	{'DOWN', R, _, _, _} ->
-	    rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
-	{{Tag, N}, Reply} -> %% Tag is bound !!!
-	    erlang:demonitor(R, [flush]),
-	    rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
-    after 0 ->
-	    erlang:demonitor(R, [flush]),
-	    rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
-    end;
-rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) ->
-    %% R6 node
-    receive
-	{nodedown, N} ->
-	    monitor_node(N, false),
-	    rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
-	{{Tag, N}, Reply} ->  %% Tag is bound !!!
-	    receive {nodedown, N} -> ok after 0 -> ok end,
-	    monitor_node(N, false),
-	    rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
-    after 0 ->
-	    receive {nodedown, N} -> ok after 0 -> ok end,
-	    monitor_node(N, false),
-	    rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
-    end;
-rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) ->
-    {Replies, Badnodes}.
-
-
-%%% ---------------------------------------------------
-%%% Monitor functions
-%%% ---------------------------------------------------
-
-start_monitor(Node, Name) when is_atom(Node), is_atom(Name) ->
-    if node() =:= nonode@nohost, Node =/= nonode@nohost ->
-	    Ref = make_ref(),
-	    self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
-	    {Node, Ref};
-       true ->
-	    case catch erlang:monitor(process, {Name, Node}) of
-		{'EXIT', _} ->
-		    %% Remote node is R6
-		    monitor_node(Node, true),
-		    Node;
-		Ref when is_reference(Ref) ->
-		    {Node, Ref}
-	    end
-    end.
-
 %% ---------------------------------------------------
 %% Helper functions for try-catch of callbacks.
 %% Returns the return value of the callback, or
diff --git a/lib/stdlib/test/gen_server_SUITE.erl b/lib/stdlib/test/gen_server_SUITE.erl
index 5fa604d4fd..aa26099ad3 100644
--- a/lib/stdlib/test/gen_server_SUITE.erl
+++ b/lib/stdlib/test/gen_server_SUITE.erl
@@ -30,8 +30,12 @@
          send_request_receive_reqid_collection/1,
          send_request_wait_reqid_collection/1,
          send_request_check_reqid_collection/1,
-         cast/1, cast_fast/1,
-	 continue/1, info/1, abcast/1, multicall/1, multicall_down/1,
+         cast/1, cast_fast/1, continue/1, info/1, abcast/1,
+         multicall/1, multicall_down/1, multicall_remote/1,
+         multicall_remote_old1/1, multicall_remote_old2/1,
+         multicall_recv_opt_success/1,
+         multicall_recv_opt_timeout/1,
+         multicall_recv_opt_noconnection/1,
 	 call_remote1/1, call_remote2/1, call_remote3/1, calling_self/1,
 	 call_remote_n1/1, call_remote_n2/1, call_remote_n3/1, spec_init/1,
 	 spec_init_local_registered_parent/1, 
@@ -59,11 +63,22 @@
 	 spec_init_anonymous_default_timeout/1,
 	 spec_init_not_proc_lib/1, cast_fast_messup/0]).
 
+%% Internal test specific exports
+-export([multicall_srv_ctrlr/2, multicall_suspender/2]).
 
 %% The gen_server behaviour
 -export([init/1, handle_call/3, handle_cast/2, handle_continue/2,
 	 handle_info/2, code_change/3, terminate/2, format_status/2]).
 
+%% This module needs to compile on old nodes...
+-ifndef(CT_PEER).
+-define(CT_PEER(), {ok, undefined, undefined}).
+-define(CT_PEER(Opts), {ok, undefined, undefined}).
+-endif.
+-ifndef(CT_PEER_REL).
+-define(CT_PEER_REL(Opts, Release, PrivDir), {ok, undefined, undefined}).
+-endif.
+
 suite() ->
     [{ct_hooks,[ts_install_cth]},
      {timetrap,{minutes,1}}].
@@ -72,7 +87,9 @@ all() ->
     [start, {group,stop}, crash, call, send_request,
      send_request_receive_reqid_collection, send_request_wait_reqid_collection,
      send_request_check_reqid_collection, cast, cast_fast, info, abcast,
-     continue, multicall, multicall_down, call_remote1, call_remote2, calling_self,
+     continue,
+     {group, multi_call},
+     call_remote1, call_remote2, calling_self,
      call_remote3, call_remote_n1, call_remote_n2,
      call_remote_n3, spec_init,
      spec_init_local_registered_parent,
@@ -87,6 +104,13 @@ all() ->
 groups() -> 
     [{stop, [],
       [stop1, stop2, stop3, stop4, stop5, stop6, stop7, stop8, stop9, stop10]},
+     {multi_call, [], [{group, multi_call_parallel}, {group, multi_call_sequence}]},
+     {multi_call_parallel, [parallel],
+      [multicall, multicall_down, multicall_remote, multicall_remote_old1,
+       multicall_remote_old2]},
+     {multi_call_sequence, [],
+      [multicall_recv_opt_success, multicall_recv_opt_timeout,
+       multicall_recv_opt_noconnection]},
      {format_status, [],
       [call_format_status, error_format_status, terminate_crash_format,
        crash_in_format_status, throw_in_format_status, format_all_status]},
@@ -1434,6 +1458,251 @@ busy_wait_for_process(Pid,N) ->
 	_ ->
 	    ok
     end.
+
+multicall_remote(Config) when is_list(Config) ->
+    PNs = lists:map(fun (_) ->
+                            {ok, P, N} = ?CT_PEER(),
+                            {P, N}
+                    end, lists:seq(1, 4)),
+    multicall_remote_test(PNs, ?FUNCTION_NAME),
+    ok.
+
+multicall_remote_old1(Config) when is_list(Config) ->
+    multicall_remote_old_test(Config, 1, ?FUNCTION_NAME).
+
+multicall_remote_old2(Config) when is_list(Config) ->
+    multicall_remote_old_test(Config, 2, ?FUNCTION_NAME).
+
+
+multicall_remote_old_test(Config, OldN, Name) ->
+    try
+        {OldRelName, OldRel} = old_release(OldN),
+        PD = proplists:get_value(priv_dir, Config),
+        PNs = lists:map(fun (I) ->
+                                Dir = atom_to_list(Name)++"-"++integer_to_list(I),
+                                AbsDir = filename:join([PD, Dir]),
+                                ok = file:make_dir(AbsDir),
+                                case ?CT_PEER_REL(#{connection => 0}, OldRelName, AbsDir) of
+                                    not_available ->
+                                        throw({skipped, "No OTP "++OldRel++" available"});
+                                    {ok, P, N} ->
+                                        {P, N}
+                                end
+                        end, lists:seq(1, 4)),
+        OldNodes = lists:map(fun ({_, N}) -> N end, PNs),
+        %% Recompile on one old node and load this on all old nodes...
+        SrcFile = filename:rootname(code:which(?MODULE)) ++ ".erl",
+        {ok, ?MODULE, BeamCode} = erpc:call(hd(OldNodes), compile, file, [SrcFile, [binary]]),
+        LoadResult = lists:duplicate(length(OldNodes), {ok, {module, ?MODULE}}),
+        LoadResult = erpc:multicall(OldNodes, code, load_binary, [?MODULE, SrcFile, BeamCode]),
+        multicall_remote_test(PNs, Name)
+    catch
+        throw:Res ->
+            Res
+    end.
+
+multicall_remote_test([{Peer1, Node1},
+                       {Peer2, Node2},
+                       {Peer3, Node3},
+                       {Peer4, Node4}],
+                      Name) ->
+    Tester = self(),
+    ThisNode = node(),
+
+    Nodes = [Node1, Node2, Node3, Node4, ThisNode],
+
+    SrvList =
+        lists:map(fun (Node) ->
+                          Ctrl = spawn_link(Node, ?MODULE,
+                                            multicall_srv_ctrlr,
+                                            [Tester, Name]),
+                          receive
+                              {Ctrl, _Srv} = Procs ->
+                                  {Node, Procs}
+                          end
+                  end, Nodes),
+    SrvMap = maps:from_list(SrvList),
+
+    Res0 = {lists:map(fun (Node) ->
+                              {Node,ok}
+                      end, Nodes), []},
+
+    Res0 = gen_server:multi_call(Nodes, Name, started_p),
+
+    true = try
+               _ = gen_server:multi_call([Node1, Node2, Node3, node(), {Node4}],
+                                         Name, {delayed_answer,1}),
+               false
+           catch
+               _:_ ->
+                   true
+           end,
+
+    Res1 = {lists:map(fun (Node) ->
+                              {Node,delayed}
+                      end, Nodes), []},
+
+    Res1 = gen_server:multi_call(Nodes, Name, {delayed_answer,1}),
+
+    Res2 = {[], Nodes},
+
+    Start = erlang:monotonic_time(millisecond),
+    Res2 = gen_server:multi_call(Nodes, Name, {delayed_answer,1000}, 100),
+    End = erlang:monotonic_time(millisecond),
+    Time = End-Start,
+    ct:log("Time: ~p ms~n", [Time]),
+    true = 200 >= Time,
+
+    {Ctrl2, Srv2} = maps:get(Node2, SrvMap),
+    unlink(Ctrl2),
+    exit(Ctrl2, kill),
+    wait_until(fun () ->
+                       false == erpc:call(Node2, erlang,
+                                          is_process_alive, [Srv2])
+               end),
+
+    {Ctrl3, _Srv3} = maps:get(Node3, SrvMap),
+    unlink(Ctrl3),
+    peer:stop(Peer3),
+
+    {Ctrl4, Srv4} = maps:get(Node4, SrvMap),
+    Spndr = spawn_link(Node4, ?MODULE, multicall_suspender, [Tester, Srv4]),
+
+    Res3 = {[{Node1, delayed}, {ThisNode, delayed}],
+            [Node2, Node3, Node4]},
+
+    Res3 = gen_server:multi_call(Nodes, Name, {delayed_answer,1}, 1000),
+
+    Spndr ! {Tester, resume_it},
+
+    receive Msg -> ct:fail({unexpected_msg, Msg})
+    after 1000 -> ok
+    end,
+
+    unlink(Ctrl4),
+
+    {Ctrl1, _Srv1} = maps:get(Node1, SrvMap),
+
+    unlink(Ctrl1),
+
+    peer:stop(Peer1),
+    peer:stop(Peer2),
+    peer:stop(Peer4),
+
+    ok.
+
+multicall_srv_ctrlr(Tester, Name) ->
+    {ok, Srv} = gen_server:start_link({local, Name},
+                                      gen_server_SUITE, [], []),
+    Tester ! {self(), Srv},
+    receive after infinity -> ok end.
+
+multicall_suspender(Tester, Suspendee) ->
+    true = erlang:suspend_process(Suspendee),
+    receive
+        {Tester, resume_it} ->
+            erlang:resume_process(Suspendee)
+    end.
+
+multicall_recv_opt_success(Config) when is_list(Config) ->
+    multicall_recv_opt_test(success).
+
+multicall_recv_opt_timeout(Config) when is_list(Config) ->
+    multicall_recv_opt_test(timeout).
+
+multicall_recv_opt_noconnection(Config) when is_list(Config) ->
+    multicall_recv_opt_test(noconnection).
+
+multicall_recv_opt_test(Type) ->
+    Tester = self(),
+    Name = ?FUNCTION_NAME,
+    Loops = 1000,
+    HugeMsgQ = 500000,
+    process_flag(message_queue_data, off_heap),
+
+    {ok, Peer1, Node1} = ?CT_PEER(),
+    {ok, Peer2, Node2} = ?CT_PEER(),
+
+    if Type == noconnection -> peer:stop(Peer2);
+       true -> ok
+    end,
+
+    Nodes = [Node1, Node2],
+
+    SrvList =
+        lists:map(fun (Node) ->
+                          Ctrl = spawn_link(Node, ?MODULE,
+                                            multicall_srv_ctrlr,
+                                            [Tester, Name]),
+                          receive
+                              {Ctrl, _Srv} = Procs ->
+                                  {Node, Procs}
+                          end
+                  end,
+                  if Type == noconnection -> [Node1];
+                     true -> Nodes
+                  end),
+
+    {Req, ExpRes, Tmo} = case Type of
+                             success ->
+                                 {ping,
+                                  {[{Node1, pong}, {Node2, pong}], []},
+                                  infinity};
+                             timeout ->
+                                 {{delayed_answer,100},
+                                  {[], Nodes},
+                                  1};
+                             noconnection ->
+                                 {ping,
+                                  {[{Node1, pong}], [Node2]},
+                                  infinity}
+                         end,
+
+    _Warmup = time_multicall(ExpRes, Nodes, Name, Req, Tmo, Loops div 10),
+
+    Empty = time_multicall(ExpRes, Nodes, Name, Req, Tmo, Loops),
+    ct:pal("Time with empty message queue: ~p microsecond~n",
+           [erlang:convert_time_unit(Empty, native, microsecond)]),
+
+    make_msgq(HugeMsgQ),
+
+    Huge = time_multicall(ExpRes, Nodes, Name, Req, Tmo, Loops),
+    ct:pal("Time with huge message queue: ~p microsecond~n",
+           [erlang:convert_time_unit(Huge, native, microsecond)]),
+
+    lists:foreach(fun ({_Node, {Ctrl, _Srv}}) -> unlink(Ctrl) end, SrvList),
+
+    peer:stop(Peer1),
+    if Type == noconnection -> ok;
+       true -> peer:stop(Peer2)
+    end,
+
+    Q = Huge / Empty,
+    HugeMsgQ = flush_msgq(),
+    case Q > 10 of
+	true ->
+	    ct:fail({ratio, Q});
+	false ->
+	    {comment, "Ratio: "++erlang:float_to_list(Q)}
+    end.
+
+time_multicall(Expect, Nodes, Name, Req, Tmo, Times) ->
+    Start = erlang:monotonic_time(),
+    ok = do_time_multicall(Expect, Nodes, Name, Req, Tmo, Times),
+    erlang:monotonic_time() - Start.
+
+do_time_multicall(_Expect, _Nodes, _Name, _Req, _Tmo, 0) ->
+    ok;
+do_time_multicall(Expect, Nodes, Name, Req, Tmo, N) ->
+    Expect = gen_server:multi_call(Nodes, Name, Req, Tmo),
+    do_time_multicall(Expect, Nodes, Name, Req, Tmo, N-1).
+
+make_msgq(0) ->
+    ok;
+make_msgq(N) ->
+    self() ! {a, msg},
+    make_msgq(N-1).
+
 %%--------------------------------------------------------------
 %% Test gen_server:enter_loop/[3,4,5]. Used when you want to write
 %% your own special init-phase.
@@ -2500,6 +2769,8 @@ init({state,State}) ->
 handle_call(started_p, _From, State) ->
     io:format("FROZ"),
     {reply,ok,State};
+handle_call(ping, _From, State) ->
+    {reply,pong,State};
 handle_call({delayed_answer, T}, From, State) ->
     {noreply,{reply_to,From,State},T};
 handle_call({call_within, T}, _From, _) ->
@@ -2645,3 +2916,28 @@ format_status(terminate, [_PDict, State]) ->
     {formatted, State};
 format_status(normal, [_PDict, _State]) ->
     format_status_called.
+
+%% Utils...
+
+wait_until(Fun) ->
+    case catch Fun() of
+        true ->
+            ok;
+        _ ->
+            receive after 100 -> ok end,
+            wait_until(Fun)
+    end.
+
+old_release(N) ->
+    OldRel = integer_to_list(list_to_integer(erlang:system_info(otp_release))-N),
+    {OldRel++"_latest", OldRel}.
+
+flush_msgq() ->
+    flush_msgq(0).
+flush_msgq(N) ->
+    receive
+	_ ->
+	    flush_msgq(N+1)
+    after 0 ->
+	    N
+    end.
-- 
2.35.3
