From df2441c6315de4b245e1faf5b72517c5199fe179 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Fri, 17 Mar 2017 01:24:44 +0100
Subject: Added benchmark reporting.

---
 merge/src/merge_backup.erl | 8 ++++++++
 merge/src/merge_dist.erl   | 8 ++++++++
 2 files changed, 16 insertions(+)

(limited to 'merge/src')

diff --git a/merge/src/merge_backup.erl b/merge/src/merge_backup.erl
index 068725c..3c19527 100644
--- a/merge/src/merge_backup.erl
+++ b/merge/src/merge_backup.erl
@@ -20,6 +20,7 @@ start_link(Args) ->
 init([Name, Address]) ->
     lager:info("~p:~p: starting (~p)", [?MODULE, Name, Address]),
     Timer = erlang:start_timer(1000, self(), backup),
+    bench:timingpoint("merge_backup", Name, "start"),
     {ok, #state{timer = Timer, node_name = Name, node_address = Address}}.
 
 handle_call(stop, _From, State) ->
@@ -42,14 +43,17 @@ terminate(Reason, #state{timer = Timer}) ->
 
 backup(Size, #state{node_name = NodeName, node_address = NodeAddress} = State) ->
     lager:debug("~p: logorder size ~B", [NodeName, Size]),
+    bench:timingpoint("merge_backup", NodeName, "idle"),
     ht:load_tree(Size - 1), % TODO: Make sure this is OK to do from multiple processes and that it's not "moving backwards".
     try
         {ok, VerifiedSize} = verified_size(NodeName, NodeAddress),
+        bench:timingpoint("merge_backup", NodeName, "verifiedsize"),
         lager:debug("~p: verifiedsize ~B", [NodeName, VerifiedSize]),
         case VerifiedSize == Size of
             true ->
                 TreeHead = ht:root(Size - 1),
                 ok = check_root(NodeName, NodeAddress, Size, TreeHead),
+                bench:timingpoint("merge_backup", NodeName, "verifyroot"),
                 ok = write_backupfile(NodeName, Size, TreeHead);
             false ->
                 true = VerifiedSize < Size, % Secondary ahead of primary?
@@ -69,13 +73,17 @@ do_backup(NodeName, NodeAddress, Start, NTotal) ->
     N = min(NTotal, plopconfig:get_env(merge_backup_winsize, 1000)),
     Hashes = index:getrange(logorder, Start, Start + N - 1),
     ok = merge_util:sendlog(NodeAddress, NodeName, Start, Hashes, plopconfig:get_env(merge_backup_sendlog_chunksize, 1000)),
+    bench:timingpoint("merge_backup", NodeName, "sendlog"),
     {ok, HashesMissingEncoded} = merge_util:missingentries(NodeAddress, NodeName),
     HashesMissing = lists:map(fun base64:decode/1, HashesMissingEncoded),
     ok = merge_util:sendentries(NodeAddress, NodeName, HashesMissing, plopconfig:get_env(merge_backup_sendentries_chunksize, 100)),
+    bench:timingpoint("merge_backup", NodeName, "sendentries"),
     Size = Start + N,
     TreeHead = ht:root(Size - 1),
     ok = check_root(NodeName, NodeAddress, Size, TreeHead),
+    bench:timingpoint("merge_backup", NodeName, "verifyroot"),
     ok = setverifiedsize(NodeName, NodeAddress, Size),
+    bench:timingpoint("merge_backup", NodeName, "setverifiedsize"),
     ok = write_backupfile(NodeName, Size, TreeHead),
     true = NTotal >= N,
     do_backup(NodeName, NodeAddress, Size, NTotal - N).
diff --git a/merge/src/merge_dist.erl b/merge/src/merge_dist.erl
index 3c38401..23c9d19 100644
--- a/merge/src/merge_dist.erl
+++ b/merge/src/merge_dist.erl
@@ -21,6 +21,7 @@ start_link(Args) ->
 init([Name, Address]) ->
     lager:info("~p:~p: starting (~p)", [?MODULE, Name, Address]),
     Timer = erlang:start_timer(1000, self(), dist),
+    bench:timingpoint("merge_dist", Name, "start"),
     {ok, #state{timer = Timer,
                 node_name = Name,
                 node_address = Address,
@@ -51,6 +52,7 @@ dist({struct, PropList} = STH,
      #state{node_address = NodeAddress,
             node_name = NodeName,
             sth_timestamp = LastTimestamp} = State) ->
+    bench:timingpoint("merge_dist", NodeName, "idle"),
     Treesize = proplists:get_value(<<"tree_size">>, PropList),
     Timestamp = proplists:get_value(<<"timestamp">>, PropList),
     RootHash = base64:decode(proplists:get_value(<<"sha256_root_hash">>, PropList)),
@@ -59,12 +61,14 @@
     TS = case Timestamp > LastTimestamp of
             true ->
                 true = plop:verify_sth(Treesize, Timestamp, RootHash, Signature),
+                bench:timingpoint("merge_dist", NodeName, "verify_sth"),
                 try
                     lager:info("~p: starting dist, sth at ~B, logorder at ~B",
                                [NodeAddress, Treesize, Logordersize]),
                     statusreport:report("merge_dist", NodeName, "targetsth", Treesize),
                     ok = do_dist(NodeAddress, NodeName, min(Treesize, Logordersize)),
                     ok = publish_sth(NodeName, NodeAddress, STH),
+                    bench:timingpoint("merge_dist", NodeName, "publish_sth"),
                     statusreport:report("merge_dist", NodeName, "sth", Treesize),
                     lager:info("~p: Published STH with size ~B and timestamp " ++
                                "~p.", [NodeAddress, Treesize, Timestamp]),
@@ -101,12 +105,16 @@ do_dist(NodeAddress, NodeName, Start, NTotal) ->
     SendlogChunksize = application:get_env(plop, merge_dist_sendlog_chunksize, 1000),
     SendentriesChunksize = application:get_env(plop, merge_dist_sendentries_chunksize, 100),
     ok = merge_util:sendlog(NodeAddress, NodeName, Start, Hashes, SendlogChunksize),
+    bench:timingpoint("merge_dist", NodeName, "sendlog"),
     statusreport:report("merge_dist", NodeName, "sendlog", Start + N),
     {ok, HashesMissingEncoded} = merge_util:missingentries(NodeAddress, NodeName),
+    bench:timingpoint("merge_dist", NodeName, "missingentries"),
     lager:debug("number of missing entries: ~B", [length(HashesMissingEncoded)]),
     HashesMissing = lists:map(fun base64:decode/1, HashesMissingEncoded),
     ok = merge_util:sendentries(NodeAddress, NodeName, HashesMissing, SendentriesChunksize),
+    bench:timingpoint("merge_dist", NodeName, "sendentries"),
     {ok, NewSize} = frontend_verify_entries(NodeName, NodeAddress, Start + N),
+    bench:timingpoint("merge_dist", NodeName, "verifyentries"),
     lager:info("~p: Done distributing ~B out of ~B entries.",
                [NodeAddress, NewSize-Start, NTotal]),
     statusreport:report("merge_dist", NodeName, "verified", Start + N),
--
cgit v1.1
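Editor's note: the bench module called above is not included in this commit. For reference, below is a minimal sketch of a timingpoint/3 that would satisfy these call sites; the interface reading, the process-dictionary bookkeeping, and the log format are assumptions, not the project's actual implementation. The reading assumed here is that each call attributes the time elapsed since the previous timing point in the same process to the phase that just finished, so for example the interval ending at "sendlog" measures the merge_util:sendlog call directly above it.

%% bench.erl -- hypothetical sketch, NOT the module shipped with this
%% commit. Per (service, node) and per calling process, it records the
%% time since the previous timing point and logs it, attributing the
%% elapsed interval to the phase that just completed.
-module(bench).
-export([timingpoint/3]).

-spec timingpoint(string(), term(), string()) -> ok.
timingpoint(Service, Node, Point) ->
    Now = erlang:monotonic_time(millisecond),
    Key = {bench_last, Service, Node},
    %% Store the new timestamp and fetch the previous one in one step.
    case erlang:put(Key, Now) of
        undefined ->
            %% First timing point for this key in this process.
            lager:info("bench: ~s ~p ~s (first point)",
                       [Service, Node, Point]);
        Last ->
            lager:info("bench: ~s ~p ~s took ~B ms",
                       [Service, Node, Point, Now - Last])
    end,
    ok.

The lager:info calls assume the project's existing lager setup (the same logging used by the modules above). Under this reading, the "idle" point at the top of backup/2 and dist/2 measures the wait between merge runs, while points such as "sendlog", "sendentries", and "verifyroot" bracket the network and verification steps.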