From 146911b3485dbe2fe9222d76068b986c936924e0 Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Sun, 23 Nov 2014 16:15:34 +0200 Subject: Deserialize all of plop --- src/plop.erl | 80 +++++++++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/src/plop.erl b/src/plop.erl index 161faf8..38d7047 100644 --- a/src/plop.erl +++ b/src/plop.erl @@ -117,10 +117,12 @@ sth() -> -spec get(non_neg_integer(), non_neg_integer()) -> [{non_neg_integer(), binary(), binary()}]. get(Start, End) -> - call(?MODULE, {get, {index, Start, End}}). + EndBound = min(End, db:size() - 1), + lists:map(fun (E) -> fill_in_entry(E) end, + db:get_by_indices(Start, EndBound, {sorted, false})). get(Hash) -> - call(?MODULE, {get, {hash, Hash}}). + db:get_by_entry_hash(Hash). spt(Data) -> #signature{algorithm = #sig_and_hash_alg{ @@ -129,16 +131,52 @@ spt(Data) -> signature = sign:sign(Data)}. consistency(TreeSizeFirst, TreeSizeSecond) -> - call(?MODULE, {consistency, {TreeSizeFirst, TreeSizeSecond}}). + LastAllowedEntry = db:size() - 1, + if + TreeSizeFirst > LastAllowedEntry -> + []; + TreeSizeSecond > LastAllowedEntry -> + []; + true -> + ht:consistency(TreeSizeFirst - 1, TreeSizeSecond - 1) + end. + -spec inclusion(binary(), non_neg_integer()) -> {ok, {binary(), binary()}} | {notfound, string()}. inclusion(Hash, TreeSize) -> - call(?MODULE, {inclusion, {Hash, TreeSize}}). + LastAllowedTreeSize = db:size(), + if + TreeSize > LastAllowedTreeSize -> + {notfound, "Unknown tree size"}; + true -> + case db:get_by_leaf_hash(Hash) of + notfound -> + {notfound, "Unknown hash"}; % FIXME: include Hash + {I, _MTLHash, _Entry} -> + {ok, I, ht:path(I, TreeSize - 1)} + end + end. + -spec inclusion_and_entry(non_neg_integer(), non_neg_integer()) -> {ok, {binary(), binary()}} | {notfound, string()}. inclusion_and_entry(Index, TreeSize) -> - call(?MODULE, {inclusion_and_entry, {Index, TreeSize}}). + LastAllowedTreeSize = db:size(), + LastAllowedEntry = db:size() - 1, + if + TreeSize > LastAllowedTreeSize -> + {notfound, "Unknown tree size"}; + Index > LastAllowedEntry -> + {notfound, "Unknown index"}; + true -> + case db:get_by_index(Index) of + notfound -> + {notfound, "Unknown index"}; % FIXME: include Index + {I, _MTLHash, Entry} -> + {ok, Entry, ht:path(I, TreeSize - 1)} + end + end. + get_logid() -> sign:get_logid(). testing_get_pubkey() -> @@ -214,38 +252,8 @@ fill_in_entry({_Index, LeafHash, notfetched}) -> %%%%%%%%%%%%%%%%%%%% handle_call(stop, _From, Plop) -> - {stop, normal, stopped, Plop}; + {stop, normal, stopped, Plop}. 
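%% Editor's note (annotation, not part of the original patch): this commit
%% moves the read operations (get/1, get/2, consistency/2, inclusion/2 and
%% inclusion_and_entry/2) out of the gen_server. They now call db and ht
%% directly in the caller's process, with the tree-size bounds checks done
%% in those API functions, so reads are no longer serialized through the
%% plop process. The handle_call clauses removed by the commit follow below.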
-handle_call({get, {index, Start, End}}, _From, Plop) -> - EndBound = min(End, db:size() - 1), - {reply, lists:map(fun (E) -> fill_in_entry(E) end, - db:get_by_indices(Start, EndBound, {sorted, false})), - Plop}; - -handle_call({get, {hash, EntryHash}}, _From, Plop) -> - {reply, db:get_by_entry_hash(EntryHash), Plop}; - - -handle_call({consistency, {First, Second}}, _From, Plop) -> - {reply, ht:consistency(First - 1, Second - 1), Plop}; - -handle_call({inclusion, {Hash, TreeSize}}, _From, Plop) -> - R = case db:get_by_leaf_hash(Hash) of - notfound -> - {notfound, "Unknown hash"}; % FIXME: include Hash - {I, _MTLHash, _Entry} -> - {ok, I, ht:path(I, TreeSize - 1)} - end, - {reply, R, Plop}; - -handle_call({inclusion_and_entry, {Index, TreeSize}}, _From, Plop) -> - R = case db:get_by_index(Index) of - notfound -> - {notfound, "Unknown index"}; % FIXME: include Index - {I, _MTLHash, Entry} -> - {ok, Entry, ht:path(I, TreeSize - 1)} - end, - {reply, R, Plop}. %% @doc Signed Plop Timestamp, conformant to an SCT in RFC6962 3.2 and %% RFC5246 4.7. -- cgit v1.1 From 80e68e390f3a999c25fb3a153388d6d6d9ec64d7 Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Wed, 28 Jan 2015 12:15:41 +0100 Subject: storage/fetchnewentries speedp storage/fetchnewentries now fetches only hashes Implmented storage/getentry to actually fetch the entry --- src/storage.erl | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/storage.erl b/src/storage.erl index 6a49a22..b7acfd7 100644 --- a/src/storage.erl +++ b/src/storage.erl @@ -33,9 +33,17 @@ request(get, "ct/storage/fetchnewentries", _Input) -> NewHashes = storagedb:fetchnewhashes(0), % XXX send only hashes, implement getentry Entries = lists:map(fun(LeafHash) -> + base64:encode(LeafHash) + end, NewHashes), + success({[{result, <<"ok">>}, + {entries, Entries}]}); +request(get, "ct/storage/getentry", Query) -> + Hash = base64:decode(proplists:get_value("hash", Query)), + Hashes = [Hash], + Entries = lists:map(fun(LeafHash) -> {[{hash, base64:encode(LeafHash)}, {entry, base64:encode(db:entry_for_leafhash(LeafHash))}]} - end, NewHashes), + end, Hashes), success({[{result, <<"ok">>}, {entries, Entries}]}). -- cgit v1.1 From 1f2c976ea9924589fc3dc6c6b0f6d231e724a45f Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Wed, 28 Jan 2015 17:29:58 +0100 Subject: Perform frontend index writes in two phases --- src/db.erl | 25 ++++++++++++++++++------- src/frontend.erl | 6 +++++- src/perm.erl | 26 ++++++++++++++++++++------ 3 files changed, 43 insertions(+), 14 deletions(-) diff --git a/src/db.erl b/src/db.erl index d2e0814..0541678 100644 --- a/src/db.erl +++ b/src/db.erl @@ -6,10 +6,11 @@ %% API. -export([start_link/0, stop/0]). --export([add/2, add_entryhash/2, add_index/2, set_treesize/1, size/0]). +-export([add/2, add_entryhash/2, add_index_nosync/2, set_treesize/1, size/0]). -export([get_by_index/1, get_by_indices/3, get_by_leaf_hash/1]). -export([get_by_entry_hash/1, entry_for_leafhash/1, leafhash_for_index/1]). -export([leafhash_for_indices/2, indexsize/0]). +-export([indexforhash_sync/2, index_sync/0]). %% gen_server callbacks. -export([init/1, handle_call/3, terminate/2, handle_cast/2, handle_info/2, code_change/3]). @@ -48,9 +49,9 @@ add_entryhash(LeafHash, EntryHash) -> ok = perm:ensurefile(entryhash_root_path(), EntryHash, LeafHash), ok. --spec add_index(binary(), non_neg_integer()) -> ok. -add_index(LeafHash, Index) -> - call(?MODULE, {add_index, {LeafHash, Index}}). +-spec add_index_nosync(binary(), non_neg_integer()) -> ok. 
+add_index_nosync(LeafHash, Index) -> + call(?MODULE, {add_index_nosync, {LeafHash, Index}}). -spec set_treesize(non_neg_integer()) -> ok. set_treesize(Size) -> @@ -177,8 +178,18 @@ get_by_indices_helper(Start, End) -> handle_call(stop, _From, State) -> {stop, normal, stopped, State}; -handle_call({add_index, {LeafHash, Index}}, _From, State) -> - ok = perm:ensurefile(indexforhash_root_path(), - LeafHash, integer_to_binary(Index)), +handle_call({add_index_nosync, {LeafHash, Index}}, _From, State) -> + ok = perm:ensurefile_nosync(indexforhash_root_path(), + LeafHash, integer_to_binary(Index)), ok = index:add(index_path(), Index, LeafHash), {reply, ok, State}. + +indexforhash_sync(LeafHash, Index) -> + ok = perm:ensurefile(indexforhash_root_path(), + LeafHash, integer_to_binary(Index)), + ok. + +index_sync() -> + Basepath = index_path(), + ok = util:fsync([Basepath, filename:dirname(Basepath)]), + ok. diff --git a/src/frontend.erl b/src/frontend.erl index 6fc2fd5..b2244de 100644 --- a/src/frontend.erl +++ b/src/frontend.erl @@ -29,8 +29,12 @@ request(post, "ct/frontend/sendlog", Input) -> Indices = lists:seq(Start, Start + length(Hashes) - 1), lists:foreach(fun ({Hash, Index}) -> - ok = db:add_index(Hash, Index) + ok = db:add_index_nosync(Hash, Index) end, lists:zip(Hashes, Indices)), + lists:foreach(fun ({Hash, Index}) -> + ok = db:indexforhash_sync(Hash, Index) + end, lists:zip(Hashes, Indices)), + ok = db:index_sync(), success({[{result, <<"ok">>}]}) end; diff --git a/src/perm.erl b/src/perm.erl index 9f02b55..614656a 100644 --- a/src/perm.erl +++ b/src/perm.erl @@ -2,7 +2,7 @@ %%% See LICENSE for licensing information. -module(perm). --export([ensurefile/3, readfile/2]). +-export([ensurefile/3, ensurefile_nosync/3, readfile/2]). -spec readfile_and_verify(string(), binary()) -> ok | differ | {error, atom()}. readfile_and_verify(Name, Content) -> @@ -49,13 +49,21 @@ path_for_key(Rootdir, Key) -> -spec ensurefile(string(), binary(), binary()) -> ok | differ. ensurefile(Rootdir, Key, Content) -> + ensurefile(Rootdir, Key, Content, sync). + +-spec ensurefile_nosync(string(), binary(), binary()) -> ok | differ. +ensurefile_nosync(Rootdir, Key, Content) -> + ensurefile(Rootdir, Key, Content, nosync). + +ensurefile(Rootdir, Key, Content, Syncflag) -> lager:debug("dir ~p key ~p", [Rootdir, Key]), {Dirs, Path} = path_for_key(Rootdir, Key), case readfile_and_verify(Path, Content) of ok -> lager:debug("key ~p existed, fsync", [Key]), - util:fsync([Path, Rootdir | Dirs]), - lager:debug("key ~p fsynced", [Key]); + ok = util:fsync([Path, Rootdir | Dirs]), + lager:debug("key ~p fsynced", [Key]), + ok; differ -> lager:debug("key ~p existed, was different", [Key]), differ; @@ -67,9 +75,15 @@ ensurefile(Rootdir, Key, Content) -> NurseryName = Rootdir ++ "nursery/" ++ util:tempfilename(hex:bin_to_hexstr(Key)), util:write_tempfile_and_rename(Path, NurseryName, Content), - lager:debug("key ~p added, fsync", [Key]), - util:fsync([Path, Rootdir | Dirs]), - lager:debug("key ~p fsynced", [Key]); + case Syncflag of + sync -> + lager:debug("key ~p added, fsync", [Key]), + ok = util:fsync([Path, Rootdir | Dirs]), + lager:debug("key ~p fsynced", [Key]), + ok; + nosync -> + ok + end; {error, Error} -> util:exit_with_error(Error, readfile, "Error reading file") end. 
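%% Editor's note (annotation, not part of the original patch): the
%% frontend.erl hunk above is the consumer of the new nosync path. A
%% condensed sketch of the two-phase write it introduces, with
%% store_indices/2 as a hypothetical wrapper used only for illustration:
store_indices(Hashes, Start) ->
    Indices = lists:seq(Start, Start + length(Hashes) - 1),
    Pairs = lists:zip(Hashes, Indices),
    %% Phase 1: write each hash-to-index mapping and index entry, no fsync.
    lists:foreach(fun ({Hash, Index}) ->
                          ok = db:add_index_nosync(Hash, Index)
                  end, Pairs),
    %% Phase 2: fsync each hash-to-index file, then the index file once.
    lists:foreach(fun ({Hash, Index}) ->
                          ok = db:indexforhash_sync(Hash, Index)
                  end, Pairs),
    ok = db:index_sync().
%% Only the writes are serialized through the db gen_server; the fsyncs
%% run in the calling request process.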
-- cgit v1.1 From 7d8d0e05da0d8d221254fd6c85145d327157845e Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Wed, 28 Jan 2015 22:41:15 +0100 Subject: Delay fsync for index writes --- src/db.erl | 2 +- src/index.erl | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/db.erl b/src/db.erl index 0541678..0361aaf 100644 --- a/src/db.erl +++ b/src/db.erl @@ -181,7 +181,7 @@ handle_call(stop, _From, State) -> handle_call({add_index_nosync, {LeafHash, Index}}, _From, State) -> ok = perm:ensurefile_nosync(indexforhash_root_path(), LeafHash, integer_to_binary(Index)), - ok = index:add(index_path(), Index, LeafHash), + ok = index:add_nosync(index_path(), Index, LeafHash), {reply, ok, State}. indexforhash_sync(LeafHash, Index) -> diff --git a/src/index.erl b/src/index.erl index 96195e3..c0e344a 100644 --- a/src/index.erl +++ b/src/index.erl @@ -12,13 +12,20 @@ %% TODO: Checksums -module(index). --export([get/2, getrange/3, add/3, addlast/2, indexsize/1]). +-export([get/2, getrange/3, add/3, add_nosync/3, addlast/2, indexsize/1]). -define(ENTRYSIZE, 32). -define(ENTRYSIZEINFILE, (?ENTRYSIZE*2+1)). -spec add(string(), integer() | last, binary()) -> ok. -add(Basepath, Index, Entry) when is_binary(Entry), size(Entry) == ?ENTRYSIZE -> +add(Basepath, Index, Entry) -> + add(Basepath, Index, Entry, sync). + +-spec add_nosync(string(), integer() | last, binary()) -> ok. +add_nosync(Basepath, Index, Entry) -> + add(Basepath, Index, Entry, nosync). + +add(Basepath, Index, Entry, Syncflag) when is_binary(Entry), size(Entry) == ?ENTRYSIZE -> case file:open(Basepath, [read, write, binary]) of {ok, File} -> {ok, Position} = file:position(File, eof), @@ -55,7 +62,12 @@ add(Basepath, Index, Entry) when is_binary(Entry), size(Entry) == ?ENTRYSIZE -> end end, ok = file:close(File), - util:fsync([Basepath, filename:dirname(Basepath)]); + case Syncflag of + sync -> + util:fsync([Basepath, filename:dirname(Basepath)]); + nosync -> + ok + end; {error, Error} -> util:exit_with_error(Error, writefile, "Error opening file for writing") -- cgit v1.1 From 5ca088779fab8188bb032b9b86933808e2b9dcb7 Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Thu, 29 Jan 2015 13:25:40 +0100 Subject: plop.erl: better debug messages --- src/plop.erl | 52 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/src/plop.erl b/src/plop.erl index 38d7047..821fe99 100644 --- a/src/plop.erl +++ b/src/plop.erl @@ -71,19 +71,23 @@ init([]) -> handle_cast(_Request, State) -> {noreply, State}. -handle_http_reply(RepliesUntilQuorum, +handle_http_reply(TreeLeafHash, RepliesUntilQuorum, StatusCode, Body) -> - lager:debug("http_reply: ~p", [Body]), + lager:debug("leafhash ~s: http_reply: ~p", + [mochihex:to_hex(TreeLeafHash), Body]), {struct, PropList} = mochijson2:decode(Body), Result = proplists:get_value(<<"result">>, PropList), if Result == <<"ok">>, StatusCode == 200 -> case RepliesUntilQuorum - 1 of 0 -> %% reached quorum - lager:debug("reached quorum"), + lager:debug("leafhash ~s: reached quorum", + [mochihex:to_hex(TreeLeafHash)]), {ok}; NewRepliesUntilQuorum -> - lager:debug("replies until quorum: ~p", [NewRepliesUntilQuorum]), + lager:debug("leafhash ~s: replies until quorum: ~p", + [mochihex:to_hex(TreeLeafHash), + NewRepliesUntilQuorum]), {continue, NewRepliesUntilQuorum} end end. @@ -101,7 +105,7 @@ terminate(_Reason, _State) -> %%%%%%%%%%%%%%%%%%%% -spec add(binary(), binary(), binary()) -> ok. 
add(LogEntry, TreeLeafHash, EntryHash) -> - lager:debug("add leafhash ~p", [TreeLeafHash]), + lager:debug("add leafhash ~s", [mochihex:to_hex(TreeLeafHash)]), case storage_nodes() of [] -> exit(internal_merge_not_supported); @@ -195,11 +199,12 @@ send_storage_sendentry(URLBase, LogEntry, TreeLeafHash) -> {entry, base64:encode(LogEntry)}, {treeleafhash, base64:encode(TreeLeafHash)} ]}), - lager:debug("send sendentry to storage node ~p: ~p", [URLBase, Request]), + lager:debug("leafhash ~s: send sendentry to storage node ~p", + [mochihex:to_hex(TreeLeafHash), URLBase]), {ok, RequestId} = httpc:request(post, {URLBase ++ "sendentry", [], "text/json", list_to_binary(Request)}, [], [{sync, false}]), - RequestId. + {RequestId, URLBase}. send_storage_entrycommitted(URLBase, EntryHash, TreeLeafHash) -> Request = mochijson2:encode( @@ -211,39 +216,56 @@ send_storage_entrycommitted(URLBase, EntryHash, TreeLeafHash) -> "text/json", list_to_binary(Request)}, [], [{sync, false}]). -store_loop(Requests, RepliesUntilQuorum) -> +store_loop(TreeLeafHash, Requests, RepliesUntilQuorum) -> receive {http, {RequestId, {StatusLine, _Headers, Body}}} -> {_HttpVersion, StatusCode, _ReasonPhrase} = StatusLine, - case sets:is_element(RequestId, Requests) of + case dict:is_key(RequestId, Requests) of false -> - lager:info("stray storage reply: ~p", [{StatusLine, Body}]), - store_loop(Requests, RepliesUntilQuorum); + lager:info("leafhash ~s: stray storage reply: ~p", + [mochihex:to_hex(TreeLeafHash), + {StatusLine, Body}]), + store_loop(TreeLeafHash, Requests, RepliesUntilQuorum); true -> - case handle_http_reply(RepliesUntilQuorum, StatusCode, Body) of + NewRequests = dict:erase(RequestId, Requests), + case handle_http_reply(TreeLeafHash, RepliesUntilQuorum, + StatusCode, Body) of {ok} -> ok; {continue, NewRepliesUntilQuorum} -> - store_loop(Requests, NewRepliesUntilQuorum) + store_loop(TreeLeafHash, NewRequests, + NewRepliesUntilQuorum) end end after 2000 -> + lager:error("leafhash ~s: storage failed: " ++ + "~p replies until quorum, nodes left: ~p", + [mochihex:to_hex(TreeLeafHash), RepliesUntilQuorum, + lists:map(fun({_Key, Value}) -> + Value + end, dict:to_list(Requests))]), error end. store_at_all_nodes(Nodes, {LogEntry, TreeLeafHash, EntryHash}) -> - lager:debug("leafhash ~p: send requests to ~p", [TreeLeafHash, Nodes]), + lager:debug("leafhash ~s: send requests to ~p", + [mochihex:to_hex(TreeLeafHash), Nodes]), Requests = [send_storage_sendentry(URLBase, LogEntry, TreeLeafHash) || URLBase <- Nodes], - case store_loop(sets:from_list(Requests), storage_nodes_quorum()) of + case store_loop(TreeLeafHash, dict:from_list(Requests), + storage_nodes_quorum()) of ok -> + lager:debug("leafhash ~s: all requests answered", + [mochihex:to_hex(TreeLeafHash)]), lists:foreach(fun (URLBase) -> send_storage_entrycommitted(URLBase, EntryHash, TreeLeafHash) end, Nodes), ok; Any -> + lager:debug("leafhash ~s: error: ~p", + [mochihex:to_hex(TreeLeafHash), Any]), Any end. -- cgit v1.1 From dd9dcfa363f6219d30b5f229fa8097c03395698e Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Thu, 29 Jan 2015 13:56:43 +0100 Subject: Log leaf hashes as hex strings --- src/db.erl | 4 ++-- src/perm.erl | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/db.erl b/src/db.erl index 0361aaf..6430661 100644 --- a/src/db.erl +++ b/src/db.erl @@ -39,9 +39,9 @@ stop() -> -spec add(binary(), binary()) -> ok. 
add(LeafHash, Data) -> - lager:debug("add leafhash ~p", [LeafHash]), + lager:debug("add leafhash ~s", [mochihex:to_hex(LeafHash)]), ok = perm:ensurefile(entry_root_path(), LeafHash, Data), - lager:debug("leafhash ~p added", [LeafHash]), + lager:debug("leafhash ~s added", [mochihex:to_hex(LeafHash)]), ok. -spec add_entryhash(binary(), binary()) -> ok. diff --git a/src/perm.erl b/src/perm.erl index 614656a..0cbe6cb 100644 --- a/src/perm.erl +++ b/src/perm.erl @@ -56,19 +56,19 @@ ensurefile_nosync(Rootdir, Key, Content) -> ensurefile(Rootdir, Key, Content, nosync). ensurefile(Rootdir, Key, Content, Syncflag) -> - lager:debug("dir ~p key ~p", [Rootdir, Key]), + lager:debug("dir ~p key ~s", [Rootdir, mochihex:to_hex(Key)]), {Dirs, Path} = path_for_key(Rootdir, Key), case readfile_and_verify(Path, Content) of ok -> - lager:debug("key ~p existed, fsync", [Key]), + lager:debug("key ~s existed, fsync", [mochihex:to_hex(Key)]), ok = util:fsync([Path, Rootdir | Dirs]), - lager:debug("key ~p fsynced", [Key]), + lager:debug("key ~s fsynced", [mochihex:to_hex(Key)]), ok; differ -> - lager:debug("key ~p existed, was different", [Key]), + lager:debug("key ~s existed, was different", [mochihex:to_hex(Key)]), differ; {error, enoent} -> - lager:debug("key ~p didn't exist, add", [Key]), + lager:debug("key ~s didn't exist, add", [mochihex:to_hex(Key)]), util:check_error(make_dirs([Rootdir, Rootdir ++ "nursery/"] ++ Dirs), makedir, "Error creating directory"), @@ -77,9 +77,9 @@ ensurefile(Rootdir, Key, Content, Syncflag) -> util:write_tempfile_and_rename(Path, NurseryName, Content), case Syncflag of sync -> - lager:debug("key ~p added, fsync", [Key]), + lager:debug("key ~s added, fsync", [mochihex:to_hex(Key)]), ok = util:fsync([Path, Rootdir | Dirs]), - lager:debug("key ~p fsynced", [Key]), + lager:debug("key ~s fsynced", [mochihex:to_hex(Key)]), ok; nosync -> ok -- cgit v1.1 From 4956c3dc10bc856739c6371f048031543376090e Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Thu, 19 Feb 2015 12:08:40 +0100 Subject: plop:consistency: Correct off-by-one error --- src/plop.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/plop.erl b/src/plop.erl index 821fe99..114695d 100644 --- a/src/plop.erl +++ b/src/plop.erl @@ -135,11 +135,11 @@ spt(Data) -> signature = sign:sign(Data)}. consistency(TreeSizeFirst, TreeSizeSecond) -> - LastAllowedEntry = db:size() - 1, + TreeSize = db:size(), if - TreeSizeFirst > LastAllowedEntry -> + TreeSizeFirst >= TreeSizeSecond -> []; - TreeSizeSecond > LastAllowedEntry -> + TreeSizeSecond > TreeSize -> []; true -> ht:consistency(TreeSizeFirst - 1, TreeSizeSecond - 1) -- cgit v1.1 From 0a8c2486dab278353febb4c50c9722779977227a Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Thu, 19 Feb 2015 12:03:14 +0100 Subject: db:get_by_entry_hash: Check that entry exists --- src/db.erl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/db.erl b/src/db.erl index 6430661..5760fe0 100644 --- a/src/db.erl +++ b/src/db.erl @@ -91,9 +91,13 @@ get_by_entry_hash(EntryHash) -> noentry -> notfound; LeafHash -> - Entry = entry_for_leafhash(LeafHash), - %% Don't fetch index, isn't used and might not exist - {notfetched, LeafHash, Entry} + case entry_for_leafhash(LeafHash) of + noentry -> + notfound; + Entry -> + %% Don't fetch index, isn't used and might not exist + {notfetched, LeafHash, Entry} + end end. 
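%% Editor's note (annotation, not part of the original patch): after this
%% change a hash whose leaf hash is known but whose entry file is missing
%% is reported exactly like an unknown hash, so callers only need to handle
%% notfound and {notfetched, LeafHash, Entry}. A minimal caller sketch
%% under that assumption (lookup_entry/1 is hypothetical):
lookup_entry(EntryHash) ->
    case db:get_by_entry_hash(EntryHash) of
        notfound ->
            {error, unknown_entry};
        {notfetched, LeafHash, Entry} ->
            {ok, LeafHash, Entry}
    end.
%% Similarly, the consistency fix above means plop:consistency/2 now only
%% calls ht:consistency/2 when TreeSizeFirst < TreeSizeSecond and
%% TreeSizeSecond =< db:size(), and returns [] otherwise.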
%%%%%%%%%%%%%%%%%%%% -- cgit v1.1 From a287b6c12c4d81b9fd09dd4ae9a85d8f6eef1cb9 Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Thu, 19 Feb 2015 12:32:44 +0100 Subject: Use hackney instead of inets:httpc --- Emakefile | 1 + src/plop.erl | 31 +++++++++++++++++++++++++------ src/plop_app.erl | 1 + 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/Emakefile b/Emakefile index fe4b31d..dc13e9c 100644 --- a/Emakefile +++ b/Emakefile @@ -2,6 +2,7 @@ {["src/*", "test/*"], [debug_info, {i, "include/"}, + {i, "../"}, {outdir, "ebin/"}, {parse_transform, lager_transform}]}. {["test/src/*"], diff --git a/src/plop.erl b/src/plop.erl index 114695d..f90d287 100644 --- a/src/plop.erl +++ b/src/plop.erl @@ -39,6 +39,7 @@ %%-include("db.hrl"). -include_lib("public_key/include/public_key.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("hackney/include/hackney_lib.hrl"). %%%%% moved from plop.hrl, maybe remove -define(PLOPVERSION, 0). @@ -193,6 +194,28 @@ storage_nodes_quorum() -> {ok, Value} = application:get_env(plop, storage_nodes_quorum), Value. +send_http_request(TreeLeafHash, URL, Headers, RequestBody) -> + ParentPid = self(), + RequestId = make_ref(), + CACertFile = application:get_env(catlfish, https_cacertfile, none), + spawn(fun () -> + Starttime = os:timestamp(), + ParsedURL = hackney_url:parse_url(URL), + #hackney_url{path = Path} = ParsedURL, + lager:debug("leafhash ~s: sending http request to ~p", [mochihex:to_hex(TreeLeafHash), URL]), + {ok, ConnRef} = hackney:connect(ParsedURL, [{ssl_options, [{cacertfile, CACertFile}]}]), + lager:debug("leafhash ~s: connected to ~p", [mochihex:to_hex(TreeLeafHash), URL]), + {ok, StatusCode, RespHeaders, ClientRef} = hackney:send_request(ConnRef, {post, Path, Headers, RequestBody}), + lager:debug("leafhash ~s: received headers for ~p", [mochihex:to_hex(TreeLeafHash), URL]), + {ok, Body} = hackney:body(ClientRef), + Stoptime = os:timestamp(), + hackney:close(ClientRef), + lager:debug("leafhash ~s: received body for ~p: time ~p", [mochihex:to_hex(TreeLeafHash), URL, timer:now_diff(Stoptime, Starttime)]), + StatusLine = {none, StatusCode, none}, + ParentPid ! {http, {RequestId, {StatusLine, RespHeaders, Body}}} + end), + RequestId. + send_storage_sendentry(URLBase, LogEntry, TreeLeafHash) -> Request = mochijson2:encode( {[{plop_version, 1}, @@ -201,9 +224,7 @@ send_storage_sendentry(URLBase, LogEntry, TreeLeafHash) -> ]}), lager:debug("leafhash ~s: send sendentry to storage node ~p", [mochihex:to_hex(TreeLeafHash), URLBase]), - {ok, RequestId} = httpc:request(post, {URLBase ++ "sendentry", [], - "text/json", list_to_binary(Request)}, - [], [{sync, false}]), + RequestId = send_http_request(TreeLeafHash, URLBase ++ "sendentry", [{"Content-Type", "text/json"}], list_to_binary(Request)), {RequestId, URLBase}. send_storage_entrycommitted(URLBase, EntryHash, TreeLeafHash) -> @@ -212,9 +233,7 @@ send_storage_entrycommitted(URLBase, EntryHash, TreeLeafHash) -> {entryhash, base64:encode(EntryHash)}, {treeleafhash, base64:encode(TreeLeafHash)} ]}), - httpc:request(post, {URLBase ++ "entrycommitted", [], - "text/json", list_to_binary(Request)}, - [], [{sync, false}]). + send_http_request(TreeLeafHash, URLBase ++ "entrycommitted", [{"Content-Type", "text/json"}], list_to_binary(Request)). store_loop(TreeLeafHash, Requests, RepliesUntilQuorum) -> receive diff --git a/src/plop_app.erl b/src/plop_app.erl index 767bf06..9cb5558 100644 --- a/src/plop_app.erl +++ b/src/plop_app.erl @@ -6,6 +6,7 @@ -export([start/2, stop/1]). 
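%% Editor's note (annotation, not part of the original patch): with this
%% commit each storage request runs in its own worker process.
%% send_http_request/4, added to plop.erl above, connects with hackney,
%% posts the JSON body, reads the response and sends
%% {http, {RequestId, {StatusLine, Headers, Body}}} back to the caller,
%% which store_loop/3 collects until the quorum is reached or the two
%% second timeout fires. plop_app:start/2 below starts hackney at
%% application startup.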
start(normal, Args) -> + hackney:start(), plop_sup:start_link(Args). stop(_State) -> -- cgit v1.1 From e5d870308d6e71dd12f46ec4745640e6e75d43d9 Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Mon, 2 Feb 2015 14:37:22 +0100 Subject: Only serialize the actual write on storagedb, not the fsync --- src/index.erl | 14 +++++++++----- src/storagedb.erl | 8 +++++--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/index.erl b/src/index.erl index c0e344a..9d0df96 100644 --- a/src/index.erl +++ b/src/index.erl @@ -12,7 +12,7 @@ %% TODO: Checksums -module(index). --export([get/2, getrange/3, add/3, add_nosync/3, addlast/2, indexsize/1]). +-export([get/2, getrange/3, add/3, add_nosync/3, addlast_nosync/2, indexsize/1, sync/1]). -define(ENTRYSIZE, 32). -define(ENTRYSIZEINFILE, (?ENTRYSIZE*2+1)). @@ -64,7 +64,7 @@ add(Basepath, Index, Entry, Syncflag) when is_binary(Entry), size(Entry) == ?ENT ok = file:close(File), case Syncflag of sync -> - util:fsync([Basepath, filename:dirname(Basepath)]); + sync(Basepath); nosync -> ok end; @@ -74,9 +74,13 @@ add(Basepath, Index, Entry, Syncflag) when is_binary(Entry), size(Entry) == ?ENT end. --spec addlast(string(), integer()) -> ok. -addlast(Basepath, Entry) -> - add(Basepath, last, Entry). +-spec sync(string()) -> ok. +sync(Basepath) -> + util:fsync([Basepath, filename:dirname(Basepath)]). + +-spec addlast_nosync(string(), integer()) -> ok. +addlast_nosync(Basepath, Entry) -> + add_nosync(Basepath, last, Entry). decodedata(Binary) -> lists:reverse(decodedata(Binary, [])). diff --git a/src/storagedb.erl b/src/storagedb.erl index 444abc1..9cdf4c1 100644 --- a/src/storagedb.erl +++ b/src/storagedb.erl @@ -36,7 +36,9 @@ fetchnewhashes(Index) -> -spec add(binary()) -> ok. add(LeafHash) -> - call(?MODULE, {add, LeafHash}). + ok = call(?MODULE, {add_nosync, LeafHash}), + ok = index:sync(newentries_path()), + ok. %%%%%%%%%%%%%%%%%%%% %% gen_server callbacks. @@ -63,6 +65,6 @@ newentries_path() -> handle_call(stop, _From, State) -> {stop, normal, stopped, State}; -handle_call({add, LeafHash}, _From, State) -> - ok = index:addlast(newentries_path(), LeafHash), +handle_call({add_nosync, LeafHash}, _From, State) -> + ok = index:addlast_nosync(newentries_path(), LeafHash), {reply, ok, State}. -- cgit v1.1 From 7f09020956447b561eacbbb1926fa7d61b510180 Mon Sep 17 00:00:00 2001 From: Magnus Ahltorp Date: Fri, 6 Feb 2015 19:14:54 +0100 Subject: Implement fetching of multiple entries in storage/getentry --- src/storage.erl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/storage.erl b/src/storage.erl index b7acfd7..64f4ded 100644 --- a/src/storage.erl +++ b/src/storage.erl @@ -38,11 +38,15 @@ request(get, "ct/storage/fetchnewentries", _Input) -> success({[{result, <<"ok">>}, {entries, Entries}]}); request(get, "ct/storage/getentry", Query) -> - Hash = base64:decode(proplists:get_value("hash", Query)), - Hashes = [Hash], + Hashes = [base64:decode(Value) || {Key, Value} <- Query, Key == "hash"], Entries = lists:map(fun(LeafHash) -> {[{hash, base64:encode(LeafHash)}, - {entry, base64:encode(db:entry_for_leafhash(LeafHash))}]} + {entry, base64:encode(case db:entry_for_leafhash(LeafHash) of + noentry -> + <<>>; + Entry -> + Entry + end)}]} end, Hashes), success({[{result, <<"ok">>}, {entries, Entries}]}). -- cgit v1.1 From 2d4e978b85b1e451971bf6d34b8b114f0b8fe578 Mon Sep 17 00:00:00 2001 From: Linus Nordberg Date: Fri, 20 Feb 2015 14:53:25 +0100 Subject: Don't include include/. Not used any more. 
--- Emakefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Emakefile b/Emakefile index dc13e9c..42c6d44 100644 --- a/Emakefile +++ b/Emakefile @@ -1,7 +1,6 @@ %% erl -make (-*- erlang -*-) {["src/*", "test/*"], [debug_info, - {i, "include/"}, {i, "../"}, {outdir, "ebin/"}, {parse_transform, lager_transform}]}. -- cgit v1.1
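Editor's note: taken together, the storage patches above leave ct/storage/fetchnewentries returning only base64 leaf hashes and ct/storage/getentry accepting any number of hash query parameters, so a fetcher can pull several entries per round trip instead of one. Below is a hedged client-side sketch of that flow, not part of the patches: fetch_entries/2 is a hypothetical helper, URLBase is assumed to be a storage-node URL base of the same form plop.erl uses for sendentry (ending in ct/storage/), and it presumes hackney is started and mochiweb's mochiweb_util is available, as mochijson2 and mochihex already are.

fetch_entries(URLBase, LeafHashes) ->
    %% Repeat the "hash" parameter once per requested leaf hash.
    Query = mochiweb_util:urlencode(
              [{"hash", base64:encode_to_string(H)} || H <- LeafHashes]),
    URL = list_to_binary(URLBase ++ "getentry?" ++ Query),
    {ok, 200, _Headers, ClientRef} = hackney:request(get, URL, [], <<>>, []),
    {ok, Body} = hackney:body(ClientRef),
    {struct, Props} = mochijson2:decode(Body),
    <<"ok">> = proplists:get_value(<<"result">>, Props),
    %% Each element carries a base64-encoded hash and entry; a missing
    %% entry comes back as an empty string, per the storage.erl code above.
    [{base64:decode(proplists:get_value(<<"hash">>, E)),
      base64:decode(proplists:get_value(<<"entry">>, E))}
     || {struct, E} <- proplists:get_value(<<"entries">>, Props)].

Batching the hashes keeps a merge fetch at one HTTP round trip per batch rather than one per entry, which is the point of splitting fetchnewentries and getentry.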