| author    | Magnus Ahltorp <map@kth.se>                      | 2015-06-11 16:38:30 +0200 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Linus Nordberg <linus@nordu.net>                 | 2015-06-12 15:45:18 +0200 |
| commit    | 4e26b3679e9743690a85c9f72f7f4fc8ea8fd3f0 (patch) |                           |
| tree      | 6c445bb8644a7f2ece4084b731faedb726aea3f0         |                           |
| parent    | 3de0e0af3521f826e60468b2d6d19717fa0a53d7 (diff)  |                           |
Implement rate limiting of add_chain
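
The commit routes every new `add_chain` submission through a small `gen_server` (`src/ratelimit.erl`) that keeps a queue of recent timestamps per request type and answers `ok` or `overload`; on anything but `ok`, `catlfish:add_chain/3` aborts with `exit({internalerror, "Rate limiting"})`. Limits are written in the node configuration as `<frequency> per second|minute|hour` (or `none`), e.g. `add_chain: 10 per second` in the test config below. The sketch that follows is illustrative only and not part of the commit; the module and function names are invented, but the `ratelimit:get_token/1` call and the exit reason mirror the patched `add_chain/3`.

```erlang
%% Hedged sketch, not part of the commit: how a caller is expected to use
%% the new limiter. The module and function names are invented; the
%% ratelimit:get_token/1 call and the exit reason mirror the patched
%% catlfish:add_chain/3 in the diff below.
-module(ratelimit_usage_example).
-export([maybe_add/1]).

maybe_add(AddFun) ->
    case ratelimit:get_token(add_chain) of
        ok ->
            %% Within the configured rate: go ahead with the insertion.
            AddFun();
        _ ->
            %% Token refused (overload): reject the request.
            exit({internalerror, "Rate limiting"})
    end.
```
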
| mode       | file                           | lines changed |
|------------|--------------------------------|---------------|
| -rw-r--r-- | src/catlfish.erl               | 62            |
| -rw-r--r-- | src/catlfish_sup.erl           | 13            |
| -rw-r--r-- | src/ratelimit.erl              | 87            |
| -rw-r--r-- | test/catlfish-test-local-1.cfg | 3             |
| -rwxr-xr-x | tools/compileconfig.py         | 18            |

5 files changed, 156 insertions, 27 deletions
```diff
diff --git a/src/catlfish.erl b/src/catlfish.erl
index dd25a84..e48f788 100644
--- a/src/catlfish.erl
+++ b/src/catlfish.erl
@@ -113,39 +113,49 @@ get_sct(Hash, TimestampedEntry) ->
             calc_sct(TimestampedEntry)
     end.
 
--spec add_chain(binary(), [binary()], normal|precert) -> {[{_,_},...]}.
-add_chain(LeafCert, CertChain, Type) ->
-    CombinedChain = [LeafCert | CertChain],
-    EntryHash = crypto:hash(sha256, CombinedChain),
+add_to_db(Type, LeafCert, CertChain, EntryHash) ->
     EntryType = case Type of
                     normal -> x509_entry;
                     precert -> precert_entry
                 end,
+    Timestamp = plop:generate_timestamp(),
+    TSE = timestamped_entry(Timestamp, EntryType, LeafCert, CertChain),
+    MTLText = serialise(#mtl{leaf_version = v1,
+                             leaf_type = timestamped_entry,
+                             entry = TSE}),
+    MTLHash = ht:leaf_hash(MTLText),
+    ExtraData =
+        case Type of
+            normal -> CertChain;
+            precert -> [LeafCert | CertChain]
+        end,
+    LogEntry =
+        list_to_binary(
+          [encode_tls_vector(MTLText, 4),
+           encode_tls_vector(
+             encode_tls_vector(
+               list_to_binary(
+                 [encode_tls_vector(C, 3) || C <- ExtraData]),
+               3),
+             4)]),
+    ok = plop:add(LogEntry, MTLHash, EntryHash),
+    {TSE, MTLHash}.
+
+get_ratelimit_token(Type) ->
+    ratelimit:get_token(Type).
+
+-spec add_chain(binary(), [binary()], normal|precert) -> {[{_,_},...]}.
+add_chain(LeafCert, CertChain, Type) ->
+    EntryHash = crypto:hash(sha256, [LeafCert | CertChain]),
     {TimestampedEntry, Hash} =
         case plop:get(EntryHash) of
             notfound ->
-                Timestamp = plop:generate_timestamp(),
-                TSE = timestamped_entry(Timestamp, EntryType, LeafCert, CertChain),
-                MTLText = serialise(#mtl{leaf_version = v1,
-                                         leaf_type = timestamped_entry,
-                                         entry = TSE}),
-                MTLHash = ht:leaf_hash(MTLText),
-                ExtraData =
-                    case Type of
-                        normal -> CertChain;
-                        precert -> CombinedChain
-                    end,
-                LogEntry =
-                    list_to_binary(
-                      [encode_tls_vector(MTLText, 4),
-                       encode_tls_vector(
-                         encode_tls_vector(
-                           list_to_binary(
-                             [encode_tls_vector(C, 3) || C <- ExtraData]),
-                           3),
-                         4)]),
-                ok = plop:add(LogEntry, MTLHash, EntryHash),
-                {TSE, MTLHash};
+                case get_ratelimit_token(add_chain) of
+                    ok ->
+                        add_to_db(Type, LeafCert, CertChain, EntryHash);
+                    _ ->
+                        exit({internalerror, "Rate limiting"})
+                end;
             {_Index, MTLHash, DBEntry} ->
                 {MTLText, _ExtraData} = unpack_entry(DBEntry),
                 MTL = deserialise_mtl(MTLText),
diff --git a/src/catlfish_sup.erl b/src/catlfish_sup.erl
index 5da7b93..353b691 100644
--- a/src/catlfish_sup.erl
+++ b/src/catlfish_sup.erl
@@ -9,6 +9,16 @@ start_link(_Args) ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
 
+permanent_worker(Name, {Module, Function, Args}) ->
+    permanent_worker(Name, {Module, Function, Args}, [Module]).
+
+permanent_worker(Name, StartFunc, Modules) ->
+    {Name,
+     StartFunc,
+     permanent,
+     10000,
+     worker, Modules}.
+
 gen_http_config(Config, SSLOptions, SSLFlag) ->
     {ChildName, IpAddress, Port, Module} = Config,
     {ok, IPv4Address} =
@@ -37,6 +47,7 @@ init([]) ->
                               gen_http_config(Config, SSLOptions, false)
                       end,
                       application:get_env(catlfish, http_servers, [])),
     lager:debug("Starting servers ~p", [Servers]),
+    RateLimit = permanent_worker(ratelimit, {ratelimit, start_link, []}),
     {ok, {{one_for_one, 3, 10},
-          Servers}}.
+          [RateLimit | Servers]}}.
diff --git a/src/ratelimit.erl b/src/ratelimit.erl
new file mode 100644
index 0000000..16d0f30
--- /dev/null
+++ b/src/ratelimit.erl
@@ -0,0 +1,87 @@
+%%% Copyright (c) 2015, NORDUnet A/S.
+%%% See LICENSE for licensing information.
+%%%
+
+-module(ratelimit).
+-behaviour(gen_server).
+
+-export([start_link/0, stop/0]).
+-export([get_token/1]).
+%% gen_server callbacks.
+-export([init/1, handle_call/3, terminate/2, handle_cast/2, handle_info/2,
+         code_change/3]).
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE,
+                          application:get_env(catlfish, ratelimits, []), []).
+
+stop() ->
+    gen_server:call(?MODULE, stop).
+
+get_token(Type) ->
+    gen_server:call(?MODULE, {get_token, Type}).
+
+init(Types) ->
+    lager:debug("starting ratelimit service"),
+    State = dict:from_list([{Type, {Rules, queue:new()}}
+                            || {Type, Rules} <- Types]),
+    {ok, State}.
+
+rule_interval_atom([{_, Interval}]) ->
+    Interval.
+
+rule_interval([{_, second}]) ->
+    1000;
+rule_interval([{_, minute}]) ->
+    60*1000;
+rule_interval([{_, hour}]) ->
+    60*60*1000.
+
+rule_times([{Times, _}]) when is_integer(Times) ->
+    Times.
+
+clean_queue(Interval, Queue) ->
+    Now = plop:generate_timestamp(),
+    case queue:peek(Queue) of
+        {value, Item} when Item + Interval < Now ->
+            clean_queue(Interval, queue:drop(Queue));
+        _ ->
+            Queue
+    end.
+
+get_token_for_type({none, Queue}) ->
+    {ok, {none, Queue}};
+get_token_for_type({Rules, Queue}) ->
+    CleanedQueue = clean_queue(rule_interval(Rules), Queue),
+    MaxTimes = rule_times(Rules),
+    QueueLength = queue:len(CleanedQueue),
+    if
+        QueueLength < MaxTimes ->
+            Now = plop:generate_timestamp(),
+            {ok, {Rules, queue:in(Now, CleanedQueue)}};
+        true ->
+            {overload, {Rules, CleanedQueue}}
+    end.
+
+handle_call(stop, _From, State) ->
+    {stop, normal, stopped, State};
+handle_call({get_token, Type}, _From, State) ->
+    case dict:find(Type, State) of
+        {ok, TypeState} ->
+            {Result, NewTypeState} = get_token_for_type(TypeState),
+            {Rules, Queue} = NewTypeState,
+            lager:debug("current rate: ~p per ~p",
+                        [queue:len(Queue), rule_interval_atom(Rules)]),
+            {reply, Result, dict:store(Type, NewTypeState, State)};
+        error ->
+            {reply, ok, State}
+    end.
+
+handle_cast(_Request, State) ->
+    {noreply, State}.
+handle_info(_Info, State) ->
+    {noreply, State}.
+code_change(_OldVersion, State, _Extra) ->
+    {ok, State}.
+terminate(_Reason, _State) ->
+    ok.
```
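
The limiter implements a sliding window: `clean_queue/2` drops timestamps older than the configured interval (converted to milliseconds by `rule_interval/1`, so `plop:generate_timestamp/0` is treated as a millisecond clock), and `get_token_for_type/1` admits a request only while fewer than the configured number of timestamps remain queued; request types without a rule are always admitted. A hedged shell sketch, not part of the commit, assuming the `catlfish` environment holds `{ratelimits, [{add_chain, [{10, second}]}]}` and that `plop` is available; `some_other_call` is just a placeholder type name.

```erlang
%% Hedged sketch, not part of the commit: exercising the limiter from an
%% Erlang shell. Skip start_link/0 if catlfish_sup has already started
%% the server.
{ok, _Pid} = ratelimit:start_link().
[ratelimit:get_token(add_chain) || _ <- lists:seq(1, 12)].
%% With a fresh queue and all calls inside one second this yields
%% [ok,ok,ok,ok,ok,ok,ok,ok,ok,ok,overload,overload]: the eleventh call
%% still sees ten timestamps younger than 1000 ms in the queue.
ratelimit:get_token(some_other_call).
%% => ok -- types with no configured rule are always admitted.
```
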
```diff
diff --git a/test/catlfish-test-local-1.cfg b/test/catlfish-test-local-1.cfg
index be1c5b3..88eda59 100644
--- a/test/catlfish-test-local-1.cfg
+++ b/test/catlfish-test-local-1.cfg
@@ -23,5 +23,8 @@ paths:
   logpublickey: tests/keys/logkey.pem
   privatekeys: tests/privatekeys
 
+ratelimits:
+  add_chain: 10 per second
+
 #options:
 #  - sctcaching
diff --git a/tools/compileconfig.py b/tools/compileconfig.py
index d90d96d..c48ba66 100755
--- a/tools/compileconfig.py
+++ b/tools/compileconfig.py
@@ -150,6 +150,21 @@ def allowed_servers_frontend(signingnodenames, storagenodenames):
         ("/ct/signing/sct", signingnodenames),
     ]
 
+def parse_ratelimit_expression(expression):
+    if expression == "none":
+        return Symbol("none")
+    parts = expression.split(" ")
+    if not (len(parts) == 3 and parts[1] == 'per' and parts[2] in ["second", "minute", "hour"]):
+        print >>sys.stderr, "Ratelimit expressions must have the format \"<frequency> per second|minute|hour\" or \"none\""
+        sys.exit(1)
+    return (int(parts[0]), Symbol(parts[2]))
+
+def parse_ratelimit((type, description)):
+    descriptions = [parse_ratelimit_expression(s.strip()) for s in description.split(",")]
+    if len(descriptions) != 1:
+        print >>sys.stderr, "%s: Only one ratelimit expression supported right now" % (type,)
+    return (Symbol(type), descriptions)
+
 def gen_config(nodename, config, localconfig):
     print "generating config for", nodename
     paths = localconfig["paths"]
@@ -171,6 +186,9 @@ def gen_config(nodename, config, localconfig):
         catlfishconfig.append((Symbol("known_roots_path"), localconfig["paths"]["knownroots"]))
     if "sctcaching" in options:
         catlfishconfig.append((Symbol("sctcache_root_path"), paths["db"] + "sctcache/"))
+    if localconfig["ratelimits"]:
+        ratelimits = map(parse_ratelimit, localconfig["ratelimits"].items())
+        catlfishconfig.append((Symbol("ratelimits"), ratelimits))
     catlfishconfig += [
         (Symbol("https_servers"), https_servers),
```
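
For completeness, the `ratelimits` section of the node config is compiled by `tools/compileconfig.py` into the `catlfish` application environment that `ratelimit:init/1` reads: each `<type>: <frequency> per <unit>` entry is expected to come out as `{Type, [{Frequency, Unit}]}`, which is the shape `rule_interval/1` and `rule_times/1` pattern-match on. The term below is a hand-written illustration of that mapping, not output captured from the tool.

```erlang
%% Hand-written illustration (not generated output): the expected
%% application-environment entry for "add_chain: 10 per second".
{catlfish, [
    {ratelimits, [
        {add_chain, [{10, second}]}
    ]}
]}.
```

On the Erlang side, `get_token_for_type({none, Queue})` also accepts the bare atom `none` as a rule, which disables limiting for that type.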