-module(db).
-behaviour(gen_server).

%% API.
-export([start_link/0, stop/0]).
-export([init_db/0, init_db/1, init_tables/0, init_tables/1]).
-export([add/1, find/1, get_by_index/2, size/0]).
%% API for testing.
-export([dump/1, destroy_tables/0, info_tables/0, dump_to_file/1]).
%% gen_server callbacks.
-export([init/1, handle_call/3, terminate/2, handle_cast/2,
         handle_info/2, code_change/3]).

-include_lib("stdlib/include/qlc.hrl").
-include("db.hrl").
-include("$CTROOT/plop/include/plop.hrl").

%% @doc Set up a database schema on all nodes that are to be part of
%% the "database cluster". Must be run _before_ mnesia has been
%% started.
init_db() ->
    init_db([node()]).
init_db(Nodes) ->
    ok = mnesia:create_schema(Nodes),
    rpc:multicall(Nodes, application, start, [mnesia]),
    init_tables(Nodes),
    rpc:multicall(Nodes, application, stop, [mnesia]).

%% @doc Run once per new database. If run more than once, mnesia
%% returns {aborted, {already_exists, plop}} and the {atomic, ok}
%% match below fails.
init_tables() ->
    init_tables([node()]).
init_tables(Nodes) ->
    %% mnesia:create_schema/1 has already been run (see init_db/1)
    %% with the nodes that will be part of the database.
    RamCopies = [],
    DiscCopies = [],
    DiscOnlyCopies = Nodes,
    mnesia:start(),
    {atomic, ok} =
        mnesia:create_table(plop,
                            [{type, set},
                             {ram_copies, RamCopies},
                             {disc_copies, DiscCopies},
                             {disc_only_copies, DiscOnlyCopies},
                             {attributes, record_info(fields, plop)},
                             {majority, true}]),
    {atomic, ok} = mnesia:add_table_index(plop, hash).

destroy_tables() ->
    mnesia:delete_table(plop).
info_tables() ->
    mnesia:table_info(plop, all).
dump_to_file(Filename) ->
    mnesia:dump_to_textfile(Filename).
size() ->
    mnesia:table_info(plop, size).

%% gen_server callback. Fail fast if the plop table cannot be loaded
%% within five seconds.
init(_Args) ->
    ok = mnesia:wait_for_tables([plop], 5000),
    {ok, []}.

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
stop() ->
    gen_server:call(?MODULE, stop).

%% API.
add(Entry) ->
    gen_server:call(?MODULE, {add, Entry}).
find(Hash) ->
    gen_server:call(?MODULE, {find, Hash}).
dump(Table) ->
    gen_server:call(?MODULE, {dump, Table}).
get_by_index(Start, End) ->
    gen_server:call(?MODULE, {get_by_index, {Start, End}}).

%%%%%%%%%%%%%%%%%%%%
handle_cast(_Request, State) ->
    {noreply, State}.
handle_info(_Info, State) ->
    {noreply, State}.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
terminate(_Reason, _State) ->
    io:format("~p terminating~n", [?MODULE]),
    ok.

%%%%%%%%%%%%%%%%%%%%
handle_call(stop, _From, State) ->
    {stop, normal, stopped, State};
handle_call({add, Entry}, _From, State) ->
    F = fun() ->
                mnesia:write(Entry)
        end,
    Res = mnesia:transaction(F),
    {reply, Res, State};
handle_call({dump, Table}, _From, State) ->
    F = fun() ->
                Q = qlc:q([E || E <- mnesia:table(Table)]),
                qlc:e(Q)
        end,
    Res = mnesia:transaction(F),
    {reply, Res, State};
%% Look up an entry via the secondary index on #plop.hash.
handle_call({find, Hash}, _From, State) ->
    F = fun() ->
                mnesia:index_read(plop, Hash, #plop.hash)
        end,
    {atomic, Result} = mnesia:transaction(F),
    Record = case length(Result) of
                 0 -> [];
                 1 -> hd(Result);
                 _ -> duplicate_hash_in_db % FIXME: log an error
             end,
    {reply, Record, State};
%% Select the mtl field of all entries whose index lies in [Start, End].
handle_call({get_by_index, {Start, End}}, _From, State) ->
    F = fun() ->
                MatchHead = #plop{index = '$1', mtl = '$2', _ = '_'},
                Guard = [{'>=', '$1', Start}, {'=<', '$1', End}],
                Result = ['$2'],
                mnesia:select(plop, [{MatchHead, Guard, Result}])
        end,
    {atomic, Res} = mnesia:transaction(F),
    {reply, Res, State}.
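
%% Usage sketch (not part of the original module; `Entry' and `Hash'
%% are placeholders for a #plop{} record and its hash field, and the
%% single-node setup is an assumption):
%%
%%   Nodes = [node()],
%%   db:init_db(Nodes),                  % once, before mnesia is running
%%   ok = application:start(mnesia),
%%   {ok, _Pid} = db:start_link(),
%%   {atomic, ok} = db:add(Entry),       % Entry is a #plop{} record
%%   Entry = db:find(Hash),              % returns [] if the hash is unknown
%%   Mtls = db:get_by_index(1, 100),     % mtl of entries with index 1..100
%%   stopped = db:stop().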