Skip to content

Commit

Permalink
Merge pull request #1012 from basho/feature/edoc-cleanup
Browse files Browse the repository at this point in the history
Edoc cleanup

Reviewed-by: macintux
  • Loading branch information
borshop committed Aug 8, 2014
2 parents cdde007 + 86f8af8 commit ec7e525
Show file tree
Hide file tree
Showing 21 changed files with 121 additions and 127 deletions.
27 changes: 14 additions & 13 deletions src/riak_client.erl
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@

-export_type([riak_client/0]).

%% @spec new(Node, ClientId) -> riak_client().
%% @spec new(Node, ClientId) -> riak_client()
%% @doc Return a riak client instance.
new(Node, ClientId) ->
{?MODULE, [Node,ClientId]}.
Expand Down Expand Up @@ -262,15 +262,15 @@ consistent_put_type(RObj, Options) ->
put_once
end.

%% @spec put(RObj :: riak_object:riak_object(), riak_kv_put_fsm::options(), riak_client()) ->
%% @spec put(RObj :: riak_object:riak_object(), riak_kv_put_fsm:options(), riak_client()) ->
%% ok |
%% {ok, details()} |
%% {ok, riak_object:riak_object()} |
%% {ok, riak_object:riak_object(), details()} |
%% {error, notfound} |
%% {error, timeout} |
%% {error, {n_val_violation, N::integer()}} |
%% {error, Err :: term()}
%% {error, Err :: term()} |
%% {error, Err :: term(), details()}
%% @doc Store RObj in the cluster.
put(RObj, Options, {?MODULE, [Node, _ClientId]}=THIS) when is_list(Options) ->
Expand Down Expand Up @@ -408,7 +408,7 @@ consistent_delete(Bucket, Key, Options, _Timeout, {?MODULE, [Node, _ClientId]})
delete_vclock(Bucket,Key,VClock,{?MODULE, [_Node, _ClientId]}=THIS) ->
delete_vclock(Bucket,Key,VClock,[{rw,default}],?DEFAULT_TIMEOUT,THIS).

%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), vclock::vclock(),
%% @spec delete_vclock(riak_object:bucket(), riak_object:key(), vclock:vclock(),
%% RW :: integer(), riak_client()) ->
%% ok |
%% {error, too_many_fails} |
Expand Down Expand Up @@ -487,7 +487,8 @@ list_keys(Bucket, {?MODULE, [_Node, _ClientId]}=THIS) ->
list_keys(Bucket, Timeout, {?MODULE, [_Node, _ClientId]}=THIS) ->
list_keys(Bucket, none, Timeout, THIS).

%% @spec list_keys(riak_object:bucket(), TimeoutMillisecs :: integer(), riak_client()) ->
%% @spec list_keys(riak_object:bucket(), Filter :: term(),
%% TimeoutMillisecs :: integer(), riak_client()) ->
%% {ok, [Key :: riak_object:key()]} |
%% {error, timeout} |
%% {error, Err :: term()}
Expand Down Expand Up @@ -567,7 +568,7 @@ stream_list_keys(Input, Timeout, Client, {?MODULE, [Node, _ClientId]}) when is_p
filter_keys(Bucket, Fun, {?MODULE, [_Node, _ClientId]}=THIS) ->
list_keys(Bucket, Fun, ?DEFAULT_TIMEOUT, THIS).

%% @spec filter_keys(riak_object:bucket(), Fun :: function(), TimeoutMillisecs :: integer()
%% @spec filter_keys(riak_object:bucket(), Fun :: function(), TimeoutMillisecs :: integer(),
%% riak_client()) ->
%% {ok, [Key :: riak_object:key()]} |
%% {error, timeout} |
Expand All @@ -592,7 +593,7 @@ filter_keys(Bucket, Fun, Timeout, {?MODULE, [_Node, _ClientId]}=THIS) ->
list_buckets({?MODULE, [_Node, _ClientId]}=THIS) ->
list_buckets(none, ?DEFAULT_TIMEOUT, <<"default">>, THIS).

%% @spec list_buckets(timeout()) ->
%% @spec list_buckets(timeout(), riak_client()) ->
%% {ok, [Bucket :: riak_object:bucket()]} |
%% {error, timeout} |
%% {error, Err :: term()}
Expand All @@ -607,7 +608,8 @@ list_buckets(undefined, {?MODULE, [_Node, _ClientId]}=THIS) ->
list_buckets(Timeout, {?MODULE, [_Node, _ClientId]}=THIS) ->
list_buckets(none, Timeout, <<"default">>, THIS).

%% @spec list_buckets(TimeoutMillisecs :: integer(), riak_client()) ->
%% @spec list_buckets(TimeoutMillisecs :: integer(), Filter :: term(),
%% riak_client()) ->
%% {ok, [Bucket :: riak_object:bucket()]} |
%% {error, timeout} |
%% {error, Err :: term()}
Expand Down Expand Up @@ -654,7 +656,8 @@ stream_list_buckets(Filter, Timeout, {?MODULE, [_Node, _ClientId]}=THIS) ->

%% @spec stream_list_buckets(FilterFun :: fun(),
%% TimeoutMillisecs :: integer(),
%% Client :: pid()) ->
%% Client :: pid(),
%% riak_client()) ->
%% {ok, [Bucket :: riak_object:bucket()]} |
%% {error, timeout} |
%% {error, Err :: term()}
Expand Down Expand Up @@ -686,8 +689,7 @@ stream_list_buckets(Filter, Timeout, Client, Type,
%% riak_client()) ->
%% {ok, [Key :: riak_object:key()]} |
%% {error, timeout} |
%% {error, Err :: term()}.
%%
%% {error, Err :: term()}
%% @doc Run the provided index query.
get_index(Bucket, Query, {?MODULE, [_Node, _ClientId]}=THIS) ->
get_index(Bucket, Query, [{timeout, ?DEFAULT_TIMEOUT}], THIS).
Expand All @@ -698,8 +700,7 @@ get_index(Bucket, Query, {?MODULE, [_Node, _ClientId]}=THIS) ->
%% riak_client()) ->
%% {ok, [Key :: riak_object:key()]} |
%% {error, timeout} |
%% {error, Err :: term()}.
%%
%% {error, Err :: term()}
%% @doc Run the provided index query.
get_index(Bucket, Query, Opts, {?MODULE, [Node, _ClientId]}) ->
Timeout = proplists:get_value(timeout, Opts, ?DEFAULT_TIMEOUT),
Expand Down
58 changes: 25 additions & 33 deletions src/riak_index.erl
Original file line number Diff line number Diff line change
Expand Up @@ -57,16 +57,16 @@
%% it did before the FSM timeout bug was "fixed"
-define(DEFAULT_TIMEOUT, infinity).

%% @type data_type_defs() :: [data_type_def()].
%% @type data_type_def() :: {MatchFunction::function(), ParseFunction::function()}.
%% @type failure_reason() :: {unknown_field_type, Field :: binary()}
%% @type data_type_defs() = [data_type_def()].
%% @type data_type_def() = {MatchFunction :: function(), ParseFunction :: function()}.
%% @type failure_reason() = {unknown_field_type, Field :: binary()}
%% | {field_parsing_failed, {Field :: binary(), Value :: binary()}}.

%% @type bucketname() :: binary().
%% @type index_field() :: binary().
%% @type index_value() :: binary() | integer().
%% @type query_element() :: {eq, index_field(), [index_value()]},
%% :: {range, index_field(), [index_value(), index_value()}
%% @type bucketname() = binary().
%% @type index_field() = binary().
%% @type index_value() = binary() | integer().
%% @type query_element() = {eq, index_field(), index_value()} |
%% {range, index_field(), index_value(), index_value()}

-type query_def() :: {ok, term()} | {error, term()} | {term(), {error, term()}}.
-export_type([query_def/0]).
Expand All @@ -85,14 +85,13 @@ mapred_index(_Pipe, [Bucket, Query], Timeout) ->
{ok, Bucket, ReqId}.

%% @spec parse_object_hook(riak_object:riak_object()) ->
%% riak_object:riak_object() | {fail, [failure_reason()]}.
%%
%% riak_object:riak_object() | {fail, [failure_reason()]}
%% @doc Parse the index fields stored in object metadata. Conforms to
%% the pre-commit hook interface. Return the new object with
%%      parsed fields stuffed into the metadata if validation was
%% successful, or {fail, [Reasons]} if validation failed. Reason
%% is either `{unknown_field_type, Field}` or
%% `{field_parsing_failed, {Field, Value}}.`
%% is either `{unknown_field_type, Field}' or
%% `{field_parsing_failed, {Field, Value}}.'
parse_object_hook(RObj) ->
%% Ensure that the object only has a single metadata, or fail
%% loudly.
Expand All @@ -118,13 +117,12 @@ parse_object_hook(RObj) ->
end.

%% @spec parse_object(riak_object:riak_object()) -> {ok, [{Field::binary(), Val :: term()}]}
%% | {error, [failure_reason()]}.
%%
%% | {error, [failure_reason()]}
%% @doc Pull out index fields stored in the metadata of the provided
%% Riak Object. Parse the fields, and return {ok, [{Field,
%% Value}]} if successful, or {error, [Reasons]} on error. Reason
%% is either `{unknown_field_type, Field}` or
%% `{field_parsing_failed, {Field, Value}}.`
%% is either `{unknown_field_type, Field}' or
%% `{field_parsing_failed, {Field, Value}}.'
parse_object(RObj) ->
%% For each object metadata, pull out any IndexFields. This could
%% be called on a write with siblings, so we need to examine *all*
Expand All @@ -142,13 +140,12 @@ parse_object(RObj) ->
%% Now parse the fields, returning the result.
parse_fields(IndexFields).

%% @spec parse_fields([Field :: {Key:binary(), Value :: binary()}]) ->
%% {ok, [{Field :: binary(), Value :: term()}]} | {error, [failure_reason()]}.
%%
%% @spec parse_fields([Field :: {Key :: binary(), Value :: binary()}]) ->
%% {ok, [{Field :: binary(), Value :: term()}]} | {error, [failure_reason()]}
%% @doc Parse the provided index fields. Returns {ok, Fields} if the
%% parsing was successful, or {error, Reasons} if parsing
%% failed. Reason is either `{unknown_field_type, Field}` or
%% `{field_parsing_failed, {Field, Value}}.`
%% failed. Reason is either `{unknown_field_type, Field}' or
%% `{field_parsing_failed, {Field, Value}}.'
parse_fields(IndexFields) ->
%% Call parse_field on each field, and accumulate in ResultAcc or
%% ErrorAcc, depending on whether the operation was successful.
Expand All @@ -174,12 +171,11 @@ parse_fields(IndexFields) ->


%% @spec parse_field(Key::binary(), Value::binary(), Types::data_type_defs()) ->
%% {ok, Value} | {error, Reason}.
%%
%% {ok, Value} | {error, Reason}
%% @doc Parse an index field. Return {ok, Value} on success, or
%% {error, Reason} if there is a problem. Reason is either
%% `{unknown_field_type, Field}` or `{field_parsing_failed,
%% {Field, Value}}.`
%% `{unknown_field_type, Field}' or `{field_parsing_failed,
%% {Field, Value}}.'
parse_field(Key, Value, [Type|Types]) ->
%% Run the regex to check if the key suffix matches this data
%% type.
Expand All @@ -205,8 +201,7 @@ parse_field(Key, _Value, []) ->


%% @private
%% @spec is_field_match(Key :: binary(), Suffix :: binary()) -> boolean().
%%
%% @spec is_field_match(Key :: binary(), Suffix :: binary()) -> boolean()
%% @doc Return true if the Key matches the suffix. Special case for $bucket
%% and $key.
is_field_match(Key, ?BUCKETFIELD) ->
Expand All @@ -228,8 +223,7 @@ is_field_match(Key, Suffix) when size(Suffix) < size(Key) ->
is_field_match(_, _) ->
false.

%% @spec format_failure_reason(FailureReason :: {atom(), term()}) -> string().
%%
%% @spec format_failure_reason(FailureReason :: {atom(), term()}) -> string()
%% @doc Given a failure reason, turn it into a human-readable string.
format_failure_reason(FailureReason) ->
case FailureReason of
Expand All @@ -239,8 +233,7 @@ format_failure_reason(FailureReason) ->
io_lib:format("Could not parse field '~s', value '~s'.~n", [Field, Value])
end.

%% @spec timestamp() -> integer().
%%
%% @spec timestamp() -> integer()
%% @doc Get a timestamp, the number of milliseconds returned by
%% erlang:now().
timestamp() ->
Expand Down Expand Up @@ -518,8 +511,7 @@ add_timeout_opt(0, Opts) ->
add_timeout_opt(Timeout, Opts) ->
[{timeout, Timeout} | Opts].

%% @spec field_types() -> data_type_defs().
%%
%% @spec field_types() -> data_type_defs()
%% @doc Return a list of {MatchFunction, ParseFunction} tuples that
%% map a field name to a field type.
field_types() ->
Expand Down
3 changes: 2 additions & 1 deletion src/riak_kv_bucket.erl
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,8 @@
%%
%% There is no way to _remove_ a property
%%
%% @see validate_dt_props/3, assert_no_datatype/1
%% @see validate_dt_props/3
%% @see assert_no_datatype/1
-spec validate(create | update,
{riak_core_bucket_type:bucket_type(), undefined | binary()} | binary(),
undefined | props(),
Expand Down
2 changes: 1 addition & 1 deletion src/riak_kv_counter.erl
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
%% @doc Backwards compatibility with 1.4 counters `value' function as
%% used in the CRDT cookbook.
%%
%% @see riak_kv_crdt.erl
%% @see riak_kv_crdt
%% @end

-module(riak_kv_counter).
Expand Down
4 changes: 2 additions & 2 deletions src/riak_kv_crdt.erl
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ counter_op(N) ->
%% the operation then apply the operation to the local merged replica,
%% and risk precondition errors and unexpected behaviour.
%%
%% @see split_ops/1 for more explanation
%% @see split_ops/1
-spec update_crdt(orddict:orddict(), riak_dt:actor(), crdt_op() | non_neg_integer()) ->
orddict:orddict() | precondition_error().
update_crdt(Dict, Actor, Amt) when is_integer(Amt) ->
Expand Down Expand Up @@ -450,7 +450,7 @@ mod_map(_) ->



%% @Doc the update context can be empty for some types.
%% @doc the update context can be empty for some types.
%% Those that support an precondition_context should supply
%% a smaller than Type:to_binary(Value) binary context.
get_context(Type, Value) ->
Expand Down
3 changes: 0 additions & 3 deletions src/riak_kv_delete.erl
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,6 @@ start_link(ReqId, Bucket, Key, Options, Timeout, Client, ClientId, VClock) ->
Options, Timeout, Client, ClientId,
VClock])}.

%% @spec delete(ReqId :: binary(), riak_object:bucket(), riak_object:key(),
%% RW :: integer(), TimeoutMillisecs :: integer(), Client :: pid())
%% -> term()
%% @doc Delete the object at Bucket/Key. Direct return value is uninteresting,
%% see riak_client:delete/3 for expected gen_server replies to Client.
delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,undefined) ->
Expand Down
2 changes: 1 addition & 1 deletion src/riak_kv_gcounter.erl
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
%% can only be incremented. Borrows liberally from argv0 and Justin Sheehy's vclock module
%% in implementation.
%%
%% @see riak_kv_pncounter.erl for a counter that can be decremented
%% @see riak_kv_pncounter
%%
%% @reference Marc Shapiro, Nuno Preguiça, Carlos Baquero, Marek Zawirski (2011) A comprehensive study of
%% Convergent and Commutative Replicated Data Types. http://hal.upmc.fr/inria-00555588/
Expand Down
1 change: 0 additions & 1 deletion src/riak_kv_memory_backend.erl
Original file line number Diff line number Diff line change
Expand Up @@ -732,7 +732,6 @@ ttl_test_() ->
?_assertEqual({error, not_found, State}, get(Bucket, Key, State))
].

%% @private
max_memory_test_() ->
%% Set max size to 1.5kb
Config = [{max_memory, 1.5 * (1 / 1024)}],
Expand Down
60 changes: 29 additions & 31 deletions src/riak_kv_multi_backend.erl
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,35 @@
%%
%% -------------------------------------------------------------------

%% @doc riak_kv_multi_backend allows you to run multiple backends within a
%% single Riak instance. The 'backend' property of a bucket specifies
%% the backend in which the object should be stored. If no 'backend'
%% is specified, then the 'multi_backend_default' setting is used.
%% If this is unset, then the first defined backend is used.
%%
%% === Configuration ===
%%
%% {storage_backend, riak_kv_multi_backend},
%% {multi_backend_default, first_backend},
%% {multi_backend, [
%% % format: {name, module, [Configs]}
%% {first_backend, riak_xxx_backend, [
%% {config1, ConfigValue1},
%% {config2, ConfigValue2}
%% ]},
%% {second_backend, riak_yyy_backend, [
%% {config1, ConfigValue1},
%% {config2, ConfigValue2}
%% ]}
%% ]}
%%
%%
%% Then, tell a bucket which one to use...
%%
%% riak_core_bucket:set_bucket(&lt;&lt;"MY_BUCKET"&gt;&gt;, [{backend, second_backend}])
%%
%%

-module (riak_kv_multi_backend).
-behavior(riak_kv_backend).

Expand Down Expand Up @@ -60,35 +89,6 @@
-type state() :: #state{}.
-type config() :: [{atom(), term()}].

%% @doc riak_kv_multi_backend allows you to run multiple backends within a
%% single Riak instance. The 'backend' property of a bucket specifies
%% the backend in which the object should be stored. If no 'backend'
%% is specified, then the 'multi_backend_default' setting is used.
%% If this is unset, then the first defined backend is used.
%%
%% === Configuration ===
%%
%% {storage_backend, riak_kv_multi_backend},
%% {multi_backend_default, first_backend},
%% {multi_backend, [
%% % format: {name, module, [Configs]}
%% {first_backend, riak_xxx_backend, [
%% {config1, ConfigValue1},
%% {config2, ConfigValue2}
%% ]},
%% {second_backend, riak_yyy_backend, [
%% {config1, ConfigValue1},
%% {config2, ConfigValue2}
%% ]}
%% ]}
%%
%%
%% Then, tell a bucket which one to use...
%%
%% riak_core_bucket:set_bucket(&lt;&lt;"MY_BUCKET"&gt;&gt;, [{backend, second_backend}])
%%
%%

%% ===================================================================
%% Public API
%% ===================================================================
Expand Down Expand Up @@ -598,7 +598,6 @@ backend_can_index_reformat(Mod, ModState) ->
%% ===================================================================
-ifdef(TEST).

%% @private
multi_backend_test_() ->
{foreach,
fun() ->
Expand Down Expand Up @@ -674,7 +673,6 @@ multi_backend_test_() ->

-ifdef(EQC).

%% @private
eqc_test_() ->
{spawn,
[{inorder,
Expand Down
2 changes: 1 addition & 1 deletion src/riak_kv_pipe_index.erl
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ keysend(Bucket, [Key | Keys], Partition, FittingDetails) ->
ER
end.

%% @Doc remove the index term from the
%% @doc remove the index term from the
%% 2i result
strip_index({_IndexTerm, Key}) ->
Key;
Expand Down
Loading

0 comments on commit ec7e525

Please sign in to comment.