diff --git a/include/yokozuna.hrl b/include/yokozuna.hrl
index 1c40a015..f9cdd3ed 100644
--- a/include/yokozuna.hrl
+++ b/include/yokozuna.hrl
@@ -160,6 +160,10 @@
                                             app_helper:get_env(riak_core, platform_data_dir)++"/yz")).
 -define(YZ_TEMP_DIR, app_helper:get_env(?YZ_APP_NAME, temp_dir,
                                         app_helper:get_env(riak_core, platform_data_dir)++"/yz_temp")).
+%% The request timeout for Solr calls. Defaults to 60 seconds.
+-define(YZ_SOLR_REQUEST_TIMEOUT, app_helper:get_env(?YZ_APP_NAME,
+                                                    solr_request_timeout,
+                                                    60000)).
 -define(YZ_PRIV, code:priv_dir(?YZ_APP_NAME)).
 -define(YZ_CORE_CFG_FILE, "solrconfig.xml").
 -define(YZ_INDEX_CMD, #yz_index_cmd).
@@ -237,11 +241,12 @@
                          {partition, lp()} |
                          {limit, pos_integer()}].
 -type ed_continuation() :: none | base64().
+-type ed_pairs() :: [{DocID::binary(), Hash::base64()}].
 
 -record(entropy_data, {
           more=false :: boolean(),
           continuation :: ed_continuation(),
-          pairs :: [{DocID::binary(), Hash::base64()}]
+          pairs :: ed_pairs()
          }).
 -type entropy_data() :: #entropy_data{}.
 -type keydiff() :: hashtree:keydiff().
@@ -288,6 +293,8 @@
 -define(ERROR(Fmt), lager:error(Fmt)).
 -define(ERROR(Fmt, Args), lager:error(Fmt, Args)).
 -define(INFO(Fmt, Args), lager:info(Fmt, Args)).
+-define(NOTICE(Fmt, Args), lager:notice(Fmt, Args)).
+-define(NOTICE(Fmt), lager:notice(Fmt)).
 -define(WARN(Fmt, Args), lager:warning(Fmt, Args)).
 
 %%%===================================================================
diff --git a/priv/yokozuna.schema b/priv/yokozuna.schema
index fd79cbcd..67d7c9f8 100644
--- a/priv/yokozuna.schema
+++ b/priv/yokozuna.schema
@@ -59,3 +59,12 @@
    {datatype, directory},
    hidden
 ]}.
+
+%% @doc The timeout for ibrowse (ibrowse:send_req) requests to Solr endpoints.
+%% Defaults to 60 seconds. The value always rounds up to the nearest second,
+%% e.g. both 1ms and 999ms become 1s.
+{mapping, "search.solr.request_timeout", "yokozuna.solr_request_timeout", [
+  {default, "60s"},
+  {datatype, {duration, ms}},
+  hidden
+]}.
diff --git a/riak_test/intercepts/yz_solr_intercepts.erl b/riak_test/intercepts/yz_solr_intercepts.erl
index 87db6312..2c65ef66 100644
--- a/riak_test/intercepts/yz_solr_intercepts.erl
+++ b/riak_test/intercepts/yz_solr_intercepts.erl
@@ -1,6 +1,25 @@
 -module(yz_solr_intercepts).
 -compile(export_all).
 
+-type index_name() :: binary().
+
+-define(FMT(S, Args), lists:flatten(io_lib:format(S, Args))).
+
+-spec slow_cores() -> {ok, []}.
 slow_cores() ->
     timer:sleep(6000),
     {ok, []}.
+
+-spec entropy_data_cant_complete(index_name(), list()) -> {error, term()}.
+entropy_data_cant_complete(Core, Filter) ->
+    Params = [{wt, json}|Filter] -- [{continuation, none}],
+    Params2 = proplists:substitute_aliases([{continuation, continue},
+                                            {limit, n}], Params),
+    Opts = [{response_format, binary}],
+    URL = ?FMT("~s/~s/entropy_data?~s",
+               [yz_solr:base_url(), Core, mochiweb_util:urlencode(Params2)]),
+    case ibrowse:send_req(URL, [], get, [], Opts, 0) of
+        Error ->
+            {error, Error}
+    end.
+
diff --git a/riak_test/yz_entropy_data.erl b/riak_test/yz_entropy_data.erl
new file mode 100644
index 00000000..35583966
--- /dev/null
+++ b/riak_test/yz_entropy_data.erl
@@ -0,0 +1,131 @@
+%% -------------------------------------------------------------------
+%%
+%% Copyright (c) 2015 Basho Technologies, Inc.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License.  You may obtain
+%% a copy of the License at
+%%
+%%   http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied.  See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%%-------------------------------------------------------------------
+
+%% @doc Test the various entropy_data endpoint calls and the handling
+%%      of entropy_data iteration.
+%% @end
+
+-module(yz_entropy_data).
+
+-compile(export_all).
+-include("yokozuna.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-define(RING_SIZE, 32).
+-define(CFG,
+        [
+         {riak_core,
+          [
+           {ring_creation_size, ?RING_SIZE}
+          ]},
+         {riak_kv,
+          [
+           {anti_entropy_tick, 1000},
+           %% allow AAE to build trees and exchange rapidly
+           {anti_entropy_build_limit, {100, 1000}},
+           {anti_entropy_concurrency, 8}
+          ]},
+         {yokozuna,
+          [
+           {enabled, true},
+           {solr_request_timeout, 60000}
+          ]}
+        ]).
+-define(NO_HEADERS, []).
+-define(NO_BODY, <<>>).
+-define(INDEX, <<"test_search_entropy">>).
+-define(TYPE, <<"data_entropy">>).
+-define(BUCKET, {?TYPE, <<"test_search_entropy">>}).
+-define(TOTAL, 1000).
+
+confirm() ->
+    [Node1|_] = Cluster = rt:build_cluster(4, ?CFG),
+    rt:wait_for_cluster_service(Cluster, yokozuna),
+    ok = yz_rt:create_bucket_type(Node1, ?TYPE),
+    ok = yz_rt:create_index_http(Cluster, ?INDEX),
+    yz_rt:set_index(Node1, ?BUCKET, ?INDEX),
+    HP = hd(yz_rt:host_entries(rt:connection_info(Cluster))),
+
+    %% write_objs writes a sequence of 1000 objects
+    yz_rt:write_objs(Cluster, ?BUCKET),
+
+    %% time for soft auto commit
+    timer:sleep(1100),
+
+    ?assert(yz_rt:search_expect(yokozuna, HP, ?INDEX, "*", "*", ?TOTAL)),
+
+    EDParams = [{wt, json}],
+
+    PartitionList = rpc:call(Node1, yokozuna, partition_list, [?INDEX]),
+
+    EntropyURL = yz_rt:entropy_data_url({rt:select_random(Cluster),
+                                         yz_rt:node_solr_port(Node1)},
+                                        ?INDEX, EDParams),
+
+    test_entropy_get_missing_partition_param(EntropyURL),
+    test_entropy_get(Node1, ?INDEX, PartitionList, EDParams),
+
+    test_ed_timeout_error(Cluster, ?INDEX, rt:select_random(PartitionList),
+                          ?CFG),
+
+    pass.
+
+-spec test_entropy_get_missing_partition_param(string()) -> ok.
+test_entropy_get_missing_partition_param(URL) ->
+    lager:info("Test missing `partition` parameter on entropy url"),
+    {ok, Status, _, _} = yz_rt:http(get, URL, ?NO_HEADERS, ?NO_BODY),
+    ?assertEqual("500", Status).
+
+test_entropy_get(Node, Index, PartitionList, EDParams) ->
+    lager:info("Check documents for each solr `partition` in the partition list"),
+    EntropyURLs = [yz_rt:entropy_data_url(
+                     {Node, yz_rt:node_solr_port(Node)},
+                     Index,
+                     [{partition, P}|EDParams])
+                   || P <- PartitionList],
+    Results =
+        [begin
+             {ok, "200", _, R} = yz_rt:http(get, URL, ?NO_HEADERS, ?NO_BODY),
+             yz_rt:get_count(R)
+         end || URL <- EntropyURLs],
+
+    [?assert(Count > 0) || Count <- Results],
+    ok.
+
+-spec test_ed_timeout_error([node()], index_name(), p(), proplist()) -> ok.
+test_ed_timeout_error(Cluster, Index, Partition, _Config) ->
+    lager:info("Wait for a full exchange round before making the entropy call"),
+    TS1 = erlang:now(),
+    yz_rt:wait_for_full_exchange_round(Cluster, TS1),
+
+    Node = rt:select_random(Cluster),
+
+    %% load and install the intercept
+    rt_intercept:load_code(Node, [filename:join([rt_config:get(yz_dir),
+                                                 "riak_test", "intercepts",
+                                                 "*.erl"])]),
+    rt_intercept:add(Node, {yz_solr, [{{entropy_data, 2},
+                                       entropy_data_cant_complete}]}),
+
+    Filter = [{partition, Partition}],
+    Fun = fun({_BKey, _Hash}) ->
+                  fake_fun
+          end,
+    ?assertEqual(error, rpc:call(Node, yz_entropy, iterate_entropy_data,
+                                 [Index, Filter, Fun])).
diff --git a/riak_test/yz_mapreduce.erl b/riak_test/yz_mapreduce.erl
index d10f165a..67fa6710 100644
--- a/riak_test/yz_mapreduce.erl
+++ b/riak_test/yz_mapreduce.erl
@@ -40,7 +40,7 @@ confirm() ->
     yz_rt:create_index(yz_rt:select_random(Cluster), Index),
     yz_rt:set_bucket_type_index(yz_rt:select_random(Cluster), Index),
     timer:sleep(500),
-    write_objs(Cluster, Bucket),
+    yz_rt:write_objs(Cluster, Bucket),
     verify_objs_mr(Cluster, Index),
     ok = yz_rt:load_module(Cluster, ?MODULE),
     %% NOTE: Deliberate choice not to use `wait_unil'. The data is
@@ -123,23 +123,6 @@ verify_objs_mr(Cluster, Index) ->
         end,
     yz_rt:wait_until(Cluster, F).
 
--spec write_objs([node()], index_name()) -> ok.
-write_objs(Cluster, Bucket) ->
-    lager:info("Writing 1000 objects"),
-    lists:foreach(write_obj(Cluster, Bucket), lists:seq(1,1000)).
-
--spec write_obj([node()], bucket()) -> fun().
-write_obj(Cluster, Bucket) ->
-    fun(N) ->
-            PL = [{name_s,<<"yokozuna">>}, {num_i,N}],
-            Key = list_to_binary(io_lib:format("key_~B", [N])),
-            Body = mochijson2:encode(PL),
-            HP = yz_rt:select_random(yz_rt:host_entries(rt:connection_info(Cluster))),
-            CT = "application/json",
-            lager:info("Writing object with bkey ~p [~p]", [{Bucket, Key}, HP]),
-            yz_rt:http_put(HP, Bucket, Key, CT, Body)
-    end.
-
 -spec http_mr({host(), portnum()}, term()) -> binary().
 http_mr({Host,Port}, MR) ->
     URL = ?FMT("http://~s:~s/mapred", [Host, integer_to_list(Port)]),
diff --git a/riak_test/yz_rt.erl b/riak_test/yz_rt.erl
index b0cd28b0..6a062c56 100644
--- a/riak_test/yz_rt.erl
+++ b/riak_test/yz_rt.erl
@@ -486,8 +486,9 @@ wait_for_schema(Cluster, Name, Content) ->
     ok.
 
 verify_count(Expected, Resp) ->
-    lager:info("E: ~p, A: ~p", [Expected, get_count(Resp)]),
-    Expected == get_count(Resp).
+    Count = get_count(Resp),
+    lager:info("E: ~p, A: ~p", [Expected, Count]),
+    Expected =:= Count.
 
 -spec wait_for_index(list(), index_name()) -> ok.
 wait_for_index(Cluster, Index) ->
@@ -547,3 +548,33 @@ commit(Nodes, Index) ->
                    [Index, ?SOFTCOMMIT]),
     rpc:multicall(Nodes, yz_solr, commit, [Index]),
     ok.
+
+entropy_data_url({Host, Port}, Index, Params) ->
+    ?FMT("http://~s:~B/internal_solr/~s/entropy_data?~s",
+         [Host, Port, Index, mochiweb_util:urlencode(Params)]).
+
+-spec merge_config(proplist(), proplist()) -> proplist().
+merge_config(Change, Base) ->
+    lists:ukeymerge(1, lists:keysort(1, Change), lists:keysort(1, Base)).
+
+-spec write_objs([node()], bucket()) -> ok.
+write_objs(Cluster, Bucket) ->
+    write_objs(Cluster, Bucket, 1000).
+
+write_objs(Cluster, Bucket, NumObjects) ->
+    lager:info("Writing ~B objects", [NumObjects]),
+    lists:foreach(write_obj(Cluster, Bucket), lists:seq(1, NumObjects)).
+
+-spec write_obj([node()], bucket()) -> fun().
+write_obj(Cluster, Bucket) ->
+    fun(N) ->
+            PL = [{name_s,<<"yokozuna">>}, {num_i,N}],
+            Key = list_to_binary(io_lib:format("key_~B", [N])),
+            Body = mochijson2:encode(PL),
+            HP = yz_rt:select_random(yz_rt:host_entries(rt:connection_info(
+                                                           Cluster))),
+            CT = "application/json",
+            lager:info("Writing object with bkey ~p [~p]", [{Bucket, Key}, HP]),
+            yz_rt:http_put(HP, Bucket, Key, CT, Body)
+    end.
diff --git a/src/yz_entropy.erl b/src/yz_entropy.erl
index 43d8b727..f7b190b1 100644
--- a/src/yz_entropy.erl
+++ b/src/yz_entropy.erl
@@ -22,32 +22,63 @@
 -compile(export_all).
 -include("yokozuna.hrl").
 
-
-%%%===================================================================
-%%% Private
-%%%===================================================================
-
 %% @doc Iterate all the entropy data in `Index' calling `Fun' for
 %%      every 100 entries.
--spec iterate_entropy_data(index_name(), list(), function()) -> ok.
+-spec iterate_entropy_data(index_name(), list(), function()) ->
+                                  ok|error|not_available.
 iterate_entropy_data(Index, Filter, Fun) ->
     case yz_solr:ping(Index) of
         true ->
             Filter2 = [{continuation, none},
-                       {limit, app_helper:get_env(?YZ_APP_NAME, entropy_data_limit, 100)}|Filter],
-            ED = yz_solr:entropy_data(Index, Filter2),
-            iterate_entropy_data(Index, Filter2, Fun, ED);
+                       {limit,
+                        app_helper:get_env(?YZ_APP_NAME,
+                                           entropy_data_limit, 100)}|Filter],
+            case get_entropy_data(Index, Filter2) of
+                {ok, ED} ->
+                    iterate_entropy_data(Index, Filter2, Fun, ED);
+                {Err, _ED} ->
+                    Err
+            end;
         _ ->
-            ok
+            ?NOTICE("Can't ping Solr to start iterating over entropy data"),
+            not_available
     end.
 
+%%%===================================================================
+%%% Private
+%%%===================================================================
+
+-spec iterate_entropy_data(index_name(), list(), function(), ED::entropy_data())
+                          -> ok|error.
 iterate_entropy_data(Index, Filter, Fun, #entropy_data{more=true,
                                                        continuation=Cont,
                                                        pairs=Pairs}) ->
+    %% apply function to pairs before iterating through the next set
     lists:foreach(Fun, Pairs),
     Filter2 = lists:keyreplace(continuation, 1, Filter,
                                {continuation, Cont}),
-    ED = yz_solr:entropy_data(Index, Filter2),
-    iterate_entropy_data(Index, Filter2, Fun, ED);
+    case get_entropy_data(Index, Filter2) of
+        {ok, ED} ->
+            iterate_entropy_data(Index, Filter2, Fun, ED);
+        {Err, _ED} ->
+            Err
+    end;
 iterate_entropy_data(_, _, Fun, #entropy_data{more=false,
                                               pairs=Pairs}) ->
     lists:foreach(Fun, Pairs).
+
+-spec get_entropy_data(index_name(), list()) ->
+                              {ok|error, entropy_data()}.
+get_entropy_data(Index, Filter) ->
+    case yz_solr:entropy_data(Index, Filter) of
+        {error, {error, req_timedout}} ->
+            ?ERROR("failed to iterate over entropy data due to request"
+                   ++ " exceeding timeout ~b for filter params ~p",
+                   [?YZ_SOLR_REQUEST_TIMEOUT, Filter]),
+            {error, #entropy_data{more=false, pairs=[]}};
+        {error, Err} ->
+            ?ERROR("failed to iterate over entropy data due to request"
+                   ++ " error ~p for filter params ~p", [Err, Filter]),
+            {error, #entropy_data{more=false, pairs=[]}};
+        ED ->
+            {ok, ED}
+    end.
diff --git a/src/yz_exchange_fsm.erl b/src/yz_exchange_fsm.erl
index 6a843a36..118baeb0 100644
--- a/src/yz_exchange_fsm.erl
+++ b/src/yz_exchange_fsm.erl
@@ -207,8 +207,7 @@ repair(Partition, {remote_missing, KeyBin}) ->
     %% assume that Yokozuna is enabled and current node is owner.
     case yz_kv:should_index(Index) of
         true ->
-            yz_kv:index(FakeObj, delete, Ring, Partition, BKey, ShortPL, Index),
-            full_repair;
+            index(FakeObj, delete, Ring, Partition, BKey, ShortPL, Index);
         false ->
             yz_kv:dont_index(FakeObj, delete, Partition, BKey, ShortPL),
             tree_repair
@@ -226,18 +225,8 @@ repair(Partition, {_Reason, KeyBin}) ->
     case yz_kv:should_index(Index) of
         true ->
             Ring = yz_misc:get_ring(transformed),
-            try
-                yz_kv:index(Obj, anti_entropy, Ring, Partition, BKey, ShortPL, Index),
-                full_repair
-            catch _:_Err ->
-                    %% The key can't be indexed; just repair the tree
-                    %% We already track the 'failed to index'
-                    %% error trace in yz_kv:index/3
-                    lager:notice("failed to repair object ~p, as it failed to index properly",
-                                 [BKey]),
-                    yz_kv:dont_index(Obj, anti_entropy, Partition, BKey, ShortPL),
-                    tree_repair
-            end;
+            index(Obj, anti_entropy, Ring, Partition, BKey, ShortPL,
+                  Index);
         false ->
             %% TODO: pass obj hash to repair fun to avoid
             %% object read just to update hash.
@@ -255,6 +244,28 @@ repair(Partition, {_Reason, KeyBin}) ->
             failed_repair
     end.
 
+%% @private
+%%
+%% @doc Call into yz_kv:index/7 with the same try/catch checking as done in
+%%      yz_kv:index/3, but distinguishing badrequest from other errors.
+index(Obj, Reason, Ring, Partition, BKey, ShortPL, Index) ->
+    try
+        yz_kv:index(Obj, Reason, Ring, Partition, BKey, ShortPL, Index),
+        full_repair
+    catch _:Err ->
+            yz_stat:index_fail(),
+            Trace = erlang:get_stacktrace(),
+            ?ERROR("failed to repair ~p request for docid ~p with error ~p, stacktrace: ~p",
+                   [Reason, BKey, Err, Trace]),
+            case Err of
+                {_, badrequest, _} ->
+                    yz_kv:dont_index(Obj, Reason, Partition, BKey, ShortPL),
+                    tree_repair;
+                _ ->
+                    failed_repair
+            end
+    end.
+
 %% @private
 fake_kv_object({Bucket, Key}) ->
     riak_object:new(Bucket, Key, <<"fake object">>).
diff --git a/src/yz_index_hashtree.erl b/src/yz_index_hashtree.erl
index 45d66bc8..8fa2bdcb 100644
--- a/src/yz_index_hashtree.erl
+++ b/src/yz_index_hashtree.erl
@@ -295,7 +295,7 @@ load_built(#state{trees=Trees}) ->
         _ -> false
     end.
 
--spec fold_keys(p(), tree()) -> ok.
+-spec fold_keys(p(), tree()) -> [ok|error|not_available].
 fold_keys(Partition, Tree) ->
     LI = yz_cover:logical_index(yz_misc:get_ring(transformed)),
     LogicalPartition = yz_cover:logical_partition(LI, Partition),
@@ -307,8 +307,7 @@ fold_keys(Partition, Tree) ->
             insert(async, IndexN, BKey, Hash, Tree, [if_missing])
         end,
     Filter = [{partition, LogicalPartition}],
-    [yz_entropy:iterate_entropy_data(I, Filter, F) || I <- Indexes],
-    ok.
+    [yz_entropy:iterate_entropy_data(I, Filter, F) || I <- Indexes].
 
 -spec do_new_tree({p(),n()}, state()) -> state().
 do_new_tree(Id, S=#state{trees=Trees, path=Path}) ->
@@ -552,9 +551,8 @@ build_or_rehash(Tree, Locked, Type, #state{index=Index, trees=Trees}) ->
     case {Locked, Type} of
         {true, build} ->
             lager:debug("Starting YZ AAE tree build: ~p", [Index]),
-            fold_keys(Index, Tree),
-            lager:debug("Finished YZ AAE tree build: ~p", [Index]),
-            gen_server:cast(Tree, build_finished);
+            IterKeys = fold_keys(Index, Tree),
+            handle_iter_keys(Tree, Index, IterKeys);
         {true, rehash} ->
             lager:debug("Starting YZ AAE tree rehash: ~p", [Index]),
             _ = [hashtree:rehash_tree(T) || {_,T} <- Trees],
@@ -609,3 +607,18 @@ maybe_expire_caps_check(S) ->
             S#state{expired=true};
         false -> S
     end.
+
+-spec handle_iter_keys(pid(), p(), [ok|error|not_available]) -> ok.
+handle_iter_keys(Tree, Index, []) ->
+    lager:debug("Finished YZ AAE tree build: ~p", [Index]),
+    gen_server:cast(Tree, build_finished),
+    ok;
+handle_iter_keys(Tree, Index, IterKeys) ->
+    case lists:all(fun(V) -> V =:= ok end, IterKeys) of
+        true ->
+            lager:debug("Finished YZ AAE tree build: ~p", [Index]),
+            gen_server:cast(Tree, build_finished);
+        _ ->
+            lager:debug("YZ AAE tree build failed: ~p", [Index]),
+            gen_server:cast(Tree, build_failed)
+    end.
diff --git a/src/yz_solr.erl b/src/yz_solr.erl
index 53673ab1..976851a0 100644
--- a/src/yz_solr.erl
+++ b/src/yz_solr.erl
@@ -35,7 +35,6 @@
 -define(FIELD_ALIASES, [{continuation, continue},
                         {limit, n}]).
 -define(QUERY(Bin), {struct, [{'query', Bin}]}).
--define(SOLR_TIMEOUT, 60000).
 
 -type delete_op() :: {id, binary()}
                    | {bkey, bkey()}
@@ -72,7 +71,8 @@ commit(Core) ->
     URL = ?FMT("~s/~s/update?~s", [base_url(), Core, Encoded]),
     Headers = [{content_type, "application/json"}],
     Opts = [{response_format, binary}],
-    case ibrowse:send_req(URL, Headers, post, JSON, Opts, ?SOLR_TIMEOUT) of
+    case ibrowse:send_req(URL, Headers, post, JSON, Opts,
+                          ?YZ_SOLR_REQUEST_TIMEOUT) of
         {ok, "200", _, _} -> ok;
         Err -> throw({"Failed to commit", Err})
     end.
@@ -81,7 +81,7 @@ commit(Core) ->
 -spec core(atom(), proplist()) -> {ok, list(), binary()} |
                                   {error, term()}.
 core(Action, Props) ->
-    core(Action, Props, ?SOLR_TIMEOUT).
+    core(Action, Props, ?YZ_SOLR_REQUEST_TIMEOUT).
 
 -spec core(atom(), proplist(), ms()) -> {ok, list(), binary()} |
                                         {error, term()}.
@@ -137,7 +137,8 @@ delete(Index, Ops) ->
     URL = ?FMT("~s/~s/update", [base_url(), Index]),
     Headers = [{content_type, "application/json"}],
     Opts = [{response_format, binary}],
-    case ibrowse:send_req(URL, Headers, post, JSON, Opts, ?SOLR_TIMEOUT) of
+    case ibrowse:send_req(URL, Headers, post, JSON, Opts,
+                          ?YZ_SOLR_REQUEST_TIMEOUT) of
         {ok, "200", _, _} -> ok;
         Err -> {error, Err}
     end.
@@ -172,7 +173,7 @@ entropy_data(Core, Filter) ->
     Opts = [{response_format, binary}],
     URL = ?FMT("~s/~s/entropy_data?~s",
                [base_url(), Core, mochiweb_util:urlencode(Params2)]),
-    case ibrowse:send_req(URL, [], get, [], Opts) of
+    case ibrowse:send_req(URL, [], get, [], Opts, ?YZ_SOLR_REQUEST_TIMEOUT) of
         {ok, "200", _Headers, Body} ->
             R = mochijson2:decode(Body),
             More = kvc:path([<<"more">>], R),
@@ -196,9 +197,12 @@ index(Core, Docs, DelOps) ->
     URL = ?FMT("~s/~s/update", [base_url(), Core]),
     Headers = [{content_type, "application/json"}],
     Opts = [{response_format, binary}],
-    case ibrowse:send_req(URL, Headers, post, JSON, Opts, ?SOLR_TIMEOUT) of
+    case ibrowse:send_req(URL, Headers, post, JSON, Opts,
+                          ?YZ_SOLR_REQUEST_TIMEOUT) of
         {ok, "200", _, _} -> ok;
-        Err -> throw({"Failed to index docs", Err})
+        {ok, "400", _, ErrBody} -> throw({"Failed to index docs", badrequest,
+                                          ErrBody});
+        Err -> throw({"Failed to index docs", other, Err})
     end.
 
 %% @doc Determine if Solr is running.
@@ -236,7 +240,7 @@ partition_list(Core) ->
     Encoded = mochiweb_util:urlencode(Params),
     URL = ?FMT("~s/~s/select?~s", [base_url(), Core, Encoded]),
     Opts = [{response_format, binary}],
-    case ibrowse:send_req(URL, [], get, [], Opts, ?SOLR_TIMEOUT) of
+    case ibrowse:send_req(URL, [], get, [], Opts, ?YZ_SOLR_REQUEST_TIMEOUT) of
         {ok, "200", _, Resp} -> {ok, Resp};
         Err -> {error, Err}
     end.
@@ -245,7 +249,7 @@ partition_list(Core) ->
 
 -spec ping(index_name()) -> boolean()|error.
 ping(Core) ->
     URL = ?FMT("~s/~s/admin/ping", [base_url(), Core]),
-    case ibrowse:send_req(URL, [], get) of
+    case ibrowse:send_req(URL, [], head) of
         {ok, "200", _, _} -> true;
         {ok, "404", _, _} -> false;
         _ -> error
@@ -280,7 +284,8 @@ search(Core, Headers, Params) ->
     URL = ?FMT("~s/~s/select", [base_url(), Core]),
     Headers2 = [{content_type, "application/x-www-form-urlencoded"}|Headers],
     Opts = [{response_format, binary}],
-    case ibrowse:send_req(URL, Headers2, post, Body, Opts, ?SOLR_TIMEOUT) of
+    case ibrowse:send_req(URL, Headers2, post, Body, Opts,
+                          ?YZ_SOLR_REQUEST_TIMEOUT) of
         {ok, "200", RHeaders, Resp} -> {RHeaders, Resp};
         {ok, CodeStr, _, Err} ->
             {Code, _} = string:to_integer(CodeStr),
diff --git a/test/yokozuna_schema_tests.erl b/test/yokozuna_schema_tests.erl
index 877179bc..66e391df 100644
--- a/test/yokozuna_schema_tests.erl
+++ b/test/yokozuna_schema_tests.erl
@@ -20,6 +20,7 @@ basic_schema_test() ->
                                   "./data/yolo/yz_anti_entropy"),
     cuttlefish_unit:assert_config(Config, "yokozuna.root_dir", "./data/yolo/yz"),
     cuttlefish_unit:assert_config(Config, "yokozuna.temp_dir", "./data/yolo/yz_temp"),
+    cuttlefish_unit:assert_config(Config, "yokozuna.solr_request_timeout", 60000),
     ok.
 
 override_schema_test() ->
@@ -34,7 +35,8 @@ override_schema_test() ->
            {["search", "solr", "jvm_options"], "-Xmx10G"},
            {["search", "anti_entropy", "data_dir"], "/data/aae/search"},
            {["search", "root_dir"], "/some/other/volume"},
-           {["search", "temp_dir"], "/some/other/volume_temp"}
+           {["search", "temp_dir"], "/some/other/volume_temp"},
+           {["search", "solr", "request_timeout"], "90s"}
           ],
     Config = cuttlefish_unit:generate_templated_config(
                "../priv/yokozuna.schema", Conf, context(), predefined_schema()),
@@ -48,6 +50,7 @@ override_schema_test() ->
                                   "/data/aae/search"),
     cuttlefish_unit:assert_config(Config, "yokozuna.root_dir", "/some/other/volume"),
     cuttlefish_unit:assert_config(Config, "yokozuna.temp_dir", "/some/other/volume_temp"),
+    cuttlefish_unit:assert_config(Config, "yokozuna.solr_request_timeout", 90000),
     ok.
 
 %% this context() represents the substitution variables that rebar
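
Usage note: the new cuttlefish mapping exposes the Solr request timeout in riak.conf, and cuttlefish converts the configured duration to milliseconds before it reaches the yokozuna app env read by ?YZ_SOLR_REQUEST_TIMEOUT. A minimal sketch of the expected translation, reusing the 90s value from the schema test above (the riak.conf placement and shell session are illustrative assumptions, not part of this patch):

    %% riak.conf (assumed):
    %%   search.solr.request_timeout = 90s
    %%
    %% which yields the app env consumed by ?YZ_SOLR_REQUEST_TIMEOUT:
    1> app_helper:get_env(yokozuna, solr_request_timeout, 60000).
    90000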