Adding support for http calling of the graphql_server commands #3793

Merged: 4 commits, Oct 12, 2022
16 changes: 10 additions & 6 deletions big_tests/tests/distributed_helper.erl
@@ -50,13 +50,17 @@ script_path(Node, Config, Script) ->
     filename:join([get_cwd(Node, Config), "bin", Script]).

 verify_result(Node, Op) ->
-    mongoose_helper:wait_until(fun() -> catch do_verify_result(Node, Op) end,
-                               [],
-                               #{
-                                 time_left => timer:seconds(20),
+    mongoose_helper:wait_until(fun() -> catch do_verify_result(Node, Op) end, [],
+                               #{time_left => timer:seconds(20),
                                  sleep_time => 1000,
-                                 name => verify_result
-                                }).
+                                 name => verify_result}),
+    mongoose_helper:wait_until(fun() -> check_mongooseim_on_node_started(mim()) end, true,
+                               #{time_left => timer:seconds(20),
+                                 sleep_time => 1000,
+                                 name => verify_mongooseim_started}).
+
+check_mongooseim_on_node_started(Node) ->
+    lists:keymember(mongooseim, 1, rpc(Node, application, which_applications, [])).

 do_verify_result(Node, Op) ->
     VerifyNode = mim(),
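Note: check_mongooseim_on_node_started/1 above works because application:which_applications/0 returns a list of {App, Description, Vsn} tuples, so a keymember check on the first element tells whether an application is running. A minimal local sketch of the same idea, without the test suite's RPC wrapper (the function name is only illustrative):

    %% True once App shows up among the started applications.
    is_app_started(App) ->
        lists:keymember(App, 1, application:which_applications()).

    %% e.g. is_app_started(mongooseim) on a MongooseIM node.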
102 changes: 95 additions & 7 deletions big_tests/tests/graphql_server_SUITE.erl
@@ -16,19 +16,24 @@ suite() ->
     require_rpc_nodes([mim]) ++ escalus:suite().

 all() ->
-    [%{group, admin_http}, % http is not supported for the server category
+    [{group, admin_http},
      {group, admin_cli}].

 groups() ->
-    [{admin_http, [], admin_groups()},
-     {admin_cli, [], admin_groups()},
+    [{admin_http, [], admin_http_groups()},
+     {admin_cli, [], admin_cli_groups()},
      {server_tests, [], admin_tests()},
-     {clustering_tests, [], clustering_tests()}].
+     {clustering_tests, [], clustering_tests()},
+     {clustering_http_tests, [], clustering_http_tests()}].

-admin_groups() ->
+admin_cli_groups() ->
     [{group, server_tests},
      {group, clustering_tests}].

+admin_http_groups() ->
+    [{group, server_tests},
+     {group, clustering_http_tests}].
+
 admin_tests() ->
     [get_cookie_test,
      set_and_get_loglevel_test,
@@ -45,6 +50,15 @@ clustering_tests() ->
      remove_node_test,
      stop_node_test].

+clustering_http_tests() ->
+    [join_successful_http,
+     leave_successful_http,
+     join_unsuccessful_http,
+     remove_dead_from_cluster_http,
+     remove_alive_from_cluster_http,
+     remove_node_test,
+     stop_node_test].
+
 init_per_suite(Config) ->
     Config1 = dynamic_modules:save_modules(host_type(), Config),
     Config2 = lists:foldl(fun(#{node := Node} = RPCNode, ConfigAcc) ->
@@ -88,11 +102,15 @@ init_per_testcase(CaseName, Config) ->

 end_per_testcase(CaseName, Config) when CaseName == join_successful
                                  orelse CaseName == leave_unsuccessful
+                                 orelse CaseName == join_successful_http
+                                 orelse CaseName == leave_unsuccessful_http
                                  orelse CaseName == join_twice ->
     remove_node_from_cluster(mim2(), Config),
     escalus:end_per_testcase(CaseName, Config);
 end_per_testcase(CaseName, Config) when CaseName == remove_alive_from_cluster
-                                 orelse CaseName == remove_dead_from_cluster ->
+                                 orelse CaseName == remove_dead_from_cluster
+                                 orelse CaseName == remove_alive_from_cluster_http
+                                 orelse CaseName == remove_dead_from_cluster_http ->
     remove_node_from_cluster(mim3(), Config),
     remove_node_from_cluster(mim2(), Config),
     escalus:end_per_testcase(CaseName, Config);
@@ -111,7 +129,9 @@ set_and_get_loglevel_test(Config) ->
         Value1 = get_ok_value([data, server, getLoglevel], get_loglevel(Config)),
         ?assertEqual(LogLevel, Value1)
     end, LogLevels),
-    ?assertEqual(<<"unknown_enum">>, get_err_code(set_loglevel(<<"AAAA">>, Config))).
+    {_, Res} = set_loglevel(<<"AAAA">>, Config),
+    [Res1] = maps:get(<<"errors">>, Res),
+    ?assertEqual(<<"unknown_enum">>, graphql_helper:get_value([extensions, code], Res1)).

 get_status_test(Config) ->
     Result = get_ok_value([data, server, status], get_status(Config)),
@@ -198,10 +218,78 @@ stop_node_test(Config) ->
     mongoose_helper:wait_until(F, {badrpc, nodedown}, #{sleep_time => 1000, name => stop_node}),
     distributed_helper:start_node(Node3Nodename, Config).

+join_successful_http(Config) ->
+    #{node := Node2} = RPCSpec2 = mim2(),
+    leave_cluster(Config),
+    distributed_helper:verify_result(RPCSpec2, remove),
+    get_ok_value([], join_cluster(atom_to_binary(Node2), Config)),
+    distributed_helper:verify_result(RPCSpec2, add).
+
+leave_successful_http(Config) ->
+    #{node := Node2} = RPCSpec2 = mim2(),
+    join_cluster(atom_to_binary(Node2), Config),
+    distributed_helper:verify_result(RPCSpec2, add),
+    get_ok_value([], leave_cluster(Config)),
+    distributed_helper:verify_result(RPCSpec2, remove).
+
+join_unsuccessful_http(Config) ->
+    Node2 = mim2(),
+    get_ok_value([], join_cluster(<<>>, Config)),
+    distributed_helper:verify_result(Node2, remove).
+
+remove_dead_from_cluster_http(Config) ->
+    % given
+    Timeout = timer:seconds(60),
+    #{node := Node1Nodename} = Node1 = mim(),
+    #{node := _Node2Nodename} = Node2 = mim2(),
+    #{node := Node3Nodename} = Node3 = mim3(),
+    ok = rpc(Node2#{timeout => Timeout}, mongoose_cluster, join, [Node1Nodename]),
+    ok = rpc(Node3#{timeout => Timeout}, mongoose_cluster, join, [Node1Nodename]),
+    %% when
+    distributed_helper:stop_node(Node3Nodename, Config),
+    get_ok_value([data, server, removeFromCluster],
+                 remove_from_cluster(atom_to_binary(Node3Nodename), Config)),
+    have_node_in_mnesia_wait(Node1, Node2, true),
+    have_node_in_mnesia_wait(Node1, Node3, false),
+    have_node_in_mnesia_wait(Node2, Node3, false),
+    % after node awakening nodes are clustered again
+    distributed_helper:start_node(Node3Nodename, Config),
+    have_node_in_mnesia_wait(Node1, Node3, true),
+    have_node_in_mnesia_wait(Node2, Node3, true).
+
+remove_alive_from_cluster_http(Config) ->
+    % given
+    Timeout = timer:seconds(60),
+    #{node := Node1Name} = Node1 = mim(),
+    #{node := Node2Name} = Node2 = mim2(),
+    Node3 = mim3(),
+    ok = rpc(Node2#{timeout => Timeout}, mongoose_cluster, join, [Node1Name]),
+    ok = rpc(Node3#{timeout => Timeout}, mongoose_cluster, join, [Node1Name]),
+    %% when
+    %% Node2 is still running
+    %% then
+    get_ok_value([], remove_from_cluster(atom_to_binary(Node2Name), Config)),
+    have_node_in_mnesia_wait(Node1, Node3, true),
+    have_node_in_mnesia_wait(Node1, Node2, false),
+    have_node_in_mnesia_wait(Node3, Node2, false).
+
 %-----------------------------------------------------------------------
 % Helpers
 %-----------------------------------------------------------------------

+have_node_in_mnesia_wait(Node1, #{node := Node2}, Value) ->
+    mongoose_helper:wait_until(fun() ->
+                                       DbNodes1 = distributed_helper:rpc(Node1, mnesia,
+                                                                         system_info, [db_nodes]),
+                                       lists:member(Node2, DbNodes1)
+                               end,
+                               Value,
+                               #{
+                                 time_left => timer:seconds(5),
+                                 sleep_time => 1000,
+                                 name => have_node_in_mnesia
+                                }).
+
 all_log_levels() ->
     [<<"NONE">>,
      <<"EMERGENCY">>,
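The _http test cases above assert only get_ok_value([], ...) and then poll with distributed_helper:verify_result/2 or have_node_in_mnesia_wait/3, because over HTTP these mutations acknowledge immediately and perform the cluster change in the background. Purely as an illustration of that acknowledgement (the field path mirrors the other assertions in this suite; the exact wrapping is not a captured payload), a successful HTTP joinCluster response carries roughly:

    #{<<"data">> =>
          #{<<"server">> =>
                #{<<"joinCluster">> => <<"JoinCluster scheduled">>}}}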
2 changes: 1 addition & 1 deletion src/ejabberd_ctl.erl
@@ -172,7 +172,7 @@ process(["mnesia", "info"]) ->
 process(["graphql", Arg]) when is_list(Arg) ->
     Doc = list_to_binary(Arg),
     Ep = mongoose_graphql:get_endpoint(admin),
-    Result = mongoose_graphql:execute(Ep, undefined, Doc),
+    Result = mongoose_graphql:execute_cli(Ep, undefined, Doc),
     handle_graphql_result(Result);
 process(["graphql" | _]) ->
     ?PRINT("This command requires one string type argument!\n", []),
28 changes: 25 additions & 3 deletions src/graphql/admin/mongoose_graphql_server_admin_mutation.erl
@@ -2,13 +2,14 @@
 -behaviour(mongoose_graphql).

 -export([execute/4]).
+-export([await_execution/4]).

 -import(mongoose_graphql_helper, [make_error/2]).
 -ignore_xref([execute/4]).

 -include("../mongoose_graphql_types.hrl").

-execute(_Ctx, server, <<"joinCluster">>, #{<<"node">> := Node}) ->
+execute(#{method := cli}, server, <<"joinCluster">>, #{<<"node">> := Node}) ->
     case mongoose_server_api:join_cluster(binary_to_list(Node)) of
         {mnesia_error, _} = Error ->
             make_error(Error, #{cluster => Node});
@@ -20,14 +21,24 @@ execute(_Ctx, server, <<"joinCluster">>, #{<<"node">> := Node}) ->
         {_, String} ->
             {ok, String}
     end;
-execute(_Ctx, server, <<"removeFromCluster">>, #{<<"node">> := Node}) ->
+execute(#{method := http}, server, <<"joinCluster">>, #{<<"node">> := Node}) ->
+    spawn(?MODULE, await_execution,
+          [1000, mongoose_server_api, join_cluster, [binary_to_list(Node)]]),
+    {ok, "JoinCluster scheduled"};
+
+execute(#{method := cli}, server, <<"removeFromCluster">>, #{<<"node">> := Node}) ->
     case mongoose_server_api:remove_from_cluster(binary_to_list(Node)) of
         {ok, _} = Result ->
             Result;
         Error ->
             make_error(Error, #{node => Node})
     end;
-execute(_Ctx, server, <<"leaveCluster">>, #{}) ->
+execute(#{method := http}, server, <<"removeFromCluster">>, #{<<"node">> := Node}) ->
+    spawn(?MODULE, await_execution,
+          [1000, mongoose_server_api, remove_from_cluster, [binary_to_list(Node)]]),
+    {ok, "RemoveFromCluster scheduled"};
+
+execute(#{method := cli}, server, <<"leaveCluster">>, #{}) ->
     case mongoose_server_api:leave_cluster() of
         {error, Message} ->
             make_error({internal_server_error, io_lib:format("~p", [Message])}, #{});
@@ -36,8 +47,13 @@ execute(_Ctx, server, <<"leaveCluster">>, #{}) ->
         {_, String} ->
             {ok, String}
     end;
+execute(#{method := http}, server, <<"leaveCluster">>, #{}) ->
+    spawn(?MODULE, await_execution, [1000, mongoose_server_api, leave_cluster, []]),
+    {ok, "LeaveCluster scheduled"};
+
 execute(_Ctx, server, <<"removeNode">>, #{<<"node">> := Node}) ->
     mongoose_server_api:remove_node(binary_to_list(Node));
+
 execute(_Ctx, server, <<"setLoglevel">>, #{<<"level">> := LogLevel}) ->
     mongoose_server_api:set_loglevel(LogLevel);
 execute(_Ctx, server, <<"stop">>, #{}) ->
@@ -46,3 +62,9 @@ execute(_Ctx, server, <<"stop">>, #{}) ->
 execute(_Ctx, server, <<"restart">>, #{}) ->
     spawn(mongoose_server_api, restart, []),
     {ok, "Restart scheduled"}.
+
+%% Helpers
+
+await_execution(Timeout, Module, Fun, Args) ->
+    timer:sleep(Timeout),
+    apply(Module, Fun, Args).
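To make the dispatch explicit: the same mutation now resolves differently depending on the method key that the transport layer puts into the context. A minimal sketch of calling the resolver directly on a running node:

    %% CLI-style resolution: blocks until mongoose_server_api:leave_cluster/0 returns.
    mongoose_graphql_server_admin_mutation:execute(#{method => cli}, server,
                                                   <<"leaveCluster">>, #{}).

    %% HTTP-style resolution: returns {ok, "LeaveCluster scheduled"} immediately
    %% and runs leave_cluster/0 in a spawned process about a second later,
    %% via await_execution/4 above.
    mongoose_graphql_server_admin_mutation:execute(#{method => http}, server,
                                                   <<"leaveCluster">>, #{}).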
13 changes: 12 additions & 1 deletion src/graphql/mongoose_graphql.erl
@@ -10,7 +10,8 @@
          get_endpoint/1,
          create_endpoint/3,
          execute/2,
-         execute/3]).
+         execute/3,
+         execute_cli/3]).

 -ignore_xref([create_endpoint/3]).

@@ -103,6 +104,16 @@ execute(Ep, OpName, Doc) ->
           ctx => #{}},
     execute(Ep, Req).

+-spec execute_cli(graphql:endpoint_context(), undefined | binary(), binary()) ->
+    {ok, map()} | {error, term()}.
+execute_cli(Ep, OpName, Doc) ->
+    Req = #{document => Doc,
+            operation_name => OpName,
+            vars => #{},
+            authorized => true,
+            ctx => #{method => cli}},
+    execute(Ep, Req).
+
 % Internal

 -spec schema_global_patterns(file:name_all()) -> [file:filename_all()].
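A minimal usage sketch of the new wrapper, mirroring the ejabberd_ctl call site (the GraphQL document below is illustrative):

    Ep = mongoose_graphql:get_endpoint(admin),
    Doc = <<"mutation { server { leaveCluster } }">>,
    %% execute_cli/3 injects ctx => #{method => cli}, so the synchronous
    %% resolver clauses are selected.
    Result = mongoose_graphql:execute_cli(Ep, undefined, Doc).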
2 changes: 1 addition & 1 deletion src/graphql/mongoose_graphql_commands.erl
@@ -344,7 +344,7 @@ execute(Ep, Doc, Vars) ->
                      operation_name => undefined,
                      vars => Vars,
                      authorized => true,
-                     ctx => #{}}).
+                     ctx => #{method => cli}}).

 field_type_query() ->
     nested_type_query("name kind possibleTypes {name kind}").
2 changes: 1 addition & 1 deletion src/graphql/mongoose_graphql_cowboy_handler.erl
@@ -175,7 +175,7 @@ run_request(#{} = ReqCtx, Req, #{schema_endpoint := EpName,
                                  authorized := AuthStatus} = State) ->
     Ep = mongoose_graphql:get_endpoint(EpName),
     Ctx = maps:get(schema_ctx, State, #{}),
-    ReqCtx2 = ReqCtx#{authorized => AuthStatus, ctx => Ctx},
+    ReqCtx2 = ReqCtx#{authorized => AuthStatus, ctx => Ctx#{method => http}},
     case mongoose_graphql:execute(Ep, ReqCtx2) of
         {ok, Response} ->
             ResponseBody = mongoose_graphql_response:term_to_json(Response),
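With this change, a request arriving through the Cowboy handler executes with method => http in its context. The same behaviour can be reproduced by building the request map directly (node name and document are illustrative):

    Ep = mongoose_graphql:get_endpoint(admin),
    Req = #{document => <<"mutation { server { joinCluster(node: \"mongooseim@node2\") } }">>,
            operation_name => undefined,
            vars => #{},
            authorized => true,
            ctx => #{method => http}},
    %% Matches the #{method := http} resolver clauses, so this returns the
    %% "JoinCluster scheduled" acknowledgement right away while the join itself
    %% runs asynchronously.
    mongoose_graphql:execute(Ep, Req).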