-
Notifications
You must be signed in to change notification settings - Fork 409
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
FLASH-475: Support BATCH COMMANDS in flash service #232
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,94 @@ | ||
#include <Flash/BatchCommandsHandler.h> | ||
#include <Flash/CoprocessorHandler.h> | ||
#include <common/ThreadPool.h> | ||
|
||
namespace DB | ||
{ | ||
|
||
/// Binds the handler to the shared batch context and the request/response pair;
/// all referenced objects must outlive this handler (it stores references only).
BatchCommandsHandler::BatchCommandsHandler(BatchCommandsContext & batch_commands_context_, const tikvpb::BatchCommandsRequest & request_,
    tikvpb::BatchCommandsResponse & response_)
    : batch_commands_context(batch_commands_context_), request(request_), response(response_), log(&Logger::get("BatchCommandsHandler"))
{}
|
||
/// Executes all commands in the batch request, filling one response (and request id)
/// per command in order. Returns the first non-OK status among the commands, or OK.
grpc::Status BatchCommandsHandler::execute()
{
    if (request.requests_size() == 0)
        return grpc::Status::OK;

    // TODO: Fill transport_layer_load into BatchCommandsResponse.

    /// Handles a single command. Only coprocessor commands are supported for now;
    /// anything else yields UNIMPLEMENTED. A fresh DB context is created per command
    /// so that per-query arguments passed via context stay isolated between commands.
    auto command_handler_func
        = [](BatchCommandsContext::DBContextCreationFunc db_context_creation_func, grpc::ServerContext * grpc_server_context,
              const tikvpb::BatchCommandsRequest::Request & req, tikvpb::BatchCommandsResponse::Response & resp, grpc::Status & ret) {
              if (!req.has_coprocessor())
              {
                  ret = grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
                  return;
              }

              const auto & cop_req = req.coprocessor();
              auto cop_resp = resp.mutable_coprocessor();

              auto [context, status] = db_context_creation_func(grpc_server_context);
              if (!status.ok())
              {
                  ret = status;
                  return;
              }

              CoprocessorContext cop_context(context, cop_req.context(), *grpc_server_context);
              CoprocessorHandler cop_handler(cop_context, &cop_req, cop_resp);

              ret = cop_handler.execute();
          };

    /// Shortcut for only one request by not going to thread pool.
    if (request.requests_size() == 1)
    {
        LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling the only batch command in place.");

        const auto & req = request.requests(0);
        auto resp = response.add_responses();
        response.add_request_ids(request.request_ids(0));
        auto ret = grpc::Status::OK;
        command_handler_func(batch_commands_context.db_context_creation_func, &batch_commands_context.grpc_server_context, req, *resp, ret);
        return ret;
    }

    /// Use thread pool to handle requests concurrently.
    /// batch_commands_threads == 0 falls back to max_threads (which itself resolves
    /// to the number of physical cores when set to 0 — see SettingMaxThreads).
    const Settings & settings = batch_commands_context.db_context.getSettingsRef();
    size_t max_threads = settings.batch_commands_threads ? static_cast<size_t>(settings.batch_commands_threads)
                                                         : static_cast<size_t>(settings.max_threads);

    LOG_DEBUG(
        log, __PRETTY_FUNCTION__ << ": Handling " << request.requests_size() << " batch commands using " << max_threads << " threads.");

    ThreadPool thread_pool(max_threads);

    /// Pre-size rets so references handed to worker threads are never invalidated:
    /// growing the vector with emplace_back while tasks hold rets.back() references
    /// would dangle on reallocation.
    std::vector<grpc::Status> rets(static_cast<size_t>(request.requests_size()));

    for (int i = 0; i < request.requests_size(); ++i)
    {
        /// Element pointers of protobuf repeated fields stay valid across add_responses().
        auto resp = response.add_responses();
        response.add_request_ids(request.request_ids(i));
        /// Capture i and resp by value: loop-locals captured by reference would dangle
        /// once the iteration ends while the scheduled task may still be pending.
        /// request/rets/batch_commands_context outlive thread_pool.wait() below.
        thread_pool.schedule([&, i, resp]() {
            command_handler_func(batch_commands_context.db_context_creation_func, &batch_commands_context.grpc_server_context,
                request.requests(i), *resp, rets[static_cast<size_t>(i)]);
        });
    }

    thread_pool.wait();

    // Iterate all return values of each individual commands, returns the first non-OK one if any.
    for (const auto & ret : rets)
    {
        if (!ret.ok())
            return ret;
    }

    return grpc::Status::OK;
}
|
||
} // namespace DB |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
#pragma once | ||
|
||
#include <Interpreters/Context.h> | ||
#include <common/logger_useful.h> | ||
#include <grpcpp/server_context.h> | ||
#pragma GCC diagnostic push | ||
#pragma GCC diagnostic ignored "-Wunused-parameter" | ||
#include <kvproto/tikvpb.pb.h> | ||
#pragma GCC diagnostic pop | ||
|
||
namespace DB | ||
{ | ||
|
||
/// Shared state for one BatchCommands gRPC call; referenced objects must outlive
/// the handler that uses this context.
struct BatchCommandsContext
{
    /// Context for this batch commands.
    Context & db_context;

    /// Context creation function for each individual command - they should be handled isolated,
    /// given that context is being used to pass arguments regarding queries.
    /// Returns the per-command Context plus a status; a non-OK status aborts that command.
    using DBContextCreationFunc = std::function<std::tuple<Context, grpc::Status>(grpc::ServerContext *)>;
    DBContextCreationFunc db_context_creation_func;

    /// gRPC server context of the enclosing BatchCommands RPC.
    grpc::ServerContext & grpc_server_context;

    BatchCommandsContext(
        Context & db_context_, DBContextCreationFunc && db_context_creation_func_, grpc::ServerContext & grpc_server_context_)
        : db_context(db_context_), db_context_creation_func(std::move(db_context_creation_func_)), grpc_server_context(grpc_server_context_)
    {}
};
|
||
/// Handles one tikvpb::BatchCommandsRequest: dispatches each contained command
/// and fills one response entry (plus request id) per command into `response`.
/// Holds references only; all constructor arguments must outlive the handler.
class BatchCommandsHandler
{
public:
    BatchCommandsHandler(BatchCommandsContext & batch_commands_context_, const tikvpb::BatchCommandsRequest & request_,
        tikvpb::BatchCommandsResponse & response_);

    ~BatchCommandsHandler() = default;

    /// Executes all commands; returns the first non-OK status among them, or OK.
    grpc::Status execute();

protected:
    BatchCommandsContext & batch_commands_context;
    const tikvpb::BatchCommandsRequest & request;
    tikvpb::BatchCommandsResponse & response;

    Logger * log;
};
|
||
} // namespace DB |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
looks like both max_threads and batch_commands_threads are 0 by default
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The config `max_threads` has a special type, `SettingMaxThreads`, which evaluates to the number of physical cores when set to zero.