Skip to content

Commit

Permalink
Fixed a bug where the storage service might crash (vesoft-inc#583)
Browse files Browse the repository at this point in the history
  When the storage service daemon is started, the nebula::meta::MetaClient
  will use the MetaServerBasedPartManager object (via MetaClient::registerListener),
  but the MetaServerBasedPartManager object will be released before
  nebula::meta::MetaClient (through the KVStore object), so we need to manually
  release nebula::meta::MetaClient here before releasing the KVStore object.
  • Loading branch information
monadbobo committed Jul 8, 2019
1 parent a8bdaef commit efee045
Showing 1 changed file with 6 additions and 5 deletions.
11 changes: 6 additions & 5 deletions src/daemons/StorageDaemon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -154,21 +154,20 @@ int main(int argc, char *argv[]) {
auto ioThreadPool = std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_num_io_threads);

// Meta client
auto metaClient = std::make_unique<nebula::meta::MetaClient>(ioThreadPool,
std::move(metaAddrsRet.value()),
true);
auto metaClient = new nebula::meta::MetaClient(
ioThreadPool, std::move(metaAddrsRet.value()), true);
metaClient->init();

LOG(INFO) << "Init schema manager";
auto schemaMan = nebula::meta::SchemaManager::create();
schemaMan->init(metaClient.get());
schemaMan->init(metaClient);

LOG(INFO) << "Init kvstore";
std::unique_ptr<KVStore> kvstore = getStoreInstance(localhost,
std::move(paths),
ioThreadPool,
workers,
metaClient.get(),
metaClient,
schemaMan.get());

LOG(INFO) << "Starting Storage HTTP Service";
Expand Down Expand Up @@ -200,10 +199,12 @@ int main(int argc, char *argv[]) {
gServer->setIOThreadPool(ioThreadPool);
gServer->serve(); // Will wait until the server shuts down
} catch (const std::exception& e) {
delete metaClient;
LOG(ERROR) << "Start thrift server failed, error:" << e.what();
return EXIT_FAILURE;
}

delete metaClient;
LOG(INFO) << "The storage Daemon stopped";
}

Expand Down

0 comments on commit efee045

Please sign in to comment.