diff --git a/cloud/src/recycler/azure_obj_client.cpp b/cloud/src/recycler/azure_obj_client.cpp
index b50874f1fd37af..f3df2f9b6eee3e 100644
--- a/cloud/src/recycler/azure_obj_client.cpp
+++ b/cloud/src/recycler/azure_obj_client.cpp
@@ -137,7 +137,7 @@ class AzureListIterator final : public ObjectListIterator {
                                                         SystemClockEpoch)
                                                         .count()});
             }
-        } catch (Azure::Storage::StorageException& e) {
+        } catch (Azure::Core::RequestFailedException& e) {
             LOG_WARNING(
                     "Azure request failed because {}, http_code: {}, request_id: {}, url: {}, "
                     "prefix: {}",
@@ -145,6 +145,11 @@ class AzureListIterator final : public ObjectListIterator {
                     req_.Prefix.Value());
             is_valid_ = false;
             return false;
+        } catch (std::exception& e) {
+            LOG_WARNING("Azure request failed because {}, url: {}, prefix: {}", e.what(),
+                        client_->GetUrl(), req_.Prefix.Value());
+            is_valid_ = false;
+            return false;
         }
 
         return !results_.empty();
diff --git a/cloud/src/recycler/recycler.cpp b/cloud/src/recycler/recycler.cpp
index ee07d695a72702..a16eecf4a52c6e 100644
--- a/cloud/src/recycler/recycler.cpp
+++ b/cloud/src/recycler/recycler.cpp
@@ -375,6 +375,7 @@ void Recycler::check_recycle_tasks() {
 int Recycler::start(brpc::Server* server) {
     instance_filter_.reset(config::recycle_whitelist, config::recycle_blacklist);
     g_bvar_recycler_task_max_concurrency.set_value(config::recycle_concurrency);
+    S3Environment::getInstance();
 
     if (config::enable_checker) {
         checker_ = std::make_unique<Checker>(txn_kv_);
diff --git a/cloud/src/recycler/s3_accessor.cpp b/cloud/src/recycler/s3_accessor.cpp
index 3c36f5a01bb248..464beb58e2e6d8 100644
--- a/cloud/src/recycler/s3_accessor.cpp
+++ b/cloud/src/recycler/s3_accessor.cpp
@@ -111,50 +111,52 @@ int reset_s3_rate_limiter(S3RateLimitType type, size_t max_speed, size_t max_bur
     return AccessorRateLimiter::instance().rate_limiter(type)->reset(max_speed, max_burst, limit);
 }
 
-class S3Environment {
-public:
-    S3Environment() {
-        aws_options_ = Aws::SDKOptions {};
-        auto logLevel =
-                static_cast<Aws::Utils::Logging::LogLevel>(config::aws_log_level);
-        aws_options_.loggingOptions.logLevel = logLevel;
-        aws_options_.loggingOptions.logger_create_fn = [logLevel] {
-            return std::make_shared<S3Logger>(logLevel);
-        };
-        Aws::InitAPI(aws_options_);
+S3Environment::S3Environment() {
+    LOG(INFO) << "Initializing S3 environment";
+    aws_options_ = Aws::SDKOptions {};
+    auto logLevel = static_cast<Aws::Utils::Logging::LogLevel>(config::aws_log_level);
+    aws_options_.loggingOptions.logLevel = logLevel;
+    aws_options_.loggingOptions.logger_create_fn = [logLevel] {
+        return std::make_shared<S3Logger>(logLevel);
+    };
+    Aws::InitAPI(aws_options_);
 #ifdef USE_AZURE
-        auto azureLogLevel =
-                static_cast<Azure::Core::Diagnostics::Logger::Level>(config::azure_log_level);
-        Azure::Core::Diagnostics::Logger::SetLevel(azureLogLevel);
-        Azure::Core::Diagnostics::Logger::SetListener(
-                [&](Azure::Core::Diagnostics::Logger::Level level, const std::string& message) {
-                    switch (level) {
-                    case Azure::Core::Diagnostics::Logger::Level::Verbose:
-                        LOG(INFO) << message;
-                        break;
-                    case Azure::Core::Diagnostics::Logger::Level::Informational:
-                        LOG(INFO) << message;
-                        break;
-                    case Azure::Core::Diagnostics::Logger::Level::Warning:
-                        LOG(WARNING) << message;
-                        break;
-                    case Azure::Core::Diagnostics::Logger::Level::Error:
-                        LOG(ERROR) << message;
-                        break;
-                    default:
-                        LOG(WARNING) << "Unknown level: " << static_cast<int>(level)
-                                     << ", message: " << message;
-                        break;
-                    }
-                });
+    auto azureLogLevel =
+            static_cast<Azure::Core::Diagnostics::Logger::Level>(config::azure_log_level);
+    Azure::Core::Diagnostics::Logger::SetLevel(azureLogLevel);
+    Azure::Core::Diagnostics::Logger::SetListener(
+            [&](Azure::Core::Diagnostics::Logger::Level level, const std::string& message) {
+                switch (level) {
+                case Azure::Core::Diagnostics::Logger::Level::Verbose:
+                    LOG(INFO) << message;
+                    break;
+                case Azure::Core::Diagnostics::Logger::Level::Informational:
+                    LOG(INFO) << message;
+                    break;
+                case Azure::Core::Diagnostics::Logger::Level::Warning:
+                    LOG(WARNING) << message;
+                    break;
+                case Azure::Core::Diagnostics::Logger::Level::Error:
+                    LOG(ERROR) << message;
+                    break;
+                default:
+                    LOG(WARNING) << "Unknown level: " << static_cast<int>(level)
+                                 << ", message: " << message;
+                    break;
+                }
+            });
 #endif
-    }
+}
 
-    ~S3Environment() { Aws::ShutdownAPI(aws_options_); }
+S3Environment& S3Environment::getInstance() {
+    static S3Environment instance;
+    return instance;
+}
 
-private:
-    Aws::SDKOptions aws_options_;
-};
+S3Environment::~S3Environment() {
+    Aws::ShutdownAPI(aws_options_);
+}
 
 class S3ListIterator final : public ListIterator {
 public:
@@ -316,6 +318,7 @@ int S3Accessor::init() {
                 std::make_shared<SimpleThreadPool>(config::recycle_pool_parallelism, "s3_accessor");
         worker_pool->start();
     });
+    S3Environment::getInstance();
     switch (conf_.provider) {
     case S3Conf::AZURE: {
 #ifdef USE_AZURE
@@ -355,8 +358,6 @@ int S3Accessor::init() {
         uri_ = conf_.endpoint + '/' + conf_.bucket + '/' + conf_.prefix;
     }
 
-    static S3Environment s3_env;
-
     // S3Conf::S3
     Aws::Client::ClientConfiguration aws_config;
     aws_config.endpointOverride = conf_.endpoint;
diff --git a/cloud/src/recycler/s3_accessor.h b/cloud/src/recycler/s3_accessor.h
index 544c142d09b786..dd93e83d9631f4 100644
--- a/cloud/src/recycler/s3_accessor.h
+++ b/cloud/src/recycler/s3_accessor.h
@@ -53,6 +53,19 @@ extern bvar::LatencyRecorder s3_get_bucket_version_latency;
 extern bvar::LatencyRecorder s3_copy_object_latency;
 }; // namespace s3_bvar
 
+class S3Environment {
+public:
+    S3Environment(const S3Environment&) = delete;
+    S3Environment& operator=(const S3Environment&) = delete;
+
+    static S3Environment& getInstance();
+
+    ~S3Environment();
+
+private:
+    S3Environment();
+    Aws::SDKOptions aws_options_;
+};
 struct AccessorRateLimiter {
 public:
     ~AccessorRateLimiter() = default;
diff --git a/cloud/test/s3_accessor_test.cpp b/cloud/test/s3_accessor_test.cpp
index f95fb2dba1848d..d63baa2a7b5834 100644
--- a/cloud/test/s3_accessor_test.cpp
+++ b/cloud/test/s3_accessor_test.cpp
@@ -46,7 +46,7 @@ int main(int argc, char** argv) {
         std::cerr << "failed to init glog" << std::endl;
         return -1;
     }
-    doris::cloud::config::aws_log_level = 5;
+    LOG(INFO) << "s3_accessor_test starting";
     ::testing::InitGoogleTest(&argc, argv);
     return RUN_ALL_TESTS();
 }