diff --git a/be/src/io/fs/hdfs_file_reader.cpp b/be/src/io/fs/hdfs_file_reader.cpp
index cb8b71f67d7960..0e278dff0c8847 100644
--- a/be/src/io/fs/hdfs_file_reader.cpp
+++ b/be/src/io/fs/hdfs_file_reader.cpp
@@ -119,6 +119,7 @@ Status HdfsFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_r
                                     const IOContext* io_ctx) {
     auto st = do_read_at_impl(offset, result, bytes_read, io_ctx);
     if (!st.ok()) {
+        _handle = nullptr;
         _accessor.destroy();
     }
     return st;
@@ -131,6 +132,11 @@ Status HdfsFileReader::do_read_at_impl(size_t offset, Slice result, size_t* byte
         return Status::InternalError("read closed file: {}", _path.native());
     }
 
+    if (_handle == nullptr) [[unlikely]] {
+        return Status::InternalError("cached hdfs file handle has been destroyed: {}",
+                                     _path.native());
+    }
+
     if (offset > _handle->file_size()) {
         return Status::IOError("offset exceeds file size(offset: {}, file size: {}, path: {})",
                                offset, _handle->file_size(), _path.native());
@@ -245,6 +251,10 @@ Status HdfsFileReader::do_read_at_impl(size_t offset, Slice result, size_t* byte
 void HdfsFileReader::_collect_profile_before_close() {
     if (_profile != nullptr && is_hdfs(_fs_name)) {
 #ifdef USE_HADOOP_HDFS
+        if (_handle == nullptr) [[unlikely]] {
+            return;
+        }
+
         struct hdfsReadStatistics* hdfs_statistics = nullptr;
         auto r = hdfsFileGetReadStatistics(_handle->file(), &hdfs_statistics);
         if (r != 0) {
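
For context, the patch appears to follow a standard defensive pattern: when a failed read causes the owning accessor to destroy the cached file handle, the reader first clears its raw pointer to that handle, and every later code path that would dereference it checks for null and fails fast instead. The self-contained sketch below illustrates this pattern only; `CachedReader`, `Handle`, and `Status` are hypothetical stand-ins, not the actual Doris classes or their APIs.

```cpp
// Minimal sketch of the null-out-then-guard pattern, assuming C++20 for
// [[unlikely]]. Names here are illustrative, not from the Doris codebase.
#include <cstddef>
#include <cstdio>
#include <string>

struct Status {
    bool ok;
    std::string msg;
    static Status OK() { return {true, ""}; }
    static Status Error(std::string m) { return {false, std::move(m)}; }
};

struct Handle {  // stands in for the cached file handle
    size_t file_size() const { return 1024; }
};

class CachedReader {
public:
    Status read_at(size_t offset) {
        Status st = do_read_at(offset);
        if (!st.ok) {
            // Mirror the patch: drop our raw pointer before the owner
            // destroys the handle, so no later call can use a dangling one.
            _handle = nullptr;
            // The real code destroys the cached accessor here.
        }
        return st;
    }

private:
    Status do_read_at(size_t offset) {
        if (_handle == nullptr) [[unlikely]] {
            // Fail fast instead of dereferencing a destroyed handle.
            return Status::Error("cached file handle has been destroyed");
        }
        if (offset > _handle->file_size()) {
            return Status::Error("offset exceeds file size");
        }
        return Status::OK();
    }

    Handle _storage;
    Handle* _handle = &_storage;  // non-owning; owner may destroy it
};

int main() {
    CachedReader r;
    // First read fails (offset past EOF) and clears the cached handle.
    std::printf("first read ok: %d\n", r.read_at(2048).ok);
    // Second read fails fast on the null check rather than crashing.
    std::printf("second read ok: %d\n", r.read_at(0).ok);
}
```

The same guard is what the third hunk adds to `_collect_profile_before_close()`: since profile collection can run after a failed read has already cleared `_handle`, it returns early rather than passing a dangling handle to `hdfsFileGetReadStatistics`.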