Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 13 additions & 4 deletions be/src/io/fs/hdfs_file_reader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -114,9 +114,18 @@ Status HdfsFileReader::close() {
return Status::OK();
}

#ifdef USE_HADOOP_HDFS
// Thin wrapper around do_read_at_impl() that adds failure handling:
// if the underlying read fails with any non-OK status, the cached HDFS
// handle accessor is destroyed — presumably to drop a possibly-broken
// connection/handle so a later read reopens it fresh (NOTE(review):
// semantics of _accessor.destroy() are not visible here; confirm).
// Returns the status from do_read_at_impl() unchanged.
Status HdfsFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_read,
const IOContext* /*io_ctx*/) {
const IOContext* io_ctx) {
    auto st = do_read_at_impl(offset, result, bytes_read, io_ctx);
    if (!st.ok()) {
        // Read failed: invalidate the accessor so the bad handle is not reused.
        _accessor.destroy();
    }
    return st;
}

#ifdef USE_HADOOP_HDFS
Status HdfsFileReader::do_read_at_impl(size_t offset, Slice result, size_t* bytes_read,
const IOContext* /*io_ctx*/) {
if (closed()) [[unlikely]] {
return Status::InternalError("read closed file: {}", _path.native());
}
Expand Down Expand Up @@ -169,8 +178,8 @@ Status HdfsFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_r
#else
// The hedged read only support hdfsPread().
// TODO: rethink here to see if there are some difference between hdfsPread() and hdfsRead()
Status HdfsFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_read,
const IOContext* /*io_ctx*/) {
Status HdfsFileReader::do_read_at_impl(size_t offset, Slice result, size_t* bytes_read,
const IOContext* /*io_ctx*/) {
if (closed()) [[unlikely]] {
return Status::InternalError("read closed file: ", _path.native());
}
Expand Down
3 changes: 3 additions & 0 deletions be/src/io/fs/hdfs_file_reader.h
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,9 @@ class HdfsFileReader final : public FileReader {

void _collect_profile_before_close() override;

// Performs the actual positioned read (platform-specific implementation
// selected by USE_HADOOP_HDFS in the .cpp). Called by read_at_impl(),
// which handles error cleanup; on non-OK return the caller tears down
// the cached HDFS accessor. io_ctx is currently unused by both
// implementations (parameter kept for interface symmetry).
Status do_read_at_impl(size_t offset, Slice result, size_t* bytes_read,
                       const IOContext* io_ctx);

private:
#ifdef USE_HADOOP_HDFS
struct HDFSProfile {
Expand Down
Loading