Skip to content

Commit

Permalink
Fix cpplint + clang-format issues
Browse files Browse the repository at this point in the history
  • Loading branch information
wesm committed Jun 20, 2016
1 parent 5bef855 commit eaad768
Show file tree
Hide file tree
Showing 7 changed files with 260 additions and 284 deletions.
15 changes: 14 additions & 1 deletion cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -566,12 +566,25 @@ endif (UNIX)
# "make lint" target
############################################################
if (UNIX)

file(GLOB_RECURSE LINT_FILES
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.h"
"${CMAKE_CURRENT_SOURCE_DIR}/src/*.cc"
)

FOREACH(item ${LINT_FILES})
IF(NOT (item MATCHES "_generated.h" OR
item MATCHES ".*libhdfs_shim.*"))
LIST(APPEND FILTERED_LINT_FILES ${item})
ENDIF()
ENDFOREACH(item ${LINT_FILES})

# Full lint
add_custom_target(lint ${BUILD_SUPPORT_DIR}/cpplint.py
--verbose=2
--linelength=90
--filter=-whitespace/comments,-readability/todo,-build/header_guard,-build/c++11,-runtime/references
`find ${CMAKE_CURRENT_SOURCE_DIR}/src -name \\*.cc -or -name \\*.h | sed -e '/_generated/g'`)
${FILTERED_LINT_FILES})
endif (UNIX)


Expand Down
26 changes: 10 additions & 16 deletions cpp/src/arrow/io/hdfs-io-test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,12 @@
// specific language governing permissions and limitations
// under the License.

#include "gtest/gtest.h"

#include <cstdlib>
#include <sstream>
#include <string>

#include "gtest/gtest.h"

#include "arrow/io/hdfs.h"
#include "arrow/test-util.h"
#include "arrow/util/status.h"
Expand All @@ -37,18 +37,16 @@ std::vector<uint8_t> RandomData(int64_t size) {
class TestHDFSClient : public ::testing::Test {
public:
void MakeScratchDir() {
if (client_->Exists(scratch_dir_)) {
ASSERT_OK(client_->Delete(scratch_dir_, true));
}
if (client_->Exists(scratch_dir_)) { ASSERT_OK(client_->Delete(scratch_dir_, true)); }
ASSERT_OK(client_->CreateDirectory(scratch_dir_));
}

void WriteDummyFile(const std::string& path, const uint8_t* buffer,
int64_t size, bool append=false, int buffer_size=0, int replication=0,
int default_block_size=0) {
void WriteDummyFile(const std::string& path, const uint8_t* buffer, int64_t size,
bool append = false, int buffer_size = 0, int replication = 0,
int default_block_size = 0) {
std::shared_ptr<HDFSWriteableFile> file;
ASSERT_OK(client_->OpenWriteable(path, append, buffer_size, replication,
default_block_size, &file));
ASSERT_OK(client_->OpenWriteable(
path, append, buffer_size, replication, default_block_size, &file));

ASSERT_OK(file->Write(buffer, size));
ASSERT_OK(file->Close());
Expand Down Expand Up @@ -82,9 +80,7 @@ class TestHDFSClient : public ::testing::Test {
ASSERT_OK(HDFSClient::Connect(host_, port_, user_, &client_));
}

static void TearDownTestCase() {
EXPECT_OK(client_->Disconnect());
}
static void TearDownTestCase() { EXPECT_OK(client_->Disconnect()); }

// Resources shared amongst unit tests
static std::string host_;
Expand All @@ -109,9 +105,7 @@ TEST_F(TestHDFSClient, ConnectsAgain) {
TEST_F(TestHDFSClient, CreateDirectory) {
std::string path = "/tmp/arrow-hdfs-test/create-directory";

if (client_->Exists(path)) {
ASSERT_OK(client_->Delete(path, true));
}
if (client_->Exists(path)) { ASSERT_OK(client_->Delete(path, true)); }

ASSERT_OK(client_->CreateDirectory(path));
ASSERT_TRUE(client_->Exists(path));
Expand Down
85 changes: 35 additions & 50 deletions cpp/src/arrow/io/hdfs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,25 +15,25 @@
// specific language governing permissions and limitations
// under the License.

#include <hdfs.h>

#include <cstdint>
#include <sstream>
#include <string>

#include <hdfs.h>

#include "arrow/io/hdfs.h"
#include "arrow/util/status.h"

namespace arrow {
namespace io {

#define CHECK_FAILURE(RETURN_VALUE, WHAT) \
do { \
if (RETURN_VALUE == -1) { \
std::stringstream ss; \
ss << "HDFS: " << WHAT << " failed"; \
return Status::IOError(ss.str()); \
} \
#define CHECK_FAILURE(RETURN_VALUE, WHAT) \
do { \
if (RETURN_VALUE == -1) { \
std::stringstream ss; \
ss << "HDFS: " << WHAT << " failed"; \
return Status::IOError(ss.str()); \
} \
} while (0)

static Status CheckReadResult(int ret) {
Expand Down Expand Up @@ -74,7 +74,7 @@ class HDFSAnyFileImpl {
return Status::OK();
}

bool is_open() const { return is_open_;}
bool is_open() const { return is_open_; }

protected:
std::string path_;
Expand All @@ -100,8 +100,7 @@ class HDFSReadableFile::HDFSReadableFileImpl : public HDFSAnyFileImpl {
return Status::OK();
}

Status ReadAt(int64_t position, int32_t nbytes, int32_t* bytes_read,
uint8_t* buffer) {
Status ReadAt(int64_t position, int32_t nbytes, int32_t* bytes_read, uint8_t* buffer) {
tSize ret = hdfsPread(fs_, file_, static_cast<tOffset>(position),
reinterpret_cast<void*>(buffer), nbytes);
RETURN_NOT_OK(CheckReadResult(ret));
Expand All @@ -118,9 +117,7 @@ class HDFSReadableFile::HDFSReadableFileImpl : public HDFSAnyFileImpl {

Status GetSize(int64_t* size) {
hdfsFileInfo* entry = hdfsGetPathInfo(fs_, path_.c_str());
if (entry == nullptr) {
return Status::IOError("HDFS: GetPathInfo failed");
}
if (entry == nullptr) { return Status::IOError("HDFS: GetPathInfo failed"); }

*size = entry->mSize;
hdfsFreeFileInfo(entry, 1);
Expand All @@ -133,17 +130,15 @@ HDFSReadableFile::HDFSReadableFile() {
}

HDFSReadableFile::~HDFSReadableFile() {
if (impl_->is_open()) {
impl_->Close();
}
if (impl_->is_open()) { impl_->Close(); }
}

Status HDFSReadableFile::Close() {
return impl_->Close();
}

Status HDFSReadableFile::ReadAt(int64_t position, int32_t nbytes, int32_t* bytes_read,
uint8_t* buffer) {
Status HDFSReadableFile::ReadAt(
int64_t position, int32_t nbytes, int32_t* bytes_read, uint8_t* buffer) {
return impl_->ReadAt(position, nbytes, bytes_read, buffer);
}

Expand Down Expand Up @@ -193,17 +188,15 @@ HDFSWriteableFile::HDFSWriteableFile() {
}

HDFSWriteableFile::~HDFSWriteableFile() {
if (impl_->is_open()) {
impl_->Close();
}
if (impl_->is_open()) { impl_->Close(); }
}

Status HDFSWriteableFile::Close() {
return impl_->Close();
}

Status HDFSWriteableFile::Write(const uint8_t* buffer, int32_t nbytes,
int32_t* bytes_read) {
Status HDFSWriteableFile::Write(
const uint8_t* buffer, int32_t nbytes, int32_t* bytes_read) {
return impl_->Write(buffer, nbytes, bytes_read);
}

Expand All @@ -222,8 +215,7 @@ Status HDFSWriteableFile::Tell(int64_t* position) {
// TODO(wesm): this could throw std::bad_alloc in the course of copying strings
// into the path info object
static void SetPathInfo(const hdfsFileInfo* input, HDFSPathInfo* out) {
out->kind = input->mKind == kObjectKindFile ?
ObjectType::FILE : ObjectType::DIRECTORY;
out->kind = input->mKind == kObjectKindFile ? ObjectType::FILE : ObjectType::DIRECTORY;
out->name = std::string(input->mName);
out->owner = std::string(input->mOwner);
out->group = std::string(input->mGroup);
Expand All @@ -248,9 +240,7 @@ class HDFSClient::HDFSClientImpl {

fs_ = hdfsConnectAsUser(namenode_host.c_str(), port, user.c_str());

if (fs_ == nullptr) {
return Status::IOError("HDFS connection failed");
}
if (fs_ == nullptr) { return Status::IOError("HDFS connection failed"); }
namenode_host_ = namenode_host;
port_ = port;
user_ = user;
Expand Down Expand Up @@ -300,18 +290,16 @@ class HDFSClient::HDFSClientImpl {
Status GetPathInfo(const std::string& path, HDFSPathInfo* info) {
hdfsFileInfo* entry = hdfsGetPathInfo(fs_, path.c_str());

if (entry == nullptr) {
return Status::IOError("HDFS: GetPathInfo failed");
}
if (entry == nullptr) { return Status::IOError("HDFS: GetPathInfo failed"); }

SetPathInfo(entry, info);
hdfsFreeFileInfo(entry, 1);

return Status::OK();
}

Status ListDirectory(const std::string& path,
std::shared_ptr<HDFSDirectoryListing>* listing) {
Status ListDirectory(
const std::string& path, std::shared_ptr<HDFSDirectoryListing>* listing) {
int num_entries = 0;
hdfsFileInfo* entries = hdfsListDirectory(fs_, path.c_str(), &num_entries);

Expand All @@ -320,11 +308,8 @@ class HDFSClient::HDFSClientImpl {
// errno indicates error
//
// Note: errno is thread-local
if (errno == 0) {
num_entries = 0;
} {
return Status::IOError("HDFS: list directory failed");
}
if (errno == 0) { num_entries = 0; }
{ return Status::IOError("HDFS: list directory failed"); }
}

std::vector<HDFSPathInfo> elements(num_entries);
Expand Down Expand Up @@ -363,8 +348,8 @@ class HDFSClient::HDFSClientImpl {
int flags = O_WRONLY;
if (append) flags |= O_APPEND;

hdfsFile handle = hdfsOpenFile(fs_, path.c_str(), flags, buffer_size,
replication, default_block_size);
hdfsFile handle = hdfsOpenFile(
fs_, path.c_str(), flags, buffer_size, replication, default_block_size);

if (handle == nullptr) {
// TODO(wesm): determine cause of failure
Expand Down Expand Up @@ -440,25 +425,25 @@ Status HDFSClient::GetUsed(int64_t* nbytes) {
return impl_->GetUsed(nbytes);
}

Status HDFSClient::ListDirectory(const std::string& path,
std::shared_ptr<HDFSDirectoryListing>* listing) {
Status HDFSClient::ListDirectory(
const std::string& path, std::shared_ptr<HDFSDirectoryListing>* listing) {
return impl_->ListDirectory(path, listing);
}

Status HDFSClient::OpenReadable(const std::string& path,
std::shared_ptr<HDFSReadableFile>* file) {
Status HDFSClient::OpenReadable(
const std::string& path, std::shared_ptr<HDFSReadableFile>* file) {
return impl_->OpenReadable(path, file);
}

Status HDFSClient::OpenWriteable(const std::string& path, bool append,
int32_t buffer_size, int16_t replication, int64_t default_block_size,
std::shared_ptr<HDFSWriteableFile>* file) {
return impl_->OpenWriteable(path, append, buffer_size, replication,
default_block_size, file);
return impl_->OpenWriteable(
path, append, buffer_size, replication, default_block_size, file);
}

Status HDFSClient::OpenWriteable(const std::string& path, bool append,
std::shared_ptr<HDFSWriteableFile>* file) {
Status HDFSClient::OpenWriteable(
const std::string& path, bool append, std::shared_ptr<HDFSWriteableFile>* file) {
return OpenWriteable(path, append, 0, 0, 0, file);
}

Expand Down
31 changes: 13 additions & 18 deletions cpp/src/arrow/io/hdfs.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,17 +58,13 @@ struct HDFSPathInfo {

class HDFSDirectoryListing {
public:
HDFSDirectoryListing(std::vector<HDFSPathInfo>&& entries)
explicit HDFSDirectoryListing(std::vector<HDFSPathInfo>&& entries)
: entries_(entries) {}

int num_entries() const {
return static_cast<int>(entries_.size());
}
int num_entries() const { return static_cast<int>(entries_.size()); }

// The file info is owned by the directory listing
const HDFSPathInfo* entry(int i) const {
return &entries_[i];
}
const HDFSPathInfo* entry(int i) const { return &entries_[i]; }

private:
std::vector<HDFSPathInfo> entries_;
Expand Down Expand Up @@ -98,7 +94,7 @@ class HDFSClient : public FileSystemClient {
// @param path: absolute path to data
// @param recursive: if path is a directory, delete contents as well
// @returns error status on failure
Status Delete(const std::string& path, bool recursive=false);
Status Delete(const std::string& path, bool recursive = false);

// Disconnect from cluster
//
Expand All @@ -122,14 +118,13 @@ class HDFSClient : public FileSystemClient {
// @returns Status
Status GetUsed(int64_t* nbytes);

Status ListDirectory(const std::string& path,
std::shared_ptr<HDFSDirectoryListing>* listing);
Status ListDirectory(
const std::string& path, std::shared_ptr<HDFSDirectoryListing>* listing);

// @param path file path to change
// @param owner pass nullptr for no change
// @param group pass nullptr for no change
Status Chown(const std::string& path, const char* owner,
const char* group);
Status Chown(const std::string& path, const char* owner, const char* group);

Status Chmod(const std::string& path, int mode);

Expand All @@ -150,12 +145,12 @@ class HDFSClient : public FileSystemClient {
// @param buffer_size, 0 for default
// @param replication, 0 for default
// @param default_block_size, 0 for default
Status OpenWriteable(const std::string& path, bool append,
int32_t buffer_size, int16_t replication, int64_t default_block_size,
Status OpenWriteable(const std::string& path, bool append, int32_t buffer_size,
int16_t replication, int64_t default_block_size,
std::shared_ptr<HDFSWriteableFile>* file);

Status OpenWriteable(const std::string& path, bool append,
std::shared_ptr<HDFSWriteableFile>* file);
Status OpenWriteable(
const std::string& path, bool append, std::shared_ptr<HDFSWriteableFile>* file);

private:
friend class HDFSReadableFile;
Expand All @@ -175,8 +170,8 @@ class HDFSReadableFile : public RandomAccessFile {

Status GetSize(int64_t* size) override;

Status ReadAt(int64_t position, int32_t nbytes, int32_t* bytes_read,
uint8_t* buffer) override;
Status ReadAt(
int64_t position, int32_t nbytes, int32_t* bytes_read, uint8_t* buffer) override;

Status Seek(int64_t position) override;
Status Tell(int64_t* position) override;
Expand Down
Loading

0 comments on commit eaad768

Please sign in to comment.