From af89ec62e38571dfc63e9ad56d6b43d21f4aafcb Mon Sep 17 00:00:00 2001
From: Kai Li
Date: Fri, 12 Sep 2014 17:42:12 +0800
Subject: [PATCH 1/8] Add make and cmake build flags and C++ macros to make LMDB optional

---
 CMakeLists.txt                          |  3 +++
 Makefile                                |  6 +++++-
 examples/mnist/convert_mnist_data.cpp   | 22 +++++++++++++++++++++-
 include/caffe/data_layers.hpp           |  4 ++++
 scripts/travis/travis_build_and_test.sh |  2 +-
 src/caffe/CMakeLists.txt                |  8 ++++++--
 src/caffe/layers/data_layer.cpp         | 12 ++++++++++++
 src/caffe/test/test_data_layer.cpp      |  4 ++++
 tools/compute_image_mean.cpp            | 18 +++++++++++++++++-
 tools/convert_imageset.cpp              | 18 +++++++++++++++++-
 10 files changed, 90 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3cb7d583504..ebe429370ad 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,6 +4,9 @@ project( Caffe )
 ### Build Options ##########################################################################
 
 option(CPU_ONLY "Build Caffe without GPU support" OFF)
+
+option(WITH_LMDB "Include LMDB support" ON)
+
 option(BUILD_PYTHON "Build Python wrapper" OFF)
 option(BUILD_MATLAB "Build Matlab wrapper" OFF)
 option(BUILD_EXAMPLES "Build examples" ON)
diff --git a/Makefile b/Makefile
index 5020b4109d6..3efde91c9a3 100644
--- a/Makefile
+++ b/Makefile
@@ -167,7 +167,6 @@ ifneq ($(CPU_ONLY), 1)
 endif
 LIBRARIES += pthread \
 	glog gflags protobuf leveldb snappy \
-	lmdb \
 	boost_system \
 	hdf5_hl hdf5 \
 	opencv_core opencv_highgui opencv_imgproc
@@ -309,6 +308,11 @@ endif
 INCLUDE_DIRS += $(BLAS_INCLUDE)
 LIBRARY_DIRS += $(BLAS_LIB)
 
+ifneq ($(WITH_LMDB), 0)
+	COMMON_FLAGS += -DHAVE_LMDB
+	LIBRARIES += lmdb
+endif
+
 # Complete build flags.
 COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
 CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
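A quick sketch of the new switch in use (illustrative invocations only, not part of the patch; flag names as defined in the hunks above — the Makefile guard treats any value other than 0 as enabled, and the CMake option defaults to ON):

    # Plain make: build without -DHAVE_LMDB and without linking lmdb
    make all WITH_LMDB=0

    # CMake: turn the new option off at configure time
    cmake -DWITH_LMDB=OFF ..
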
"; +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -125,6 +135,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, // Put in db if (db_backend == "leveldb") { // leveldb batch->Put(keystr, value); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb mdb_data.mv_size = value.size(); mdb_data.mv_data = reinterpret_cast(&value[0]); @@ -132,6 +143,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, mdb_key.mv_data = reinterpret_cast(&keystr[0]); CHECK_EQ(mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0), MDB_SUCCESS) << "mdb_put failed"; +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -142,11 +154,13 @@ void convert_dataset(const char* image_filename, const char* label_filename, db->Write(leveldb::WriteOptions(), batch); delete batch; batch = new leveldb::WriteBatch(); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) << "mdb_txn_begin failed"; +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -158,10 +172,12 @@ void convert_dataset(const char* image_filename, const char* label_filename, db->Write(leveldb::WriteOptions(), batch); delete batch; delete db; +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; mdb_close(mdb_env, mdb_dbi); mdb_env_close(mdb_env); +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -176,7 +192,11 @@ int main(int argc, char** argv) { #endif gflags::SetUsageMessage("This script converts the MNIST dataset to\n" - "the leveldb/lmdb format used by Caffe to perform classification.\n" + "the leveldb" +#ifdef HAVE_LMDB + "/lmdb" +#endif + " format used by Caffe to perform classification.\n" "Usage:\n" " convert_mnist_data [FLAGS] input_image_file input_label_file " "output_db_file\n" diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 15158029436..840798502d9 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -8,7 +8,9 @@ #include "boost/scoped_ptr.hpp" #include "hdf5.h" #include "leveldb/db.h" +#ifdef HAVE_LMDB #include "lmdb.h" +#endif #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -114,12 +116,14 @@ class DataLayer : public BasePrefetchingDataLayer { // LEVELDB shared_ptr db_; shared_ptr iter_; +#ifdef HAVE_LMDB // LMDB MDB_env* mdb_env_; MDB_dbi mdb_dbi_; MDB_txn* mdb_txn_; MDB_cursor* mdb_cursor_; MDB_val mdb_key_, mdb_value_; +#endif }; /** diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh index dec4d097c17..08fd7a3674e 100755 --- a/scripts/travis/travis_build_and_test.sh +++ b/scripts/travis/travis_build_and_test.sh @@ -7,7 +7,7 @@ MAKE="make --jobs=$NUM_THREADS --keep-going" if $WITH_CMAKE; then mkdir build cd build - cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON .. + cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_LMDB=ON .. $MAKE if ! 
$WITH_CUDA; then $MAKE runtest diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt index 86c7c7eb4a3..d86b0e09d72 100644 --- a/src/caffe/CMakeLists.txt +++ b/src/caffe/CMakeLists.txt @@ -53,8 +53,12 @@ if(LEVELDB_FOUND) endif() # LMDB -find_package(LMDB REQUIRED) -include_directories(${LMDB_INCLUDE_DIR}) +if(WITH_LMDB) + message(STATUS "LMDB enabled") + find_package(LMDB REQUIRED) + include_directories(${LMDB_INCLUDE_DIR}) + add_definitions(-DHAVE_LMDB) +endif() # Boost find_package(Boost 1.46 COMPONENTS system thread REQUIRED) diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index d2071e2fa4f..0fa34ba7a5f 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -21,12 +21,14 @@ DataLayer::~DataLayer() { switch (this->layer_param_.data_param().backend()) { case DataParameter_DB_LEVELDB: break; // do nothing +#ifdef HAVE_LMDB case DataParameter_DB_LMDB: mdb_cursor_close(mdb_cursor_); mdb_close(mdb_env_, mdb_dbi_); mdb_txn_abort(mdb_txn_); mdb_env_close(mdb_env_); break; +#endif default: LOG(FATAL) << "Unknown database backend"; } @@ -53,6 +55,7 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, iter_->SeekToFirst(); } break; +#ifdef HAVE_LMDB case DataParameter_DB_LMDB: CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed"; CHECK_EQ(mdb_env_set_mapsize(mdb_env_, 1099511627776), MDB_SUCCESS); // 1TB @@ -69,6 +72,7 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_FIRST), MDB_SUCCESS) << "mdb_cursor_get failed"; break; +#endif default: LOG(FATAL) << "Unknown database backend"; } @@ -86,6 +90,7 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, iter_->SeekToFirst(); } break; +#ifdef HAVE_LMDB case DataParameter_DB_LMDB: if (mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT) != MDB_SUCCESS) { @@ -93,6 +98,7 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, MDB_FIRST), MDB_SUCCESS); } break; +#endif default: LOG(FATAL) << "Unknown database backend"; } @@ -104,9 +110,11 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, case DataParameter_DB_LEVELDB: datum.ParseFromString(iter_->value().ToString()); break; +#ifdef HAVE_LMDB case DataParameter_DB_LMDB: datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size); break; +#endif default: LOG(FATAL) << "Unknown database backend"; } @@ -161,12 +169,14 @@ void DataLayer::InternalThreadEntry() { CHECK(iter_->Valid()); datum.ParseFromString(iter_->value().ToString()); break; +#ifdef HAVE_LMDB case DataParameter_DB_LMDB: CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_GET_CURRENT), MDB_SUCCESS); datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size); break; +#endif default: LOG(FATAL) << "Unknown database backend"; } @@ -188,6 +198,7 @@ void DataLayer::InternalThreadEntry() { iter_->SeekToFirst(); } break; +#ifdef HAVE_LMDB case DataParameter_DB_LMDB: if (mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT) != MDB_SUCCESS) { @@ -197,6 +208,7 @@ void DataLayer::InternalThreadEntry() { &mdb_value_, MDB_FIRST), MDB_SUCCESS); } break; +#endif default: LOG(FATAL) << "Unknown database backend"; } diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 887124aa5bc..c1fbcb1deb8 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -64,6 +64,7 @@ class DataLayerTest : public MultiDeviceTest { delete db; } +#ifdef HAVE_LMDB // Fill the LMDB with data: 
unique_pixels has same meaning as in FillLevelDB. void FillLMDB(const bool unique_pixels) { backend_ = DataParameter_DB_LMDB; @@ -111,6 +112,7 @@ class DataLayerTest : public MultiDeviceTest { mdb_close(env, dbi); mdb_env_close(env); } +#endif void TestRead() { const Dtype scale = 3; @@ -363,6 +365,7 @@ TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) { this->TestReadCrop(); } +#ifdef HAVE_LMDB TYPED_TEST(DataLayerTest, TestReadLMDB) { const bool unique_pixels = false; // all pixels the same; images different this->FillLMDB(unique_pixels); @@ -400,5 +403,6 @@ TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) { this->FillLMDB(unique_pixels); this->TestReadCrop(); } +#endif } // namespace caffe diff --git a/tools/compute_image_mean.cpp b/tools/compute_image_mean.cpp index fe3497fa87d..3f032f68171 100644 --- a/tools/compute_image_mean.cpp +++ b/tools/compute_image_mean.cpp @@ -1,6 +1,8 @@ #include #include +#ifdef HAVE_LMDB #include +#endif #include #include @@ -18,7 +20,11 @@ int main(int argc, char** argv) { ::google::InitGoogleLogging(argv[0]); if (argc < 3 || argc > 4) { LOG(ERROR) << "Usage: compute_image_mean input_leveldb output_file" - << " db_backend[leveldb or lmdb]"; + << " db_backend[leveldb" +#ifdef HAVE_LMDB + << " or lmdb" +#endif + <<"]"; return 1; } @@ -32,12 +38,14 @@ int main(int argc, char** argv) { leveldb::Options options; options.create_if_missing = false; leveldb::Iterator* it = NULL; +#ifdef HAVE_LMDB // lmdb MDB_env* mdb_env; MDB_dbi mdb_dbi; MDB_val mdb_key, mdb_value; MDB_txn* mdb_txn; MDB_cursor* mdb_cursor; +#endif // Open db if (db_backend == "leveldb") { // leveldb @@ -49,6 +57,7 @@ int main(int argc, char** argv) { read_options.fill_cache = false; it = db->NewIterator(read_options); it->SeekToFirst(); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb LOG(INFO) << "Opening lmdb " << argv[1]; CHECK_EQ(mdb_env_create(&mdb_env), MDB_SUCCESS) << "mdb_env_create failed"; @@ -63,6 +72,7 @@ int main(int argc, char** argv) { << "mdb_cursor_open failed"; CHECK_EQ(mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_FIRST), MDB_SUCCESS); +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -73,8 +83,10 @@ int main(int argc, char** argv) { // load first datum if (db_backend == "leveldb") { datum.ParseFromString(it->value().ToString()); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { datum.ParseFromArray(mdb_value.mv_data, mdb_value.mv_size); +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -114,6 +126,7 @@ int main(int argc, char** argv) { LOG(ERROR) << "Processed " << count << " files."; } } +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_FIRST), MDB_SUCCESS); @@ -141,6 +154,7 @@ int main(int argc, char** argv) { } } while (mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_NEXT) == MDB_SUCCESS); +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -158,11 +172,13 @@ int main(int argc, char** argv) { // Clean up if (db_backend == "leveldb") { delete db; +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { mdb_cursor_close(mdb_cursor); mdb_close(mdb_env, mdb_dbi); mdb_txn_abort(mdb_txn); mdb_env_close(mdb_env); +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } diff --git a/tools/convert_imageset.cpp b/tools/convert_imageset.cpp index 1c58f3de33f..ac3f192aeae 100644 --- a/tools/convert_imageset.cpp +++ b/tools/convert_imageset.cpp @@ -17,7 +17,9 @@ #include #include #include +#ifdef HAVE_LMDB 
#include +#endif #include #include @@ -49,7 +51,11 @@ int main(int argc, char** argv) { namespace gflags = google; #endif - gflags::SetUsageMessage("Convert a set of images to the leveldb/lmdb\n" + gflags::SetUsageMessage("Convert a set of images to the leveldb" +#ifdef HAVE_LMDB + "/lmdb" +#endif + "\n" "format used as input for Caffe.\n" "Usage:\n" " convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n" @@ -84,11 +90,13 @@ int main(int argc, char** argv) { int resize_width = std::max(0, FLAGS_resize_width); // Open new db +#ifdef HAVE_LMDB // lmdb MDB_env *mdb_env; MDB_dbi mdb_dbi; MDB_val mdb_key, mdb_data; MDB_txn *mdb_txn; +#endif // leveldb leveldb::DB* db; leveldb::Options options; @@ -105,6 +113,7 @@ int main(int argc, char** argv) { CHECK(status.ok()) << "Failed to open leveldb " << db_path << ". Is it already existing?"; batch = new leveldb::WriteBatch(); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb LOG(INFO) << "Opening lmdb " << db_path; CHECK_EQ(mkdir(db_path, 0744), 0) @@ -118,6 +127,7 @@ int main(int argc, char** argv) { << "mdb_txn_begin failed"; CHECK_EQ(mdb_open(mdb_txn, NULL, 0, &mdb_dbi), MDB_SUCCESS) << "mdb_open failed. Does the lmdb already exist? "; +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -154,6 +164,7 @@ int main(int argc, char** argv) { // Put in db if (db_backend == "leveldb") { // leveldb batch->Put(keystr, value); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb mdb_data.mv_size = value.size(); mdb_data.mv_data = reinterpret_cast(&value[0]); @@ -161,6 +172,7 @@ int main(int argc, char** argv) { mdb_key.mv_data = reinterpret_cast(&keystr[0]); CHECK_EQ(mdb_put(mdb_txn, mdb_dbi, &mdb_key, &mdb_data, 0), MDB_SUCCESS) << "mdb_put failed"; +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -171,11 +183,13 @@ int main(int argc, char** argv) { db->Write(leveldb::WriteOptions(), batch); delete batch; batch = new leveldb::WriteBatch(); +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; CHECK_EQ(mdb_txn_begin(mdb_env, NULL, 0, &mdb_txn), MDB_SUCCESS) << "mdb_txn_begin failed"; +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } @@ -188,10 +202,12 @@ int main(int argc, char** argv) { db->Write(leveldb::WriteOptions(), batch); delete batch; delete db; +#ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; mdb_close(mdb_env, mdb_dbi); mdb_env_close(mdb_env); +#endif } else { LOG(FATAL) << "Unknown db backend " << db_backend; } From e5e70de544caa9ca433f31bc331f18b1c3fec5fd Mon Sep 17 00:00:00 2001 From: Kai Li Date: Fri, 12 Sep 2014 18:59:41 +0800 Subject: [PATCH 2/8] Add make and cmake building flags and C++ macros to make HDF5 optional --- CMakeLists.txt | 1 + Makefile | 6 +++++- include/caffe/data_layers.hpp | 6 ++++++ include/caffe/neuron_layers.hpp | 2 ++ include/caffe/util/io.hpp | 6 ++++++ scripts/travis/travis_build_and_test.sh | 4 ++-- src/caffe/CMakeLists.txt | 12 ++++++++---- src/caffe/layer_factory.cpp | 2 ++ src/caffe/layers/dropout_layer.cpp | 1 + src/caffe/layers/hdf5_data_layer.cpp | 4 ++++ src/caffe/layers/hdf5_data_layer.cu | 4 ++++ src/caffe/layers/hdf5_output_layer.cpp | 4 ++++ src/caffe/layers/hdf5_output_layer.cu | 4 ++++ src/caffe/test/test_hdf5_output_layer.cpp | 4 ++++ src/caffe/test/test_hdf5data_layer.cpp | 4 ++++ src/caffe/util/io.cpp | 2 ++ 16 files changed, 59 insertions(+), 7 
deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ebe429370ad..b4a03fbb652 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,6 +6,7 @@ project( Caffe ) option(CPU_ONLY "Build Caffe without GPU support" OFF) option(WITH_LMDB "Include LMDB support" ON) +option(WITH_HDF5 "Include HDF5 support" ON) option(BUILD_PYTHON "Build Python wrapper" OFF) option(BUILD_MATLAB "Build Matlab wrapper" OFF) diff --git a/Makefile b/Makefile index 3efde91c9a3..ef2d399b6b9 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,6 @@ endif LIBRARIES += pthread \ glog gflags protobuf leveldb snappy \ boost_system \ - hdf5_hl hdf5 \ opencv_core opencv_highgui opencv_imgproc PYTHON_LIBRARIES := boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare @@ -313,6 +312,11 @@ ifneq ($(WITH_LMDB), 0) LIBRARIES += lmdb endif +ifneq ($(WITH_HDF5), 0) + COMMON_FLAGS += -DHAVE_HDF5 + LIBRARIES += hdf5_hl hdf5 +endif + # Complete build flags. COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 840798502d9..863d5b446b2 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -6,7 +6,9 @@ #include #include "boost/scoped_ptr.hpp" +#ifdef HAVE_HDF5 #include "hdf5.h" +#endif #include "leveldb/db.h" #ifdef HAVE_LMDB #include "lmdb.h" @@ -22,8 +24,10 @@ namespace caffe { +#ifdef HAVE_HDF5 #define HDF5_DATA_DATASET_NAME "data" #define HDF5_DATA_LABEL_NAME "label" +#endif /** * @brief Provides base for data layers that feed blobs to the Net. @@ -157,6 +161,7 @@ class DummyDataLayer : public Layer { vector refill_; }; +#ifdef HAVE_HDF5 /** * @brief Provides data to the Net from HDF5 files. * @@ -234,6 +239,7 @@ class HDF5OutputLayer : public Layer { Blob data_blob_; Blob label_blob_; }; +#endif /** * @brief Provides data to the Net from image files. diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 36acf96e5af..cfeb4ada09c 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -10,8 +10,10 @@ #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" +#ifdef HAVE_HDF5 #define HDF5_DATA_DATASET_NAME "data" #define HDF5_DATA_LABEL_NAME "label" +#endif namespace caffe { diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp index 8dd338d2603..f532f0d4194 100644 --- a/include/caffe/util/io.hpp +++ b/include/caffe/util/io.hpp @@ -5,13 +5,17 @@ #include #include "google/protobuf/message.h" +#ifdef HAVE_HDF5 #include "hdf5.h" #include "hdf5_hl.h" +#endif #include "caffe/blob.hpp" #include "caffe/proto/caffe.pb.h" +#ifdef HAVE_HDF5 #define HDF5_NUM_DIMS 4 +#endif namespace leveldb { // Forward declaration for leveldb::Options to be used in GetlevelDBOptions(). 
@@ -104,6 +108,7 @@ inline bool ReadImageToDatum(const string& filename, const int label, leveldb::Options GetLevelDBOptions(); +#ifdef HAVE_HDF5 template void hdf5_load_nd_dataset_helper( hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, @@ -117,6 +122,7 @@ void hdf5_load_nd_dataset( template void hdf5_save_nd_dataset( const hid_t file_id, const string dataset_name, const Blob& blob); +#endif } // namespace caffe diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh index 08fd7a3674e..44ef8abd4c3 100755 --- a/scripts/travis/travis_build_and_test.sh +++ b/scripts/travis/travis_build_and_test.sh @@ -7,7 +7,7 @@ MAKE="make --jobs=$NUM_THREADS --keep-going" if $WITH_CMAKE; then mkdir build cd build - cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_LMDB=ON .. + cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_LMDB=ON -DWITH_HDF5=ON .. $MAKE if ! $WITH_CUDA; then $MAKE runtest @@ -19,7 +19,7 @@ else if ! $WITH_CUDA; then export CPU_ONLY=1 fi - $MAKE all test pycaffe warn lint || true + WITH_LMDB=1 WITH_HDF5=1 $MAKE all test pycaffe warn lint || true if ! $WITH_CUDA; then $MAKE runtest fi diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt index d86b0e09d72..f96892d857b 100644 --- a/src/caffe/CMakeLists.txt +++ b/src/caffe/CMakeLists.txt @@ -33,8 +33,12 @@ elseif(BLAS STREQUAL "mkl") endif() # HDF5 -find_package(HDF5 COMPONENTS HL REQUIRED) -include_directories(${HDF5_INCLUDE_DIRS}) +if(WITH_HDF5) + message(STATUS "HDF5 enabled") + find_package(HDF5 COMPONENTS HL REQUIRED) + include_directories(${HDF5_INCLUDE_DIRS}) + add_definitions(-DHAVE_HDF5) +endif() # OpenCV find_package(OpenCV REQUIRED core highgui imgproc) @@ -57,8 +61,8 @@ if(WITH_LMDB) message(STATUS "LMDB enabled") find_package(LMDB REQUIRED) include_directories(${LMDB_INCLUDE_DIR}) - add_definitions(-DHAVE_LMDB) -endif() + add_definitions(-DHAVE_LMDB) +endif() # Boost find_package(Boost 1.46 COMPONENTS system thread REQUIRED) diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index 41c547b8ad4..23ada0d029e 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -203,10 +203,12 @@ Layer* GetLayer(const LayerParameter& param) { return new EltwiseLayer(param); case LayerParameter_LayerType_FLATTEN: return new FlattenLayer(param); +#ifdef HAVE_HDF5 case LayerParameter_LayerType_HDF5_DATA: return new HDF5DataLayer(param); case LayerParameter_LayerType_HDF5_OUTPUT: return new HDF5OutputLayer(param); +#endif case LayerParameter_LayerType_HINGE_LOSS: return new HingeLossLayer(param); case LayerParameter_LayerType_IMAGE_DATA: diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 52537d1aba9..4e4d108c766 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -1,5 +1,6 @@ // TODO (sergeyk): effect should not be dependent on phase. wasted memcpy. 
+#include #include #include "caffe/common.hpp" diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 1f2a83582f2..3b1315e77df 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -1,3 +1,5 @@ +#ifdef HAVE_HDF5 + /* TODO: - load file in a separate thread ("prefetch") @@ -117,3 +119,5 @@ STUB_GPU_FORWARD(HDF5DataLayer, Forward); INSTANTIATE_CLASS(HDF5DataLayer); } // namespace caffe + +#endif // #ifdef HAVE_HDF5 diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 79cc536eb28..72c0566b539 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -1,3 +1,5 @@ +#ifdef HAVE_HDF5 + /* TODO: - only load parts of the file, in accordance with a prototxt param "max_mem" @@ -49,3 +51,5 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, INSTANTIATE_CLASS(HDF5DataLayer); } // namespace caffe + +#endif // #ifdef HAVE_HDF5 diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index 3cdbbb31a6a..56cf7c2517c 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -1,3 +1,5 @@ +#ifdef HAVE_HDF5 + #include #include "hdf5.h" @@ -72,3 +74,5 @@ STUB_GPU(HDF5OutputLayer); INSTANTIATE_CLASS(HDF5OutputLayer); } // namespace caffe + +#endif // #ifdef HAVE_HDF5 diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index 0813c02a440..62f93cf7f4d 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -1,3 +1,5 @@ +#ifdef HAVE_HDF5 + #include #include "hdf5.h" @@ -41,3 +43,5 @@ void HDF5OutputLayer::Backward_gpu(const vector*>& top, INSTANTIATE_CLASS(HDF5OutputLayer); } // namespace caffe + +#endif // #ifdef HAVE_HDF5 diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index eb09c8d1f3a..ceed931c442 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -1,3 +1,5 @@ +#ifdef HAVE_HDF5 + #include #include @@ -121,3 +123,5 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { } } // namespace caffe + +#endif // #ifdef HAVE_HDF5 diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index acca75b19b1..3b701a6afcf 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -1,3 +1,5 @@ +#ifdef HAVE_HDF5 + #include #include @@ -122,3 +124,5 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { } } // namespace caffe + +#endif // #ifdef HAVE_HDF5 diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index 7b6c67cf633..417a0cb055d 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -117,6 +117,7 @@ leveldb::Options GetLevelDBOptions() { return options; } +#ifdef HAVE_HDF5 // Verifies format of data stored in HDF5 file and reshapes blob accordingly. 
template void hdf5_load_nd_dataset_helper( @@ -188,5 +189,6 @@ void hdf5_save_nd_dataset( file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data()); CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name; } +#endif // #ifdef HAVE_HDF5 } // namespace caffe From de57322f684a77b26ce8cd1c2d9e771f25a407e6 Mon Sep 17 00:00:00 2001 From: Kai Li Date: Fri, 12 Sep 2014 19:43:46 +0800 Subject: [PATCH 3/8] Add BUILD_TESTS for cmake to make tests optional --- CMakeLists.txt | 9 +++++++-- scripts/travis/travis_build_and_test.sh | 2 +- src/caffe/CMakeLists.txt | 4 +++- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b4a03fbb652..6dce545af4d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,10 +8,11 @@ option(CPU_ONLY "Build Caffe without GPU support" OFF) option(WITH_LMDB "Include LMDB support" ON) option(WITH_HDF5 "Include HDF5 support" ON) +option(BUILD_EXAMPLES "Build examples" ON) option(BUILD_PYTHON "Build Python wrapper" OFF) option(BUILD_MATLAB "Build Matlab wrapper" OFF) -option(BUILD_EXAMPLES "Build examples" ON) option(BUILD_SHARED_LIBS "Build SHARED libs if ON and STATIC otherwise" OFF) +option(BUILD_TESTS "Build tests" ON) if(NOT BLAS) set(BLAS atlas) @@ -57,7 +58,6 @@ endif() ### Subdirectories ########################################################################## -add_subdirectory(src/gtest) add_subdirectory(src/caffe) add_subdirectory(tools) @@ -76,6 +76,11 @@ if(BUILD_MATLAB) add_subdirectory(matlab) endif() +if(BUILD_TESTS) + message(STATUS "Tests enabled") + add_subdirectory(src/gtest) +endif() + ### Lint Target Setup ########################################################################## set(LINT_TARGET lint) diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh index 44ef8abd4c3..133384493ea 100755 --- a/scripts/travis/travis_build_and_test.sh +++ b/scripts/travis/travis_build_and_test.sh @@ -7,7 +7,7 @@ MAKE="make --jobs=$NUM_THREADS --keep-going" if $WITH_CMAKE; then mkdir build cd build - cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_LMDB=ON -DWITH_HDF5=ON .. + cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_LMDB=ON -DWITH_HDF5=ON .. $MAKE if ! 
$WITH_CUDA; then
     $MAKE runtest
diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt
index f96892d857b..a3cc171b0d8 100644
--- a/src/caffe/CMakeLists.txt
+++ b/src/caffe/CMakeLists.txt
@@ -134,7 +134,9 @@ set_target_properties(caffe PROPERTIES
     LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib
 )
 
-add_subdirectory(test)
+if(BUILD_TESTS)
+  add_subdirectory(test)
+endif()
 
 ### Install #################################################################################
 
From 332be3cda0e54542da15e8c13e765686f5d1d89e Mon Sep 17 00:00:00 2001
From: Kai Li
Date: Fri, 12 Sep 2014 20:29:00 +0800
Subject: [PATCH 4/8] Add make & cmake build flags and C++ macros to make LevelDB optional

---
 CMakeLists.txt                          |  3 +-
 Makefile                                | 17 ++++--
 examples/cifar10/convert_cifar_data.cpp | 17 ++++++
 examples/mnist/convert_mnist_data.cpp   | 73 ++++++++++++++++++-----
 include/caffe/data_layers.hpp           |  7 +++
 include/caffe/util/io.hpp               |  4 ++
 scripts/travis/travis_build_and_test.sh |  6 +-
 src/caffe/CMakeLists.txt                | 22 +++++---
 src/caffe/layers/data_layer.cpp         | 20 +++++++
 src/caffe/test/test_data_layer.cpp      |  6 ++
 src/caffe/test/test_hdf5data_layer.cpp  |  2 -
 src/caffe/util/io.cpp                   |  4 ++
 tools/compute_image_mean.cpp            | 47 +++++++++++---
 tools/convert_imageset.cpp              | 69 +++++++++++++++++----
 tools/extract_features.cpp              | 21 ++++++-
 15 files changed, 262 insertions(+), 56 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6dce545af4d..7fcacf430d5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,8 +5,9 @@ project( Caffe )
 
 option(CPU_ONLY "Build Caffe without GPU support" OFF)
 
-option(WITH_LMDB "Include LMDB support" ON)
 option(WITH_HDF5 "Include HDF5 support" ON)
+option(WITH_LEVELDB "Include LEVELDB support" ON)
+option(WITH_LMDB "Include LMDB support" ON)
 
 option(BUILD_EXAMPLES "Build examples" ON)
 option(BUILD_PYTHON "Build Python wrapper" OFF)
diff --git a/Makefile b/Makefile
index ef2d399b6b9..a0c31117ca9 100644
--- a/Makefile
+++ b/Makefile
@@ -166,7 +166,7 @@ ifneq ($(CPU_ONLY), 1)
 	LIBRARIES := cudart cublas curand
 endif
 LIBRARIES += pthread \
-	glog gflags protobuf leveldb snappy \
+	glog gflags protobuf \
 	boost_system \
 	opencv_core opencv_highgui opencv_imgproc
 PYTHON_LIBRARIES := boost_python python2.7
 WARNINGS := -Wall -Wno-sign-compare
@@ -307,16 +307,21 @@ endif
 INCLUDE_DIRS += $(BLAS_INCLUDE)
 LIBRARY_DIRS += $(BLAS_LIB)
 
-ifneq ($(WITH_LMDB), 0)
-	COMMON_FLAGS += -DHAVE_LMDB
-	LIBRARIES += lmdb
-endif
-
 ifneq ($(WITH_HDF5), 0)
 	COMMON_FLAGS += -DHAVE_HDF5
 	LIBRARIES += hdf5_hl hdf5
 endif
 
+ifneq ($(WITH_LEVELDB), 0)
+	COMMON_FLAGS += -DHAVE_LEVELDB
+	LIBRARIES += leveldb snappy
+endif
+
+ifneq ($(WITH_LMDB), 0)
+	COMMON_FLAGS += -DHAVE_LMDB
+	LIBRARIES += lmdb
+endif
+
 # Complete build flags. 
COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) diff --git a/examples/cifar10/convert_cifar_data.cpp b/examples/cifar10/convert_cifar_data.cpp index 90ecb6d9a88..c89ca755f44 100644 --- a/examples/cifar10/convert_cifar_data.cpp +++ b/examples/cifar10/convert_cifar_data.cpp @@ -11,7 +11,9 @@ #include "glog/logging.h" #include "google/protobuf/text_format.h" +#ifdef HAVE_LEVELDB #include "leveldb/db.h" +#endif #include "stdint.h" #include "caffe/proto/caffe.pb.h" @@ -32,10 +34,12 @@ void read_image(std::ifstream* file, int* label, char* buffer) { } void convert_dataset(const string& input_folder, const string& output_folder) { +#ifdef HAVE_LEVELDB // Leveldb options leveldb::Options options; options.create_if_missing = true; options.error_if_exists = true; +#endif // Data buffer int label; char str_buffer[kCIFARImageNBytes]; @@ -46,11 +50,13 @@ void convert_dataset(const string& input_folder, const string& output_folder) { datum.set_width(kCIFARSize); LOG(INFO) << "Writing Training data"; +#ifdef HAVE_LEVELDB leveldb::DB* train_db; leveldb::Status status; status = leveldb::DB::Open(options, output_folder + "/cifar10_train_leveldb", &train_db); CHECK(status.ok()) << "Failed to open leveldb."; +#endif for (int fileid = 0; fileid < kCIFARTrainBatches; ++fileid) { // Open files LOG(INFO) << "Training Batch " << fileid + 1; @@ -65,14 +71,18 @@ void convert_dataset(const string& input_folder, const string& output_folder) { datum.SerializeToString(&value); snprintf(str_buffer, kCIFARImageNBytes, "%05d", fileid * kCIFARBatchSize + itemid); +#ifdef HAVE_LEVELDB train_db->Put(leveldb::WriteOptions(), string(str_buffer), value); +#endif } } LOG(INFO) << "Writing Testing data"; +#ifdef HAVE_LEVELDB leveldb::DB* test_db; CHECK(leveldb::DB::Open(options, output_folder + "/cifar10_test_leveldb", &test_db).ok()) << "Failed to open leveldb."; +#endif // Open files std::ifstream data_file((input_folder + "/test_batch.bin").c_str(), std::ios::in | std::ios::binary); @@ -83,14 +93,21 @@ void convert_dataset(const string& input_folder, const string& output_folder) { datum.set_data(str_buffer, kCIFARImageNBytes); datum.SerializeToString(&value); snprintf(str_buffer, kCIFARImageNBytes, "%05d", itemid); +#ifdef HAVE_LEVELDB test_db->Put(leveldb::WriteOptions(), string(str_buffer), value); +#endif } +#ifdef HAVE_LEVELDB delete train_db; delete test_db; +#endif } int main(int argc, char** argv) { +#ifndef HAVE_LEVELDB + LOG(FATAL) << "No DB library available to save the converted dataset"; +#endif if (argc != 3) { printf("This script converts the CIFAR dataset to the leveldb format used\n" "by caffe to perform classification.\n" diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index 7f5d36996c5..06a1bb5d262 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -10,8 +10,11 @@ #include #include #include +#ifdef HAVE_LEVELDB #include #include +#endif + #ifdef HAVE_LMDB #include #endif @@ -26,10 +29,12 @@ using namespace caffe; // NOLINT(build/namespaces) using std::string; -#ifdef HAVE_LMDB +#if defined HAVE_LMDB DEFINE_string(backend, "lmdb", "The backend for storing the result"); -#else +#elif defined HAVE_LEVELDB DEFINE_string(backend, "leveldb", "The backend for storing the result"); +#else +DEFINE_string(backend, "", "The backend for storing the result"); #endif uint32_t swap_endian(uint32_t val) { @@ -67,13 +72,7 @@ void convert_dataset(const char* 
image_filename, const char* label_filename, image_file.read(reinterpret_cast(&cols), 4); cols = swap_endian(cols); -#ifdef HAVE_LMDB - // lmdb - MDB_env *mdb_env; - MDB_dbi mdb_dbi; - MDB_val mdb_key, mdb_data; - MDB_txn *mdb_txn; -#endif +#ifdef HAVE_LEVELDB // leveldb leveldb::DB* db; leveldb::Options options; @@ -81,15 +80,29 @@ void convert_dataset(const char* image_filename, const char* label_filename, options.create_if_missing = true; options.write_buffer_size = 268435456; leveldb::WriteBatch* batch = NULL; +#endif + +#ifdef HAVE_LMDB + // lmdb + MDB_env *mdb_env; + MDB_dbi mdb_dbi; + MDB_val mdb_key, mdb_data; + MDB_txn *mdb_txn; +#endif // Open db - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb LOG(INFO) << "Opening leveldb " << db_path; leveldb::Status status = leveldb::DB::Open( options, db_path, &db); CHECK(status.ok()) << "Failed to open leveldb " << db_path << ". Is it already existing?"; batch = new leveldb::WriteBatch(); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb LOG(INFO) << "Opening lmdb " << db_path; @@ -133,8 +146,13 @@ void convert_dataset(const char* image_filename, const char* label_filename, string keystr(key_cstr); // Put in db - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb batch->Put(keystr, value); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb mdb_data.mv_size = value.size(); @@ -150,10 +168,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, if (++count % 1000 == 0) { // Commit txn - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb db->Write(leveldb::WriteOptions(), batch); delete batch; batch = new leveldb::WriteBatch(); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) @@ -168,10 +191,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, } // write the last batch if (count % 1000 != 0) { - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb db->Write(leveldb::WriteOptions(), batch); delete batch; delete db; +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; @@ -187,14 +215,29 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { +#ifndef HAVE_LEVELDB +#ifndef HAVE_LMDB + LOG(FATAL) << "No DB library available to save the converted dataset"; +#endif +#endif + #ifndef GFLAGS_GFLAGS_H_ namespace gflags = google; #endif gflags::SetUsageMessage("This script converts the MNIST dataset to\n" - "the leveldb" + "the " +#ifdef HAVE_LEVELDB + "leveldb" #ifdef HAVE_LMDB - "/lmdb" + "/lmdb" +#endif +#else +#ifdef HAVE_LMDB + "lmdb" +#else + "non-specified" +#endif #endif " format used by Caffe to perform classification.\n" "Usage:\n" diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 863d5b446b2..f0a1f625316 100644 --- 
a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -9,7 +9,11 @@ #ifdef HAVE_HDF5 #include "hdf5.h" #endif + +#ifdef HAVE_LEVELDB #include "leveldb/db.h" +#endif + #ifdef HAVE_LMDB #include "lmdb.h" #endif @@ -117,9 +121,12 @@ class DataLayer : public BasePrefetchingDataLayer { protected: virtual void InternalThreadEntry(); +#ifdef HAVE_LEVELDB // LEVELDB shared_ptr db_; shared_ptr iter_; +#endif + #ifdef HAVE_LMDB // LMDB MDB_env* mdb_env_; diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp index f532f0d4194..0871242675a 100644 --- a/include/caffe/util/io.hpp +++ b/include/caffe/util/io.hpp @@ -17,10 +17,12 @@ #define HDF5_NUM_DIMS 4 #endif +#ifdef HAVE_LEVELDB namespace leveldb { // Forward declaration for leveldb::Options to be used in GetlevelDBOptions(). struct Options; } +#endif namespace caffe { @@ -106,7 +108,9 @@ inline bool ReadImageToDatum(const string& filename, const int label, return ReadImageToDatum(filename, label, 0, 0, datum); } +#ifdef HAVE_LEVELDB leveldb::Options GetLevelDBOptions(); +#endif #ifdef HAVE_HDF5 template diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh index 133384493ea..c6559dddebd 100755 --- a/scripts/travis/travis_build_and_test.sh +++ b/scripts/travis/travis_build_and_test.sh @@ -7,7 +7,9 @@ MAKE="make --jobs=$NUM_THREADS --keep-going" if $WITH_CMAKE; then mkdir build cd build - cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_LMDB=ON -DWITH_HDF5=ON .. + cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON \ + -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_HDF5=ON -DWITH_LEVELDB=ON \ + -DWITH_LMDB=ON .. $MAKE if ! $WITH_CUDA; then $MAKE runtest @@ -19,7 +21,7 @@ else if ! $WITH_CUDA; then export CPU_ONLY=1 fi - WITH_LMDB=1 WITH_HDF5=1 $MAKE all test pycaffe warn lint || true + WITH_HDF5=1 WITH_LEVELDB=1 WITH_LMDB=1 $MAKE all test pycaffe warn lint || true if ! 
$WITH_CUDA; then $MAKE runtest fi diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt index a3cc171b0d8..11b0fc9c773 100644 --- a/src/caffe/CMakeLists.txt +++ b/src/caffe/CMakeLists.txt @@ -45,15 +45,19 @@ find_package(OpenCV REQUIRED core highgui imgproc) include_directories(${OpenCV_INCLUDE_DIRS}) # LevelDB -find_package(LevelDB REQUIRED) -include_directories(${LEVELDB_INCLUDE}) -if(LEVELDB_FOUND) - find_package(Snappy REQUIRED) - include_directories(${SNAPPY_INCLUDE_DIR}) - set(LEVELDB_LIBS - ${LEVELDB_LIBS} - ${SNAPPY_LIBS} - ) +if(WITH_LEVELDB) + message(STATUS "LevelDB enabled") + find_package(LevelDB REQUIRED) + include_directories(${LEVELDB_INCLUDE}) + if(LEVELDB_FOUND) + find_package(Snappy REQUIRED) + include_directories(${SNAPPY_INCLUDE_DIR}) + set(LEVELDB_LIBS + ${LEVELDB_LIBS} + ${SNAPPY_LIBS} + ) + endif() + add_definitions(-DHAVE_LEVELDB) endif() # LMDB diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 0fa34ba7a5f..f39afe03b64 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -1,4 +1,6 @@ +#ifdef HAVE_LEVELDB #include +#endif #include #include @@ -19,8 +21,11 @@ DataLayer::~DataLayer() { this->JoinPrefetchThread(); // clean up the database resources switch (this->layer_param_.data_param().backend()) { +#ifdef HAVE_LEVELDB case DataParameter_DB_LEVELDB: break; // do nothing +#endif + #ifdef HAVE_LMDB case DataParameter_DB_LMDB: mdb_cursor_close(mdb_cursor_); @@ -39,6 +44,7 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, vector*>* top) { // Initialize DB switch (this->layer_param_.data_param().backend()) { +#ifdef HAVE_LEVELDB case DataParameter_DB_LEVELDB: { leveldb::DB* db_temp; @@ -55,6 +61,8 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, iter_->SeekToFirst(); } break; +#endif + #ifdef HAVE_LMDB case DataParameter_DB_LMDB: CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed"; @@ -84,12 +92,15 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, LOG(INFO) << "Skipping first " << skip << " data points."; while (skip-- > 0) { switch (this->layer_param_.data_param().backend()) { +#ifdef HAVE_LEVELDB case DataParameter_DB_LEVELDB: iter_->Next(); if (!iter_->Valid()) { iter_->SeekToFirst(); } break; +#endif + #ifdef HAVE_LMDB case DataParameter_DB_LMDB: if (mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT) @@ -107,9 +118,12 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, // Read a data point, and use it to initialize the top blob. 
Datum datum; switch (this->layer_param_.data_param().backend()) { +#ifdef HAVE_LEVELDB case DataParameter_DB_LEVELDB: datum.ParseFromString(iter_->value().ToString()); break; +#endif + #ifdef HAVE_LMDB case DataParameter_DB_LMDB: datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size); @@ -164,11 +178,14 @@ void DataLayer::InternalThreadEntry() { for (int item_id = 0; item_id < batch_size; ++item_id) { // get a blob switch (this->layer_param_.data_param().backend()) { +#ifdef HAVE_LEVELDB case DataParameter_DB_LEVELDB: CHECK(iter_); CHECK(iter_->Valid()); datum.ParseFromString(iter_->value().ToString()); break; +#endif + #ifdef HAVE_LMDB case DataParameter_DB_LMDB: CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, @@ -190,6 +207,7 @@ void DataLayer::InternalThreadEntry() { // go to the next iter switch (this->layer_param_.data_param().backend()) { +#ifdef HAVE_LEVELDB case DataParameter_DB_LEVELDB: iter_->Next(); if (!iter_->Valid()) { @@ -198,6 +216,8 @@ void DataLayer::InternalThreadEntry() { iter_->SeekToFirst(); } break; +#endif + #ifdef HAVE_LMDB case DataParameter_DB_LMDB: if (mdb_cursor_get(mdb_cursor_, &mdb_key_, diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index c1fbcb1deb8..6c358191211 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -2,7 +2,9 @@ #include #include "gtest/gtest.h" +#ifdef HAVE_LEVELDB #include "leveldb/db.h" +#endif #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -33,6 +35,7 @@ class DataLayerTest : public MultiDeviceTest { blob_top_vec_.push_back(blob_top_label_); } +#ifdef HAVE_LEVELDB // Fill the LevelDB with data: if unique_pixels, each pixel is unique but // all images are the same; else each image is unique but all pixels within // an image are the same. @@ -63,6 +66,7 @@ class DataLayerTest : public MultiDeviceTest { } delete db; } +#endif #ifdef HAVE_LMDB // Fill the LMDB with data: unique_pixels has same meaning as in FillLevelDB. @@ -327,6 +331,7 @@ class DataLayerTest : public MultiDeviceTest { TYPED_TEST_CASE(DataLayerTest, TestDtypesAndDevices); +#ifdef HAVE_LEVELDB TYPED_TEST(DataLayerTest, TestReadLevelDB) { const bool unique_pixels = false; // all pixels the same; images different this->FillLevelDB(unique_pixels); @@ -364,6 +369,7 @@ TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) { this->FillLevelDB(unique_pixels); this->TestReadCrop(); } +#endif #ifdef HAVE_LMDB TYPED_TEST(DataLayerTest, TestReadLMDB) { diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index 3b701a6afcf..6c8a134d32a 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -3,8 +3,6 @@ #include #include -#include "leveldb/db.h" - #include "gtest/gtest.h" #include "caffe/blob.hpp" diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index 417a0cb055d..c74e070526c 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -2,7 +2,9 @@ #include #include #include +#ifdef HAVE_LEVELDB #include +#endif #include #include #include @@ -109,6 +111,7 @@ bool ReadImageToDatum(const string& filename, const int label, return true; } +#ifdef HAVE_LEVELDB leveldb::Options GetLevelDBOptions() { // In default, we will return the leveldb option and set the max open files // in order to avoid using up the operating system's limit. 
@@ -116,6 +119,7 @@ leveldb::Options GetLevelDBOptions() { options.max_open_files = 100; return options; } +#endif #ifdef HAVE_HDF5 // Verifies format of data stored in HDF5 file and reshapes blob accordingly. diff --git a/tools/compute_image_mean.cpp b/tools/compute_image_mean.cpp index 3f032f68171..dad967ea4b0 100644 --- a/tools/compute_image_mean.cpp +++ b/tools/compute_image_mean.cpp @@ -1,5 +1,8 @@ #include +#ifdef HAVE_LEVELDB #include +#endif + #ifdef HAVE_LMDB #include #endif @@ -19,8 +22,12 @@ using std::max; int main(int argc, char** argv) { ::google::InitGoogleLogging(argv[0]); if (argc < 3 || argc > 4) { - LOG(ERROR) << "Usage: compute_image_mean input_leveldb output_file" - << " db_backend[leveldb" + LOG(ERROR) << "Usage: compute_image_mean input_db output_file" + << " db_backend[" +#ifdef HAVE_LEVELDB + "leveldb" +#endif + #ifdef HAVE_LMDB << " or lmdb" #endif @@ -28,16 +35,22 @@ int main(int argc, char** argv) { return 1; } - string db_backend = "leveldb"; + string db_backend = ""; +#ifdef HAVE_LEVELDB + db_backend = "leveldb"; +#endif if (argc == 4) { db_backend = string(argv[3]); } +#ifdef HAVE_LEVELDB // leveldb leveldb::DB* db; leveldb::Options options; options.create_if_missing = false; leveldb::Iterator* it = NULL; +#endif + #ifdef HAVE_LMDB // lmdb MDB_env* mdb_env; @@ -48,7 +61,10 @@ int main(int argc, char** argv) { #endif // Open db - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb LOG(INFO) << "Opening leveldb " << argv[1]; leveldb::Status status = leveldb::DB::Open( options, argv[1], &db); @@ -57,6 +73,8 @@ int main(int argc, char** argv) { read_options.fill_cache = false; it = db->NewIterator(read_options); it->SeekToFirst(); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb LOG(INFO) << "Opening lmdb " << argv[1]; @@ -81,8 +99,13 @@ int main(int argc, char** argv) { BlobProto sum_blob; int count = 0; // load first datum - if (db_backend == "leveldb") { + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb datum.ParseFromString(it->value().ToString()); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { datum.ParseFromArray(mdb_value.mv_data, mdb_value.mv_size); @@ -102,7 +125,10 @@ int main(int argc, char** argv) { sum_blob.add_data(0.); } LOG(INFO) << "Starting Iteration"; - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb for (it->SeekToFirst(); it->Valid(); it->Next()) { // just a dummy operation datum.ParseFromString(it->value().ToString()); @@ -126,6 +152,8 @@ int main(int argc, char** argv) { LOG(ERROR) << "Processed " << count << " files."; } } +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_cursor_get(mdb_cursor, &mdb_key, &mdb_value, MDB_FIRST), @@ -170,8 +198,13 @@ int main(int argc, char** argv) { WriteProtoToBinaryFile(sum_blob, argv[2]); // Clean up - if (db_backend == "leveldb") { + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb delete db; +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { mdb_cursor_close(mdb_cursor); diff --git a/tools/convert_imageset.cpp b/tools/convert_imageset.cpp index 
ac3f192aeae..3283e724f6f 100644 --- a/tools/convert_imageset.cpp +++ b/tools/convert_imageset.cpp @@ -15,8 +15,11 @@ #include #include +#ifdef HAVE_LEVELDB #include #include +#endif + #ifdef HAVE_LMDB #include #endif @@ -40,20 +43,39 @@ DEFINE_bool(gray, false, "When this option is on, treat images as grayscale ones"); DEFINE_bool(shuffle, false, "Randomly shuffle the order of images and their labels"); +#ifdef HAVE_LEVELDB DEFINE_string(backend, "leveldb", "The backend for storing the result"); +#else +DEFINE_string(backend, "", "The backend for storing the result"); +#endif DEFINE_int32(resize_width, 0, "Width images are resized to"); DEFINE_int32(resize_height, 0, "Height images are resized to"); int main(int argc, char** argv) { +#ifndef HAVE_LEVELDB +#ifndef HAVE_LMDB + LOG(FATAL) << "No DB library available to save the converted dataset"; +#endif +#endif + ::google::InitGoogleLogging(argv[0]); #ifndef GFLAGS_GFLAGS_H_ namespace gflags = google; #endif - gflags::SetUsageMessage("Convert a set of images to the leveldb" + gflags::SetUsageMessage("Convert a set of images to the " +#ifdef HAVE_LEVELDB + "leveldb" #ifdef HAVE_LMDB "/lmdb" +#endif +#else +#ifdef HAVE_LMDB + "lmdb" +#else + "non-specified" +#endif #endif "\n" "format used as input for Caffe.\n" @@ -90,13 +112,7 @@ int main(int argc, char** argv) { int resize_width = std::max(0, FLAGS_resize_width); // Open new db -#ifdef HAVE_LMDB - // lmdb - MDB_env *mdb_env; - MDB_dbi mdb_dbi; - MDB_val mdb_key, mdb_data; - MDB_txn *mdb_txn; -#endif +#ifdef HAVE_LEVELDB // leveldb leveldb::DB* db; leveldb::Options options; @@ -104,15 +120,29 @@ int main(int argc, char** argv) { options.create_if_missing = true; options.write_buffer_size = 268435456; leveldb::WriteBatch* batch = NULL; +#endif + +#ifdef HAVE_LMDB + // lmdb + MDB_env *mdb_env; + MDB_dbi mdb_dbi; + MDB_val mdb_key, mdb_data; + MDB_txn *mdb_txn; +#endif // Open db - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb LOG(INFO) << "Opening leveldb " << db_path; leveldb::Status status = leveldb::DB::Open( options, db_path, &db); CHECK(status.ok()) << "Failed to open leveldb " << db_path << ". 
Is it already existing?"; batch = new leveldb::WriteBatch(); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb LOG(INFO) << "Opening lmdb " << db_path; @@ -162,8 +192,13 @@ int main(int argc, char** argv) { string keystr(key_cstr); // Put in db - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb batch->Put(keystr, value); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb mdb_data.mv_size = value.size(); @@ -179,10 +214,15 @@ int main(int argc, char** argv) { if (++count % 1000 == 0) { // Commit txn - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb db->Write(leveldb::WriteOptions(), batch); delete batch; batch = new leveldb::WriteBatch(); +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) @@ -198,10 +238,15 @@ int main(int argc, char** argv) { } // write the last batch if (count % 1000 != 0) { - if (db_backend == "leveldb") { // leveldb + if (db_backend == "") { + LOG(FATAL) << "Unknown db backend " << db_backend; +#ifdef HAVE_LEVELDB + } else if (db_backend == "leveldb") { // leveldb db->Write(leveldb::WriteOptions(), batch); delete batch; delete db; +#endif + #ifdef HAVE_LMDB } else if (db_backend == "lmdb") { // lmdb CHECK_EQ(mdb_txn_commit(mdb_txn), MDB_SUCCESS) << "mdb_txn_commit failed"; diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index 49e8f98971c..798f7df09d5 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -4,8 +4,10 @@ #include "boost/algorithm/string.hpp" #include "google/protobuf/text_format.h" +#ifdef HAVE_LEVELDB #include "leveldb/db.h" #include "leveldb/write_batch.h" +#endif #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -26,6 +28,10 @@ int main(int argc, char** argv) { template int feature_extraction_pipeline(int argc, char** argv) { +#ifndef HAVE_LEVELDB + LOG(FATAL) << "No DB library available to save the features"; +#endif + ::google::InitGoogleLogging(argv[0]); const int num_required_args = 6; if (argc < num_required_args) { @@ -42,6 +48,7 @@ int feature_extraction_pipeline(int argc, char** argv) { " and leveldbs must be equal."; return 1; } + int arg_pos = num_required_args; arg_pos = num_required_args; @@ -114,6 +121,7 @@ int feature_extraction_pipeline(int argc, char** argv) { << " in the network " << feature_extraction_proto; } +#ifdef HAVE_LEVELDB leveldb::Options options; options.error_if_exists = true; options.create_if_missing = true; @@ -128,15 +136,18 @@ int feature_extraction_pipeline(int argc, char** argv) { CHECK(status.ok()) << "Failed to open leveldb " << leveldb_names[i]; feature_dbs.push_back(shared_ptr(db)); } +#endif int num_mini_batches = atoi(argv[++arg_pos]); LOG(ERROR)<< "Extacting Features"; Datum datum; +#ifdef HAVE_LEVELDB vector > feature_batches( num_features, shared_ptr(new leveldb::WriteBatch())); +#endif const int kMaxKeyStrLength = 100; char key_str[kMaxKeyStrLength]; vector*> input_vec; @@ -163,14 +174,18 @@ int feature_extraction_pipeline(int argc, char** argv) { string value; datum.SerializeToString(&value); snprintf(key_str, kMaxKeyStrLength, "%d", image_indices[i]); +#ifdef HAVE_LEVELDB feature_batches[i]->Put(string(key_str), value); +#endif ++image_indices[i]; if (image_indices[i] % 
1000 == 0) { - feature_dbs[i]->Write(leveldb::WriteOptions(), - feature_batches[i].get()); LOG(ERROR)<< "Extracted features of " << image_indices[i] << " query images for feature blob " << blob_names[i]; +#ifdef HAVE_LEVELDB + feature_dbs[i]->Write(leveldb::WriteOptions(), + feature_batches[i].get()); feature_batches[i].reset(new leveldb::WriteBatch()); +#endif } } // for (int n = 0; n < batch_size; ++n) } // for (int i = 0; i < num_features; ++i) @@ -178,7 +193,9 @@ int feature_extraction_pipeline(int argc, char** argv) { // write the last batch for (int i = 0; i < num_features; ++i) { if (image_indices[i] % 1000 != 0) { +#ifdef HAVE_LEVELDB feature_dbs[i]->Write(leveldb::WriteOptions(), feature_batches[i].get()); +#endif } LOG(ERROR)<< "Extracted features of " << image_indices[i] << " query images for feature blob " << blob_names[i]; From 4844dcfd9462513bb14f74e6636252bfb1bafecd Mon Sep 17 00:00:00 2001 From: Kai Li Date: Fri, 12 Sep 2014 23:27:04 +0800 Subject: [PATCH 5/8] Update installation documentation about the optional dependencies --- docs/installation.md | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index dbf73d2c067..ddb244cfaed 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -15,11 +15,25 @@ We have installed Caffe on Ubuntu 14.04, Ubuntu 12.04, OS X 10.9, and OS X 10.8. Caffe depends on several software packages. -* [CUDA](https://developer.nvidia.com/cuda-zone) library version 6.5 (recommended), 6.0, 5.5, or 5.0 and the latest driver version for CUDA 6 or 319.* for CUDA 5 (and NOT 331.*) -* [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) (provided via ATLAS, MKL, or OpenBLAS). -* [OpenCV](http://opencv.org/). +#### Required + +* [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) (provided via ATLAS, MKL, or OpenBLAS) * [Boost](http://www.boost.org/) (>= 1.55, although only 1.55 is tested) -* `glog`, `gflags`, `protobuf`, `leveldb`, `snappy`, `hdf5`, `lmdb` +* [gflags](http://code.google.com/p/gflags/) +* [glog](http://code.google.com/p/google-glog/) +* [OpenCV](http://opencv.org/) +* [Protocol Buffers](http://code.google.com/p/protobuf/) + +#### Optional + +* [CUDA](https://developer.nvidia.com/cuda-zone) library version 6.5 (recommended), 6.0, 5.5, or 5.0 and the latest driver version for CUDA 6 or 319.* for CUDA 5 (and NOT 331.*) +* [HDF5](http://www.hdfgroup.org/HDF5/) + + You can disable it with the CMake flag: -DWITH_HDF5=OFF +* [LevelDB](http://code.google.com/p/leveldb/) and its dependency [snappy](http://code.google.com/p/snappy/). 
+    + You can disable it with the CMake flag: -DWITH_LEVELDB=OFF
+* [LMDB](http://symas.com/mdb/)
+    + You can disable it with the CMake flag: -DWITH_LMDB=OFF
+
 * For the Python wrapper
     * `Python 2.7`, `numpy (>= 1.7)`, boost-provided `boost.python`
 * For the MATLAB wrapper

From e3321bf6c6b13f368af4665ff6e3dbd25e95d614 Mon Sep 17 00:00:00 2001
From: Kai Li
Date: Fri, 12 Sep 2014 23:52:03 +0800
Subject: [PATCH 6/8] Add WITH_CUDA to deprecate CPU_ONLY for building without CUDA

---
 CMakeLists.txt                          | 9 +++++++--
 docs/installation.md                    | 1 +
 scripts/travis/travis_build_and_test.sh | 7 ++++++-
 src/caffe/CMakeLists.txt                | 7 +++++--
 4 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7fcacf430d5..b2c9cd97178 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,6 +5,7 @@ project( Caffe )

 option(CPU_ONLY "Build Caffe without GPU support" OFF)
+option(WITH_CUDA "Include NVIDIA CUDA support" ON)
 option(WITH_HDF5 "Include HDF5 support" ON)
 option(WITH_LEVELDB "Include LEVELDB support" ON)
 option(WITH_LMDB "Include LMDB support" ON)

@@ -37,7 +38,8 @@ set(CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})  # set release flags

 # Global Definitions
 if(CPU_ONLY)
-  add_definitions(-DCPU_ONLY)
+  message("CPU_ONLY is deprecated, use WITH_CUDA=ON or WITH_CUDA=OFF")
+  set(WITH_CUDA OFF)
 endif()

 # Include Directories
@@ -52,9 +54,12 @@ set(CMAKE_SCRIPT_DIR ${CMAKE_SOURCE_DIR}/CMakeScripts)
 set( CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SCRIPT_DIR})

 # CUDA is required globally
-if(NOT CPU_ONLY)
+if(WITH_CUDA)
+  message(STATUS "NVIDIA CUDA enabled")
   find_package(CUDA 5.5 REQUIRED)
   include_directories(${CUDA_INCLUDE_DIRS})
+else()
+  add_definitions(-DCPU_ONLY)
 endif()

 ### Subdirectories ##########################################################################
diff --git a/docs/installation.md b/docs/installation.md
index ddb244cfaed..d84f293618f 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -27,6 +27,7 @@ Caffe depends on several software packages.
 #### Optional

 * [CUDA](https://developer.nvidia.com/cuda-zone) library version 6.5 (recommended), 6.0, 5.5, or 5.0 and the latest driver version for CUDA 6 or 319.* for CUDA 5 (and NOT 331.*)
+    + You can disable it with the CMake flag: -DWITH_CUDA=OFF
 * [HDF5](http://www.hdfgroup.org/HDF5/)
     + You can disable it with the CMake flag: -DWITH_HDF5=OFF
 * [LevelDB](http://code.google.com/p/leveldb/) and its dependency [snappy](http://code.google.com/p/snappy/).
diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh
index c6559dddebd..cbf7b895a47 100755
--- a/scripts/travis/travis_build_and_test.sh
+++ b/scripts/travis/travis_build_and_test.sh
@@ -7,8 +7,13 @@ MAKE="make --jobs=$NUM_THREADS --keep-going"
 if $WITH_CMAKE; then
   mkdir build
   cd build
+  if ! $WITH_CUDA; then
+    export WITH_CUDA=OFF
+  else
+    export WITH_CUDA=ON
+  fi
   cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON \
-    -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON -DWITH_HDF5=ON -DWITH_LEVELDB=ON \
+    -DCMAKE_BUILD_TYPE=Release -DWITH_CUDA=$WITH_CUDA -DWITH_HDF5=ON -DWITH_LEVELDB=ON \
     -DWITH_LMDB=ON ..
   $MAKE
   if ! $WITH_CUDA; then
diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt
index 11b0fc9c773..ab2eb79d97c 100644
--- a/src/caffe/CMakeLists.txt
+++ b/src/caffe/CMakeLists.txt
@@ -90,14 +90,17 @@ add_library(caffe ${CPP_SOURCES})
 add_dependencies(caffe proto)

 # CUDA
-if(NOT CPU_ONLY)
+if(WITH_CUDA)
   set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS}
     -gencode arch=compute_20,code=sm_20
     -gencode arch=compute_20,code=sm_21
     -gencode arch=compute_30,code=sm_30
     -gencode arch=compute_35,code=sm_35
+    -gencode arch=compute_50,code=sm_50
+    -gencode arch=compute_50,code=compute_50
   )
-
+endif()
+
 # https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt
 # work-arounds
 if(Boost_VERSION EQUAL 105500)

From 575e673ac854c6a27b05849cabfa5e7aa63cdc1b Mon Sep 17 00:00:00 2001
From: Kai Li
Date: Sat, 13 Sep 2014 00:13:44 +0800
Subject: [PATCH 7/8] Add CMake flag WITH_CUDNN to optionally build with cuDNN support

---
 CMakeLists.txt                          |  1 +
 docs/installation.md                    | 10 ++++++----
 scripts/travis/travis_build_and_test.sh |  3 ++-
 src/caffe/CMakeLists.txt                |  9 +++++++++
 4 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b2c9cd97178..962ba618ef8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,6 +6,7 @@ project( Caffe )

 option(CPU_ONLY "Build Caffe without GPU support" OFF)
 option(WITH_CUDA "Include NVIDIA CUDA support" ON)
+option(WITH_CUDNN "Include NVIDIA CUDNN support" OFF)
 option(WITH_HDF5 "Include HDF5 support" ON)
 option(WITH_LEVELDB "Include LEVELDB support" ON)
 option(WITH_LMDB "Include LMDB support" ON)
diff --git a/docs/installation.md b/docs/installation.md
index d84f293618f..f17d1cf64cb 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -27,13 +27,15 @@ Caffe depends on several software packages.
 #### Optional

 * [CUDA](https://developer.nvidia.com/cuda-zone) library version 6.5 (recommended), 6.0, 5.5, or 5.0 and the latest driver version for CUDA 6 or 319.* for CUDA 5 (and NOT 331.*)
-    + You can disable it with the CMake flag: -DWITH_CUDA=OFF
+    + Enabled by default. You can disable it with the CMake flag: -DWITH_CUDA=OFF
+* [cuDNN](https://developer.nvidia.com/cuDNN)
+    + Disabled by default. You can enable it with the CMake flag: -DWITH_CUDNN=ON
 * [HDF5](http://www.hdfgroup.org/HDF5/)
-    + You can disable it with the CMake flag: -DWITH_HDF5=OFF
+    + Enabled by default. You can disable it with the CMake flag: -DWITH_HDF5=OFF
 * [LevelDB](http://code.google.com/p/leveldb/) and its dependency [snappy](http://code.google.com/p/snappy/).
-    + You can disable it with the CMake flag: -DWITH_LEVELDB=OFF
+    + Enabled by default. You can disable it with the CMake flag: -DWITH_LEVELDB=OFF
 * [LMDB](http://symas.com/mdb/)
-    + You can disable it with the CMake flag: -DWITH_LMDB=OFF
+    + Enabled by default. You can disable it with the CMake flag: -DWITH_LMDB=OFF

 * For the Python wrapper
     * `Python 2.7`, `numpy (>= 1.7)`, boost-provided `boost.python`
diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh
index cbf7b895a47..485310adc77 100755
--- a/scripts/travis/travis_build_and_test.sh
+++ b/scripts/travis/travis_build_and_test.sh
@@ -13,7 +13,8 @@ if $WITH_CMAKE; then
     export WITH_CUDA=ON
   fi
   cmake -DBUILD_PYTHON=ON -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON \
-    -DCMAKE_BUILD_TYPE=Release -DWITH_CUDA=$WITH_CUDA -DWITH_HDF5=ON -DWITH_LEVELDB=ON \
+    -DCMAKE_BUILD_TYPE=Release -DWITH_CUDA=$WITH_CUDA -DWITH_CUDNN=OFF \
+    -DWITH_HDF5=ON -DWITH_LEVELDB=ON \
     -DWITH_LMDB=ON ..
   $MAKE
   if ! $WITH_CUDA; then
diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt
index ab2eb79d97c..a593e28682b 100644
--- a/src/caffe/CMakeLists.txt
+++ b/src/caffe/CMakeLists.txt
@@ -100,6 +100,14 @@ if(WITH_CUDA)
     -gencode arch=compute_50,code=compute_50
   )
 endif()
+
+# cuDNN
+if(WITH_CUDNN)
+  set(CUDNN_LIBRARIES cudnn)
+  ## TODO: replace USE_CUDNN with HAVE_CUDNN in the code and Makefile
+  add_definitions(-DUSE_CUDNN)
+  add_definitions(-DHAVE_CUDNN)
+endif()

 # https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt
 # work-arounds
@@ -126,6 +134,7 @@ endif()
 target_link_libraries(caffe proto
   ${BLAS_LIBRARIES}
   ${Boost_LIBRARIES}
+  ${CUDNN_LIBRARIES}
   ${CMAKE_THREAD_LIBS_INIT}
   ${GFLAGS_LIBRARIES}
   ${GLOG_LIBRARIES}

From c3f6b1062dec4289bfc7e08ea561a1221a2105be Mon Sep 17 00:00:00 2001
From: Kai Li
Date: Sat, 13 Sep 2014 00:31:22 +0800
Subject: [PATCH 8/8] Fix "an ENDIF command was found outside of a proper IF ENDIF structure"

---
 src/caffe/CMakeLists.txt | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt
index a593e28682b..0d21524a8b1 100644
--- a/src/caffe/CMakeLists.txt
+++ b/src/caffe/CMakeLists.txt
@@ -99,25 +99,16 @@ if(WITH_CUDA)
     -gencode arch=compute_50,code=sm_50
     -gencode arch=compute_50,code=compute_50
   )
-endif()
-
-# cuDNN
-if(WITH_CUDNN)
-  set(CUDNN_LIBRARIES cudnn)
-  ## TODO: replace USE_CUDNN with HAVE_CUDNN in the code and Makefile
-  add_definitions(-DUSE_CUDNN)
-  add_definitions(-DHAVE_CUDNN)
-endif()

 # https://github.com/ComputationalRadiationPhysics/picongpu/blob/master/src/picongpu/CMakeLists.txt
 # work-arounds
-if(Boost_VERSION EQUAL 105500)
-  # see https://svn.boost.org/trac/boost/ticket/9392
-  message(STATUS "Boost: Applying noinline work around")
-  # avoid warning for CMake >= 2.8.12
-  set(CUDA_NVCC_FLAGS
-    "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ")
-endif(Boost_VERSION EQUAL 105500)
+  if(Boost_VERSION EQUAL 105500)
+    # see https://svn.boost.org/trac/boost/ticket/9392
+    message(STATUS "Boost: Applying noinline work around")
+    # avoid warning for CMake >= 2.8.12
+    set(CUDA_NVCC_FLAGS
+      "${CUDA_NVCC_FLAGS} \"-DBOOST_NOINLINE=__attribute__((noinline))\" ")
+  endif(Boost_VERSION EQUAL 105500)

   # cuda sources
   file(GLOB_RECURSE CU_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cu)
@@ -130,6 +121,14 @@ endif(Boost_VERSION EQUAL 105500)
     ${CUDA_curand_LIBRARY}
   )
 endif()
+
+# cuDNN
+if(WITH_CUDNN)
+  set(CUDNN_LIBRARIES cudnn)
+  ## TODO: replace USE_CUDNN with HAVE_CUDNN in the code and Makefile
+  add_definitions(-DUSE_CUDNN)
+  add_definitions(-DHAVE_CUDNN)
+endif()

 target_link_libraries(caffe proto
   ${BLAS_LIBRARIES}
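
The pattern the series applies to every optional dependency can be exercised on its own: each WITH_* build flag only adds the matching -DHAVE_* define (and library), and the C++ dispatch starts from an always-false empty-string branch so that every backend-specific "} else if" can be compiled out independently while the chain stays syntactically valid. The following is a minimal standalone sketch of that guard pattern; backend_demo.cpp and its printed messages are hypothetical and not part of these patches.

// backend_demo.cpp -- hypothetical sketch of the HAVE_* guard pattern
// used throughout this series; not part of the patches themselves.
// Build, for example:
//   g++ backend_demo.cpp                   # no backend compiled in
//   g++ -DHAVE_LEVELDB backend_demo.cpp    # leveldb branch compiled in
//   g++ -DHAVE_LEVELDB -DHAVE_LMDB backend_demo.cpp
#include <cstdlib>
#include <iostream>
#include <string>

// Pick a default that matches whatever was compiled in, the same way
// convert_mnist_data.cpp switches its --backend default between
// "lmdb" and "leveldb" depending on HAVE_LMDB.
#if defined(HAVE_LMDB)
static const char* const kDefaultBackend = "lmdb";
#elif defined(HAVE_LEVELDB)
static const char* const kDefaultBackend = "leveldb";
#else
static const char* const kDefaultBackend = "";
#endif

int main() {
  const std::string db_backend = kDefaultBackend;
  // The empty-string check exists so that every guarded branch below is
  // an "} else if": any subset of them can be preprocessed away and the
  // if/else chain still parses.
  if (db_backend == "") {
    std::cerr << "No DB library compiled in" << std::endl;
    return EXIT_FAILURE;
#ifdef HAVE_LEVELDB
  } else if (db_backend == "leveldb") {
    std::cout << "would write through leveldb::WriteBatch" << std::endl;
#endif
#ifdef HAVE_LMDB
  } else if (db_backend == "lmdb") {
    std::cout << "would write through mdb_put" << std::endl;
#endif
  } else {
    std::cerr << "Unknown db backend " << db_backend << std::endl;
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}

The same shape appears in data_layer.cpp, the converters, and extract_features.cpp above; only the bodies of the guarded branches differ.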