diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000000..d78a3dc3455 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,19 @@ +Please use the [caffe-users list](https://groups.google.com/forum/#!forum/caffe-users) for usage, installation, or modeling questions, or other requests for help. +_Do not post such requests to Issues._ Doing so interferes with the development of Caffe. + +Please read the [guidelines for contributing](https://github.com/BVLC/caffe/blob/master/CONTRIBUTING.md) before submitting this issue. + +### Issue summary + + +### Steps to reproduce + +If you are having difficulty building Caffe or training a model, please ask the caffe-users mailing list. If you are reporting a build error that seems to be due to a bug in Caffe, please attach your build configuration (either Makefile.config or CMakeCache.txt) and the output of the make (or cmake) command. + +### Your system configuration +Operating system: +Compiler: +CUDA version (if applicable): +CUDNN version (if applicable): +BLAS: +Python or MATLAB version (for pycaffe and matcaffe respectively): diff --git a/.gitignore b/.gitignore index 53c1fb056bb..eff292b7f61 100644 --- a/.gitignore +++ b/.gitignore @@ -47,6 +47,9 @@ # PyCharm files .idea +# Visual Studio Code files +.vscode + # OSX dir files .DS_Store @@ -81,6 +84,7 @@ cmake_build # Generated documentation docs/_site +docs/_includes docs/gathered _site doxygen diff --git a/.travis.yml b/.travis.yml index 4849a7ac289..3297954755d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -28,7 +28,6 @@ env: - BUILD_NAME="cudnn-cmake" WITH_CMAKE=true WITH_CUDA=true WITH_CUDNN=true cache: - timeout: 604800 # 1 week apt: true directories: - ~/protobuf3 diff --git a/CMakeLists.txt b/CMakeLists.txt index da7142c9b3c..08f56a33a59 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,14 +10,15 @@ endif() project(Caffe C CXX) # ---[ Caffe version -set(CAFFE_TARGET_VERSION "1.0.0-rc3" CACHE STRING "Caffe logical version") -set(CAFFE_TARGET_SOVERSION "1.0.0-rc3" CACHE STRING "Caffe soname version") +set(CAFFE_TARGET_VERSION "1.0.0" CACHE STRING "Caffe logical version") +set(CAFFE_TARGET_SOVERSION "1.0.0" CACHE STRING "Caffe soname version") add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION}) # ---[ Using cmake scripts and modules list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules) include(ExternalProject) +include(GNUInstallDirs) include(cmake/Utils.cmake) include(cmake/Targets.cmake) @@ -28,6 +29,7 @@ include(cmake/ConfigGen.cmake) # ---[ Options caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY) +caffe_option(USE_NCCL "Build Caffe with NCCL library support" OFF) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) caffe_option(BUILD_python "Build Python wrapper" ON) set(python_version "2" CACHE STRING "Specify which Python version to use") @@ -38,6 +40,7 @@ caffe_option(USE_OPENCV "Build with OpenCV support" ON) caffe_option(USE_LEVELDB "Build with levelDB" ON) caffe_option(USE_LMDB "Build with lmdb" ON) caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF) +caffe_option(USE_OPENMP "Link with OpenMP (when your BLAS wants OpenMP and you get linker errors)" OFF) # ---[ Dependencies include(cmake/Dependencies.cmake) @@ -54,8 +57,6 @@ if(USE_libstdcpp) message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)") endif() 
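The `caffe_option()` calls above come from `cmake/Utils.cmake`, which is not touched by this diff; the helper makes an option's default conditional on another setting (e.g. `USE_CUDNN` defaults to ON only when `CPU_ONLY` is off). A minimal sketch of the equivalent behavior using stock CMake's `CMakeDependentOption`, for readers unfamiliar with the helper:

```cmake
# Sketch only: the stock-CMake analogue of caffe_option's "ON IF NOT CPU_ONLY"
# pattern. caffe_option() itself lives in cmake/Utils.cmake and is unchanged here.
include(CMakeDependentOption)

option(CPU_ONLY "Build Caffe without CUDA support" OFF)
# USE_CUDNN defaults to ON, but is forced OFF whenever CPU_ONLY is set,
# mirroring: caffe_option(USE_CUDNN "..." ON IF NOT CPU_ONLY)
cmake_dependent_option(USE_CUDNN "Build Caffe with cuDNN library support" ON
                       "NOT CPU_ONLY" OFF)
```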
-add_definitions(-DGTEST_USE_OWN_TR1_TUPLE) - # ---[ Warnings caffe_warnings_disable(CMAKE_CXX_FLAGS -Wno-sign-compare -Wno-uninitialized) @@ -64,8 +65,26 @@ configure_file(cmake/Templates/caffe_config.h.in "${PROJECT_BINARY_DIR}/caffe_co # ---[ Includes set(Caffe_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include) -include_directories(${Caffe_INCLUDE_DIR} ${PROJECT_BINARY_DIR}) -include_directories(BEFORE src) # This is needed for gtest. +set(Caffe_SRC_DIR ${PROJECT_SOURCE_DIR}/src) +include_directories(${PROJECT_BINARY_DIR}) + +# ---[ Includes & defines for CUDA + +# cuda_compile() does not have per-call dependencies or include paths +# (cuda_compile() has per-call flags, but we set them here too for clarity) +# +# list(REMOVE_ITEM ...) invocations remove PRIVATE and PUBLIC keywords from collected definitions and include paths +if(HAVE_CUDA) + # pass include paths to cuda_include_directories() + set(Caffe_ALL_INCLUDE_DIRS ${Caffe_INCLUDE_DIRS}) + list(REMOVE_ITEM Caffe_ALL_INCLUDE_DIRS PRIVATE PUBLIC) + cuda_include_directories(${Caffe_INCLUDE_DIR} ${Caffe_SRC_DIR} ${Caffe_ALL_INCLUDE_DIRS}) + + # add definitions to nvcc flags directly + set(Caffe_ALL_DEFINITIONS ${Caffe_DEFINITIONS}) + list(REMOVE_ITEM Caffe_ALL_DEFINITIONS PRIVATE PUBLIC) + list(APPEND CUDA_NVCC_FLAGS ${Caffe_ALL_DEFINITIONS}) +endif() # ---[ Subdirectories add_subdirectory(src/gtest) @@ -85,8 +104,19 @@ if(BUILD_python) add_dependencies(pytest pycaffe) endif() +# ---[ uninstall target +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Uninstall.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/cmake/Uninstall.cmake + IMMEDIATE @ONLY) + +add_custom_target(uninstall + COMMAND ${CMAKE_COMMAND} -P + ${CMAKE_CURRENT_BINARY_DIR}/cmake/Uninstall.cmake) + # ---[ Configuration summary caffe_print_configuration_summary() # ---[ Export configs generation caffe_generate_export_configs() + diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 8db66ea82c6..3fd767812e9 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -1,6 +1,6 @@ # Contributors -Caffe is developed by a core set of BVLC members and the open-source community. +Caffe is developed by a core set of BAIR members and the open-source community. We thank all of our [contributors](https://github.com/BVLC/caffe/graphs/contributors)! diff --git a/LICENSE b/LICENSE index 8940e9ad6e2..783ec73fc62 100644 --- a/LICENSE +++ b/LICENSE @@ -11,11 +11,11 @@ BSD 2-Clause license. All rights reserved. All contributions by the University of California: -Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +Copyright (c) 2014-2017 The Regents of the University of California (Regents) All rights reserved. All other contributions: -Copyright (c) 2014, 2015, the respective contributors +Copyright (c) 2014-2017, the respective contributors All rights reserved.
Caffe uses a shared copyright model: each contributor holds copyright over diff --git a/Makefile b/Makefile index 403e00a38a1..c85c695acff 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ LIB_BUILD_DIR := $(BUILD_DIR)/lib STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a DYNAMIC_VERSION_MAJOR := 1 DYNAMIC_VERSION_MINOR := 0 -DYNAMIC_VERSION_REVISION := 0-rc3 +DYNAMIC_VERSION_REVISION := 0 DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so #DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR) DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION) @@ -192,12 +192,12 @@ ifeq ($(USE_LMDB), 1) LIBRARIES += lmdb endif ifeq ($(USE_OPENCV), 1) - LIBRARIES += opencv_core opencv_highgui opencv_imgproc + LIBRARIES += opencv_core opencv_highgui opencv_imgproc ifeq ($(OPENCV_VERSION), 3) LIBRARIES += opencv_imgcodecs endif - + endif PYTHON_LIBRARIES ?= boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare @@ -328,6 +328,12 @@ ifeq ($(USE_CUDNN), 1) COMMON_FLAGS += -DUSE_CUDNN endif +# NCCL acceleration configuration +ifeq ($(USE_NCCL), 1) + LIBRARIES += nccl + COMMON_FLAGS += -DUSE_NCCL +endif + # configure IO libraries ifeq ($(USE_OPENCV), 1) COMMON_FLAGS += -DUSE_OPENCV @@ -382,8 +388,11 @@ else LIBRARIES += cblas # 10.10 has accelerate while 10.9 has veclib XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep 'version' | sed 's/[^0-9]*\([0-9]\).*/\1/') + XCODE_CLT_GEQ_7 := $(shell [ $(XCODE_CLT_VER) -gt 6 ] && echo 1) XCODE_CLT_GEQ_6 := $(shell [ $(XCODE_CLT_VER) -gt 5 ] && echo 1) - ifeq ($(XCODE_CLT_GEQ_6), 1) + ifeq ($(XCODE_CLT_GEQ_7), 1) + BLAS_INCLUDE ?= /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/$(shell ls /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ | sort | tail -1)/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/Headers + else ifeq ($(XCODE_CLT_GEQ_6), 1) BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ LDFLAGS += -framework Accelerate else @@ -568,7 +577,7 @@ $(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) @ echo AR -o $@ $(Q)ar rcs $@ $(OBJS) -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) +$(BUILD_DIR)/%.o: %.cpp $(PROTO_GEN_HEADER) | $(ALL_BUILD_DIRS) @ echo CXX $< $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @@ -685,6 +694,6 @@ $(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) install -m 644 $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib cd $(DISTRIBUTE_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT) # add python - it's not the standard way, indeed... - cp -r python $(DISTRIBUTE_DIR)/python + cp -r python $(DISTRIBUTE_DIR)/ -include $(DEPS) diff --git a/Makefile.config.example b/Makefile.config.example index 07bed63ae40..79905935f15 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -31,13 +31,18 @@ CUDA_DIR := /usr/local/cuda # CUDA_DIR := /usr # CUDA architecture setting: going with all of them. -# For CUDA < 6.0, comment the *_50 lines for compatibility. +# For CUDA < 6.0, comment the *_50 through *_61 lines for compatibility. +# For CUDA < 8.0, comment the *_60 and *_61 lines for compatibility. +# For CUDA >= 9.0, comment the *_20 and *_21 lines for compatibility. 
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ -gencode arch=compute_20,code=sm_21 \ -gencode arch=compute_30,code=sm_30 \ -gencode arch=compute_35,code=sm_35 \ -gencode arch=compute_50,code=sm_50 \ - -gencode arch=compute_50,code=compute_50 + -gencode arch=compute_52,code=sm_52 \ + -gencode arch=compute_60,code=sm_60 \ + -gencode arch=compute_61,code=sm_61 \ + -gencode arch=compute_61,code=compute_61 # BLAS choice: # atlas for ATLAS (default) # mkl for MKL # open for OpenBlas @@ -68,7 +73,7 @@ PYTHON_INCLUDE := /usr/include/python2.7 \ # ANACONDA_HOME := $(HOME)/anaconda # PYTHON_INCLUDE := $(ANACONDA_HOME)/include \ # $(ANACONDA_HOME)/include/python2.7 \ - # $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \ + # $(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include # Uncomment to use Python 3 (default is Python 2) # PYTHON_LIBRARIES := boost_python3 python3.5m @@ -94,6 +99,10 @@ LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib # INCLUDE_DIRS += $(shell brew --prefix)/include # LIBRARY_DIRS += $(shell brew --prefix)/lib +# NCCL acceleration switch (uncomment to build with NCCL) +# https://github.com/NVIDIA/nccl (last tested version: v1.2.3-1+cuda8.0) +# USE_NCCL := 1 + # Uncomment to use `pkg-config` to specify OpenCV library paths. # (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.) # USE_PKG_CONFIG := 1 diff --git a/README.md b/README.md index 44b9e62c157..fe259535865 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,25 @@ [![License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE) Caffe is a deep learning framework made with expression, speed, and modularity in mind. -It is developed by the Berkeley Vision and Learning Center ([BVLC](http://bvlc.eecs.berkeley.edu)) and community contributors. +It is developed by Berkeley AI Research ([BAIR](http://bair.berkeley.edu))/The Berkeley Vision and Learning Center (BVLC) and community contributors. Check out the [project site](http://caffe.berkeleyvision.org) for all the details like - [DIY Deep Learning for Vision with Caffe](https://docs.google.com/presentation/d/1UeKXVgRvvxg9OUdh_UiC5G71UMscNPlvArsWER41PsU/edit#slide=id.p) - [Tutorial Documentation](http://caffe.berkeleyvision.org/tutorial/) -- [BVLC reference models](http://caffe.berkeleyvision.org/model_zoo.html) and the [community model zoo](https://github.com/BVLC/caffe/wiki/Model-Zoo) +- [BAIR reference models](http://caffe.berkeleyvision.org/model_zoo.html) and the [community model zoo](https://github.com/BVLC/caffe/wiki/Model-Zoo) - [Installation instructions](http://caffe.berkeleyvision.org/installation.html) and step-by-step examples. +## Custom distributions + +- [Intel Caffe](https://github.com/BVLC/caffe/tree/intel), optimized for CPU with multi-node support, in particular for Xeon processors (HSW, BDW, SKX, Xeon Phi). +- [OpenCL Caffe](https://github.com/BVLC/caffe/tree/opencl), e.g. for AMD or Intel devices. +- [Windows Caffe](https://github.com/BVLC/caffe/tree/windows) + +## Community + [![Join the chat at https://gitter.im/BVLC/caffe](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/BVLC/caffe?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) Please join the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users) or [gitter chat](https://gitter.im/BVLC/caffe) to ask questions and talk about methods and models. @@ -25,7 +33,7 @@ Happy brewing!
## License and Citation Caffe is released under the [BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE). -The BVLC reference models are released for unrestricted use. +The BAIR/BVLC reference models are released for unrestricted use. Please cite Caffe in your publications if it helps your research: diff --git a/cmake/ConfigGen.cmake b/cmake/ConfigGen.cmake index 056371110b5..09bb09b4ff2 100644 --- a/cmake/ConfigGen.cmake +++ b/cmake/ConfigGen.cmake @@ -1,31 +1,4 @@ -################################################################################################ -# Helper function to fetch caffe includes which will be passed to dependent projects -# Usage: -# caffe_get_current_includes() -function(caffe_get_current_includes includes_variable) - get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES) - caffe_convert_absolute_paths(current_includes) - - # remove at most one ${PROJECT_BINARY_DIR} include added for caffe_config.h - list(FIND current_includes ${PROJECT_BINARY_DIR} __index) - list(REMOVE_AT current_includes ${__index}) - - # removing numpy includes (since not required for client libs) - set(__toremove "") - foreach(__i ${current_includes}) - if(${__i} MATCHES "python") - list(APPEND __toremove ${__i}) - endif() - endforeach() - if(__toremove) - list(REMOVE_ITEM current_includes ${__toremove}) - endif() - - caffe_list_unique(current_includes) - set(${includes_variable} ${current_includes} PARENT_SCOPE) -endfunction() - ################################################################################################ # Helper function to get all list items that begin with given prefix # Usage: @@ -47,60 +20,24 @@ endfunction() function(caffe_generate_export_configs) set(install_cmake_suffix "share/Caffe") - # ---[ Configure build-tree CaffeConfig.cmake file ]--- - caffe_get_current_includes(Caffe_INCLUDE_DIRS) - - set(Caffe_DEFINITIONS "") if(NOT HAVE_CUDA) set(HAVE_CUDA FALSE) - list(APPEND Caffe_DEFINITIONS -DCPU_ONLY) - endif() - - if(USE_OPENCV) - list(APPEND Caffe_DEFINITIONS -DUSE_OPENCV) - endif() - - if(USE_LMDB) - list(APPEND Caffe_DEFINITIONS -DUSE_LMDB) - if (ALLOW_LMDB_NOLOCK) - list(APPEND Caffe_DEFINITIONS -DALLOW_LMDB_NOLOCK) - endif() - endif() - - if(USE_LEVELDB) - list(APPEND Caffe_DEFINITIONS -DUSE_LEVELDB) endif() if(NOT HAVE_CUDNN) set(HAVE_CUDNN FALSE) - else() - list(APPEND DEFINITIONS -DUSE_CUDNN) endif() - if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") - list(APPEND Caffe_DEFINITIONS -DUSE_MKL) - endif() + # ---[ Configure build-tree CaffeConfig.cmake file ]--- configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/CaffeConfig.cmake" @ONLY) # Add targets to the build-tree export set - export(TARGETS caffe proto FILE "${PROJECT_BINARY_DIR}/CaffeTargets.cmake") + export(TARGETS caffe caffeproto FILE "${PROJECT_BINARY_DIR}/CaffeTargets.cmake") export(PACKAGE Caffe) # ---[ Configure install-tree CaffeConfig.cmake file ]--- - # remove source and build dir includes - caffe_get_items_with_prefix(${PROJECT_SOURCE_DIR} Caffe_INCLUDE_DIRS __insource) - caffe_get_items_with_prefix(${PROJECT_BINARY_DIR} Caffe_INCLUDE_DIRS __inbinary) - list(REMOVE_ITEM Caffe_INCLUDE_DIRS ${__insource} ${__inbinary}) - - # add `install` include folder - set(lines - "get_filename_component(__caffe_include \"\${Caffe_CMAKE_DIR}/../../include\" ABSOLUTE)\n" - "list(APPEND Caffe_INCLUDE_DIRS \${__caffe_include})\n" - "unset(__caffe_include)\n") - string(REPLACE ";" "" Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND ${lines}) - 
configure_file("cmake/Templates/CaffeConfig.cmake.in" "${PROJECT_BINARY_DIR}/cmake/CaffeConfig.cmake" @ONLY) # Install the CaffeConfig.cmake and export set to use with install-tree @@ -109,7 +46,7 @@ function(caffe_generate_export_configs) # ---[ Configure and install version file ]--- - # TODO: Lines below are commented because Caffe does't declare its version in headers. + # TODO: Lines below are commented because Caffe doesn't declare its version in headers. # When the declarations are added, modify `caffe_extract_caffe_version()` macro and uncomment # configure_file(cmake/Templates/CaffeConfigVersion.cmake.in "${PROJECT_BINARY_DIR}/CaffeConfigVersion.cmake" @ONLY) diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake index 286a42802b4..54e26fd5c8e 100644 --- a/cmake/Cuda.cmake +++ b/cmake/Cuda.cmake @@ -4,7 +4,7 @@ endif() # Known NVIDIA GPU achitectures Caffe can be compiled for. # This list will be used for CUDA_ARCH_NAME = All option -set(Caffe_known_gpu_archs "20 21(20) 30 35 50") +set(Caffe_known_gpu_archs "20 21(20) 30 35 50 60 61") ################################################################################################ # A function for automatic detection of GPUs installed (if autodetection is enabled) @@ -56,7 +56,7 @@ endfunction() # caffe_select_nvcc_arch_flags(out_variable) function(caffe_select_nvcc_arch_flags out_variable) # List of arch names - set(__archs_names "Fermi" "Kepler" "Maxwell" "All" "Manual") + set(__archs_names "Fermi" "Kepler" "Maxwell" "Pascal" "All" "Manual") set(__archs_name_default "All") if(NOT CMAKE_CROSSCOMPILING) list(APPEND __archs_names "Auto") @@ -89,6 +89,8 @@ function(caffe_select_nvcc_arch_flags out_variable) set(__cuda_arch_bin "30 35") elseif(${CUDA_ARCH_NAME} STREQUAL "Maxwell") set(__cuda_arch_bin "50") + elseif(${CUDA_ARCH_NAME} STREQUAL "Pascal") + set(__cuda_arch_bin "60 61") elseif(${CUDA_ARCH_NAME} STREQUAL "All") set(__cuda_arch_bin ${Caffe_known_gpu_archs}) elseif(${CUDA_ARCH_NAME} STREQUAL "Auto") @@ -174,11 +176,18 @@ function(detect_cuDNN) PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDA_TOOLKIT_INCLUDE} DOC "Path to cuDNN include directory." 
) - get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH) - find_library(CUDNN_LIBRARY NAMES libcudnn.so # libcudnn_static.a - PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDNN_INCLUDE} ${__libpath_hist} - DOC "Path to cuDNN library.") + # dynamic libs have different suffix in mac and linux + if(APPLE) + set(CUDNN_LIB_NAME "libcudnn.dylib") + else() + set(CUDNN_LIB_NAME "libcudnn.so") + endif() + get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH) + find_library(CUDNN_LIBRARY NAMES ${CUDNN_LIB_NAME} + PATHS ${CUDNN_ROOT} $ENV{CUDNN_ROOT} ${CUDNN_INCLUDE} ${__libpath_hist} ${__libpath_hist}/../lib + DOC "Path to cuDNN library.") + if(CUDNN_INCLUDE AND CUDNN_LIBRARY) set(HAVE_CUDNN TRUE PARENT_SCOPE) set(CUDNN_FOUND TRUE PARENT_SCOPE) @@ -223,7 +232,7 @@ endfunction() ################################################################################################ find_package(CUDA 5.5 QUIET) -find_cuda_helper_libs(curand) # cmake 2.8.7 compartibility which doesn't search for curand +find_cuda_helper_libs(curand) # cmake 2.8.7 compatibility which doesn't search for curand if(NOT CUDA_FOUND) return() @@ -231,17 +240,17 @@ endif() set(HAVE_CUDA TRUE) message(STATUS "CUDA detected: " ${CUDA_VERSION}) -include_directories(SYSTEM ${CUDA_INCLUDE_DIRS}) -list(APPEND Caffe_LINKER_LIBS ${CUDA_CUDART_LIBRARY} - ${CUDA_curand_LIBRARY} ${CUDA_CUBLAS_LIBRARIES}) +list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${CUDA_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS PUBLIC ${CUDA_CUDART_LIBRARY} + ${CUDA_curand_LIBRARY} ${CUDA_CUBLAS_LIBRARIES}) # cudnn detection if(USE_CUDNN) detect_cuDNN() if(HAVE_CUDNN) - add_definitions(-DUSE_CUDNN) - include_directories(SYSTEM ${CUDNN_INCLUDE}) - list(APPEND Caffe_LINKER_LIBS ${CUDNN_LIBRARY}) + list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_CUDNN) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${CUDNN_INCLUDE}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${CUDNN_LIBRARY}) endif() endif() @@ -275,7 +284,7 @@ mark_as_advanced(CUDA_SDK_ROOT_DIR CUDA_SEPARABLE_COMPILATION) if(APPLE) caffe_detect_darwin_version(OSX_VERSION) - # OSX 10.9 and higher uses clang/libc++ by default which is incompartible with old CUDA toolkits + # OSX 10.9 and higher uses clang/libc++ by default which is incompatible with old CUDA toolkits if(OSX_VERSION VERSION_GREATER 10.8) # enabled by default if and only if CUDA version is less than 7.0 caffe_option(USE_libstdcpp "Use libstdc++ instead of libc++" (CUDA_VERSION VERSION_LESS 7.0)) diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index c7b6a17aa69..c48255c89f2 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -1,57 +1,76 @@ -# This list is required for static linking and exported to CaffeConfig.cmake +# These lists are later turned into target properties on main caffe library target set(Caffe_LINKER_LIBS "") +set(Caffe_INCLUDE_DIRS "") +set(Caffe_DEFINITIONS "") +set(Caffe_COMPILE_OPTIONS "") # ---[ Boost -find_package(Boost 1.46 REQUIRED COMPONENTS system thread filesystem) -include_directories(SYSTEM ${Boost_INCLUDE_DIR}) -list(APPEND Caffe_LINKER_LIBS ${Boost_LIBRARIES}) +find_package(Boost 1.54 REQUIRED COMPONENTS system thread filesystem) +list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${Boost_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS PUBLIC ${Boost_LIBRARIES}) # ---[ Threads find_package(Threads REQUIRED) -list(APPEND Caffe_LINKER_LIBS ${CMAKE_THREAD_LIBS_INIT}) +list(APPEND Caffe_LINKER_LIBS PRIVATE ${CMAKE_THREAD_LIBS_INIT}) + +# ---[ OpenMP +if(USE_OPENMP) + # Ideally, this should be provided by the BLAS 
library IMPORTED target. However, + # nobody does this, so we need to link to OpenMP explicitly and have the maintainer + # flick the switch manually as needed. + # + # Moreover, the OpenMP package does not provide an IMPORTED target either, and the + # suggested way of linking to OpenMP is to append to CMAKE_{C,CXX}_FLAGS. + # However, this naïve method will force any user of Caffe to add the same kludge + # into their buildsystem again, so we put these options into per-target PUBLIC + # compile options and link flags, so that they will be exported properly. + find_package(OpenMP REQUIRED) + list(APPEND Caffe_LINKER_LIBS PRIVATE ${OpenMP_CXX_FLAGS}) + list(APPEND Caffe_COMPILE_OPTIONS PRIVATE ${OpenMP_CXX_FLAGS}) +endif() # ---[ Google-glog include("cmake/External/glog.cmake") -include_directories(SYSTEM ${GLOG_INCLUDE_DIRS}) -list(APPEND Caffe_LINKER_LIBS ${GLOG_LIBRARIES}) +list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${GLOG_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS PUBLIC ${GLOG_LIBRARIES}) # ---[ Google-gflags include("cmake/External/gflags.cmake") -include_directories(SYSTEM ${GFLAGS_INCLUDE_DIRS}) -list(APPEND Caffe_LINKER_LIBS ${GFLAGS_LIBRARIES}) +list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${GFLAGS_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS PUBLIC ${GFLAGS_LIBRARIES}) # ---[ Google-protobuf include(cmake/ProtoBuf.cmake) # ---[ HDF5 find_package(HDF5 COMPONENTS HL REQUIRED) -include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR}) -list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES}) +list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${HDF5_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS PUBLIC ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES}) # ---[ LMDB if(USE_LMDB) find_package(LMDB REQUIRED) - include_directories(SYSTEM ${LMDB_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES}) - add_definitions(-DUSE_LMDB) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${LMDB_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${LMDB_LIBRARIES}) + list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_LMDB) if(ALLOW_LMDB_NOLOCK) - add_definitions(-DALLOW_LMDB_NOLOCK) + list(APPEND Caffe_DEFINITIONS PRIVATE -DALLOW_LMDB_NOLOCK) endif() endif() # ---[ LevelDB if(USE_LEVELDB) find_package(LevelDB REQUIRED) - include_directories(SYSTEM ${LevelDB_INCLUDE}) - list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES}) - add_definitions(-DUSE_LEVELDB) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${LevelDB_INCLUDES}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${LevelDB_LIBRARIES}) + list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_LEVELDB) endif() # ---[ Snappy if(USE_LEVELDB) find_package(Snappy REQUIRED) - include_directories(SYSTEM ${Snappy_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES}) + list(APPEND Caffe_INCLUDE_DIRS PRIVATE ${Snappy_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS PRIVATE ${Snappy_LIBRARIES}) endif() # ---[ CUDA @@ -63,8 +82,14 @@ if(NOT HAVE_CUDA) message(WARNING "-- CUDA is not detected by cmake. Building without it...") endif() - # TODO: remove this not cross platform define in future. Use caffe_config.h instead.
- add_definitions(-DCPU_ONLY) + list(APPEND Caffe_DEFINITIONS PUBLIC -DCPU_ONLY) +endif() + +if(USE_NCCL) + find_package(NCCL REQUIRED) + include_directories(SYSTEM ${NCCL_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS ${NCCL_LIBRARIES}) + add_definitions(-DUSE_NCCL) endif() # ---[ OpenCV @@ -73,10 +98,10 @@ if(USE_OPENCV) if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc) endif() - include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS}) - list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS}) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${OpenCV_INCLUDE_DIRS}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${OpenCV_LIBS}) message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})") - add_definitions(-DUSE_OPENCV) + list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_OPENCV) endif() # ---[ BLAS @@ -86,22 +111,28 @@ if(NOT APPLE) if(BLAS STREQUAL "Atlas" OR BLAS STREQUAL "atlas") find_package(Atlas REQUIRED) - include_directories(SYSTEM ${Atlas_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${Atlas_LIBRARIES}) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${Atlas_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${Atlas_LIBRARIES}) elseif(BLAS STREQUAL "Open" OR BLAS STREQUAL "open") find_package(OpenBLAS REQUIRED) - include_directories(SYSTEM ${OpenBLAS_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${OpenBLAS_LIB}) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${OpenBLAS_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${OpenBLAS_LIB}) elseif(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") find_package(MKL REQUIRED) - include_directories(SYSTEM ${MKL_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${MKL_LIBRARIES}) - add_definitions(-DUSE_MKL) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${MKL_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${MKL_LIBRARIES}) + list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_MKL) endif() elseif(APPLE) find_package(vecLib REQUIRED) - include_directories(SYSTEM ${vecLib_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${vecLib_LINKER_LIBS}) + list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${vecLib_INCLUDE_DIR}) + list(APPEND Caffe_LINKER_LIBS PUBLIC ${vecLib_LINKER_LIBS}) + + if(VECLIB_FOUND) + if(NOT vecLib_INCLUDE_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*") + list(APPEND Caffe_DEFINITIONS PUBLIC -DUSE_ACCELERATE) + endif() + endif() endif() # ---[ Python @@ -113,18 +144,18 @@ if(BUILD_python) find_package(NumPy 1.7.1) # Find the matching boost python implementation set(version ${PYTHONLIBS_VERSION_STRING}) - + STRING( REGEX REPLACE "[^0-9]" "" boost_py_version ${version} ) find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}") set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND}) - + while(NOT "${version}" STREQUAL "" AND NOT Boost_PYTHON_FOUND) STRING( REGEX REPLACE "([0-9.]+).[0-9]+" "\\1" version ${version} ) - + STRING( REGEX REPLACE "[^0-9]" "" boost_py_version ${version} ) find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}") set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND}) - + STRING( REGEX MATCHALL "([0-9.]+).[0-9]+" has_more_version ${version} ) if("${has_more_version}" STREQUAL "") break() @@ -143,9 +174,9 @@ if(BUILD_python) if(PYTHONLIBS_FOUND AND NUMPY_FOUND AND Boost_PYTHON_FOUND) set(HAVE_PYTHON TRUE) if(BUILD_python_layer) - add_definitions(-DWITH_PYTHON_LAYER) - include_directories(SYSTEM ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) - list(APPEND Caffe_LINKER_LIBS ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) + list(APPEND Caffe_DEFINITIONS 
PRIVATE -DWITH_PYTHON_LAYER) + list(APPEND Caffe_INCLUDE_DIRS PRIVATE ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} PUBLIC ${Boost_INCLUDE_DIRS}) + list(APPEND Caffe_LINKER_LIBS PRIVATE ${PYTHON_LIBRARIES} PUBLIC ${Boost_LIBRARIES}) endif() endif() endif() diff --git a/cmake/External/glog.cmake b/cmake/External/glog.cmake index a44672f2753..f9d0549cd90 100644 --- a/cmake/External/glog.cmake +++ b/cmake/External/glog.cmake @@ -37,6 +37,7 @@ if (NOT __GLOG_INCLUDED) GIT_TAG "v0.3.4" UPDATE_COMMAND "" INSTALL_DIR ${gflags_INSTALL} + PATCH_COMMAND autoreconf -i ${glog_PREFIX}/src/glog CONFIGURE_COMMAND env "CFLAGS=${GLOG_C_FLAGS}" "CXXFLAGS=${GLOG_CXX_FLAGS}" ${glog_PREFIX}/src/glog/configure --prefix=${glog_INSTALL} --enable-shared=no --enable-static=yes --with-gflags=${GFLAGS_LIBRARY_DIRS}/.. LOG_DOWNLOAD 1 LOG_CONFIGURE 1 diff --git a/cmake/Misc.cmake b/cmake/Misc.cmake index 9dd2609b36a..fcb246472f0 100644 --- a/cmake/Misc.cmake +++ b/cmake/Misc.cmake @@ -32,9 +32,10 @@ endif() set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE CACHE BOOLEAN "Use link paths for shared library rpath") set(CMAKE_MACOSX_RPATH TRUE) -list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES ${CMAKE_INSTALL_PREFIX}/lib __is_systtem_dir) +list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES + ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} __is_systtem_dir) if(${__is_systtem_dir} STREQUAL -1) - set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/lib) + set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) endif() # ---[ Funny target diff --git a/cmake/Modules/FindAtlas.cmake b/cmake/Modules/FindAtlas.cmake index 6e1564351c7..7ffa6393bbc 100644 --- a/cmake/Modules/FindAtlas.cmake +++ b/cmake/Modules/FindAtlas.cmake @@ -26,9 +26,9 @@ set(Atlas_LIB_SEARCH_PATHS find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) -find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) -find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) -find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_LAPACK_LIBRARY NAMES lapack alapack_r alapack lapack_atlas atllapack PATHS ${Atlas_LIB_SEARCH_PATHS}) set(LOOKED_FOR Atlas_CBLAS_INCLUDE_DIR @@ -47,6 +47,6 @@ if(ATLAS_FOUND) set(Atlas_LIBRARIES ${Atlas_LAPACK_LIBRARY} ${Atlas_CBLAS_LIBRARY} ${Atlas_BLAS_LIBRARY}) mark_as_advanced(${LOOKED_FOR}) - message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR}, library: ${Atlas_BLAS_LIBRARY})") + message(STATUS "Found Atlas (include: ${Atlas_CBLAS_INCLUDE_DIR} library: ${Atlas_BLAS_LIBRARY} lapack: ${Atlas_LAPACK_LIBRARY})") endif(ATLAS_FOUND) diff --git a/cmake/Modules/FindNCCL.cmake b/cmake/Modules/FindNCCL.cmake new file mode 100644 index 00000000000..c8845934102 --- /dev/null +++ b/cmake/Modules/FindNCCL.cmake @@ -0,0 +1,26 @@ +set(NCCL_INC_PATHS + /usr/include + /usr/local/include + $ENV{NCCL_DIR}/include + ) + +set(NCCL_LIB_PATHS + /lib + /lib64 + /usr/lib + /usr/lib64 + /usr/local/lib + /usr/local/lib64 + $ENV{NCCL_DIR}/lib + ) + +find_path(NCCL_INCLUDE_DIR NAMES nccl.h PATHS ${NCCL_INC_PATHS}) +find_library(NCCL_LIBRARIES NAMES nccl PATHS ${NCCL_LIB_PATHS}) + 
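+# find_package_handle_standard_args sets NCCL_FOUND and handles the
+# QUIET/REQUIRED arguments of find_package() based on the two cache
+# variables populated by the searches above.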
+include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIR NCCL_LIBRARIES) + +if (NCCL_FOUND) + message(STATUS "Found NCCL (include: ${NCCL_INCLUDE_DIR}, library: ${NCCL_LIBRARIES})") + mark_as_advanced(NCCL_INCLUDE_DIR NCCL_LIBRARIES) +endif () diff --git a/cmake/Modules/FindvecLib.cmake b/cmake/Modules/FindvecLib.cmake index 9600da43647..4d44e613a00 100644 --- a/cmake/Modules/FindvecLib.cmake +++ b/cmake/Modules/FindvecLib.cmake @@ -12,11 +12,13 @@ endif() set(__veclib_include_suffix "Frameworks/vecLib.framework/Versions/Current/Headers") +exec_program(xcode-select ARGS -print-path OUTPUT_VARIABLE CMAKE_XCODE_DEVELOPER_DIR) find_path(vecLib_INCLUDE_DIR vecLib.h DOC "vecLib include directory" - PATHS /System/Library/${__veclib_include_suffix} - /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} - /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/) + PATHS /System/Library/Frameworks/Accelerate.framework/Versions/Current/${__veclib_include_suffix} + /System/Library/${__veclib_include_suffix} + ${CMAKE_XCODE_DEVELOPER_DIR}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + NO_DEFAULT_PATH) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(vecLib DEFAULT_MSG vecLib_INCLUDE_DIR) diff --git a/cmake/ProtoBuf.cmake b/cmake/ProtoBuf.cmake index 73f647f5fae..8005b448707 100644 --- a/cmake/ProtoBuf.cmake +++ b/cmake/ProtoBuf.cmake @@ -2,8 +2,8 @@ # the standard cmake script with version and python generation support find_package( Protobuf REQUIRED ) -include_directories(SYSTEM ${PROTOBUF_INCLUDE_DIR}) -list(APPEND Caffe_LINKER_LIBS ${PROTOBUF_LIBRARIES}) +list(APPEND Caffe_INCLUDE_DIRS PUBLIC ${PROTOBUF_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS PUBLIC ${PROTOBUF_LIBRARIES}) # As of Ubuntu 14.04 protoc is no longer a part of libprotobuf-dev package # and should be installed separately as in: sudo apt-get install protobuf-compiler diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake index ba025cf81e0..ed8c25268db 100644 --- a/cmake/Summary.cmake +++ b/cmake/Summary.cmake @@ -117,6 +117,7 @@ function(caffe_print_configuration_summary) caffe_status(" USE_OPENCV : ${USE_OPENCV}") caffe_status(" USE_LEVELDB : ${USE_LEVELDB}") caffe_status(" USE_LMDB : ${USE_LMDB}") + caffe_status(" USE_NCCL : ${USE_NCCL}") caffe_status(" ALLOW_LMDB_NOLOCK : ${ALLOW_LMDB_NOLOCK}") caffe_status("") caffe_status("Dependencies:") diff --git a/cmake/Targets.cmake b/cmake/Targets.cmake index a796d00548f..090f86c5500 100644 --- a/cmake/Targets.cmake +++ b/cmake/Targets.cmake @@ -88,13 +88,13 @@ function(caffe_pickup_caffe_sources root) file(GLOB_RECURSE proto_files ${root}/src/caffe/*.proto) list(APPEND srcs ${proto_files}) - # convet to absolute paths + # convert to absolute paths caffe_convert_absolute_paths(srcs) caffe_convert_absolute_paths(cuda) caffe_convert_absolute_paths(test_srcs) caffe_convert_absolute_paths(test_cuda) - # propogate to parent scope + # propagate to parent scope set(srcs ${srcs} PARENT_SCOPE) set(cuda ${cuda} PARENT_SCOPE) set(test_srcs ${test_srcs} PARENT_SCOPE) @@ -102,7 +102,7 @@ function(caffe_pickup_caffe_sources root) endfunction() ################################################################################################ -# 
Short command for setting defeault target properties +# Short command for setting default target properties # Usage: # caffe_default_properties() function(caffe_default_properties target) @@ -111,7 +111,7 @@ ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib" LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib" RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bin") - # make sure we build all external depepdencies first + # make sure we build all external dependencies first if (DEFINED external_project_dependencies) add_dependencies(${target} ${external_project_dependencies}) endif() diff --git a/cmake/Templates/CaffeConfig.cmake.in b/cmake/Templates/CaffeConfig.cmake.in index 73f57ac2d74..77c4059e560 100644 --- a/cmake/Templates/CaffeConfig.cmake.in +++ b/cmake/Templates/CaffeConfig.cmake.in @@ -9,9 +9,9 @@ # After successful configuration the following variables # will be defined: # -# Caffe_INCLUDE_DIRS - Caffe include directories -# Caffe_LIBRARIES - libraries to link against -# Caffe_DEFINITIONS - a list of definitions to pass to compiler +# Caffe_LIBRARIES - IMPORTED targets to link against +# (There are no Caffe_INCLUDE_DIRS and Caffe_DEFINITIONS +# because they are specified in the IMPORTED target interface.) # # Caffe_HAVE_CUDA - signals about CUDA support # Caffe_HAVE_CUDNN - signals about cuDNN support @@ -27,7 +27,7 @@ if(@USE_OPENCV@) if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core) message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}") - include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake) + include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVConfig.cmake) endif() else() @@ -39,9 +39,6 @@ endif() # Compute paths get_filename_component(Caffe_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) -set(Caffe_INCLUDE_DIRS "@Caffe_INCLUDE_DIRS@") - -@Caffe_INSTALL_INCLUDE_DIR_APPEND_COMMAND@ # Our library dependencies if(NOT TARGET caffe AND NOT caffe_BINARY_DIR) @@ -49,11 +46,9 @@ endif() # List of IMPORTED libs created by CaffeTargets.cmake +# These targets already specify all needed definitions and include paths set(Caffe_LIBRARIES caffe) -# Definitions -set(Caffe_DEFINITIONS "@Caffe_DEFINITIONS@") - # Cuda support variables set(Caffe_CPU_ONLY @CPU_ONLY@) set(Caffe_HAVE_CUDA @HAVE_CUDA@) diff --git a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in index 8a31b43cabf..2080c63df36 100644 --- a/cmake/Templates/caffe_config.h.in +++ b/cmake/Templates/caffe_config.h.in @@ -4,35 +4,9 @@ /* Binaries directory */ #define BINARY_FOLDER "${PROJECT_BINARY_DIR}" -/* NVIDA Cuda */ -#cmakedefine HAVE_CUDA - -/* NVIDA cuDNN */ -#cmakedefine HAVE_CUDNN -#cmakedefine USE_CUDNN - -/* NVIDA cuDNN */ -#cmakedefine CPU_ONLY +/* This is an absolute path so that we can run test from any build + * directory */ +#define ABS_TEST_DATA_DIR "${PROJECT_SOURCE_DIR}/src/caffe/test/test_data/" /* Test device */ #define CUDA_TEST_DEVICE ${CUDA_TEST_DEVICE} -/* Temporary (TODO: remove) */ -#if 1 - #define CMAKE_SOURCE_DIR SOURCE_FOLDER "/src/" - #define EXAMPLES_SOURCE_DIR BINARY_FOLDER "/examples/" - #define CMAKE_EXT ".gen.cmake" -#else - #define CMAKE_SOURCE_DIR "src/" - #define EXAMPLES_SOURCE_DIR "examples/" - #define CMAKE_EXT "" -#endif - -/* Matlab */ -#cmakedefine HAVE_MATLAB - -/* IO libraries */ -#cmakedefine USE_OPENCV -#cmakedefine USE_LEVELDB -#cmakedefine USE_LMDB -#cmakedefine ALLOW_LMDB_NOLOCK diff --git a/cmake/Uninstall.cmake.in b/cmake/Uninstall.cmake.in new
file mode 100644 index 00000000000..bb8e2964e46 --- /dev/null +++ b/cmake/Uninstall.cmake.in @@ -0,0 +1,26 @@ +if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + message(FATAL_ERROR "Cannot find install manifest: @CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") +endif(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + +if (NOT DEFINED CMAKE_INSTALL_PREFIX) + set (CMAKE_INSTALL_PREFIX "@CMAKE_INSTALL_PREFIX@") +endif () + message(${CMAKE_INSTALL_PREFIX}) + +file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) +string(REGEX REPLACE "\n" ";" files "${files}") +foreach(file ${files}) + message(STATUS "Uninstalling $ENV{DESTDIR}${file}") + if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + exec_program( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + if(NOT "${rm_retval}" STREQUAL 0) + message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") + endif(NOT "${rm_retval}" STREQUAL 0) + else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + message(STATUS "File $ENV{DESTDIR}${file} does not exist.") + endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") +endforeach(file) \ No newline at end of file diff --git a/data/cifar10/get_cifar10.sh b/data/cifar10/get_cifar10.sh index 623c848513e..423f10989c4 100755 --- a/data/cifar10/get_cifar10.sh +++ b/data/cifar10/get_cifar10.sh @@ -2,7 +2,7 @@ # This scripts downloads the CIFAR10 (binary version) data and unzips it. DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR +cd "$DIR" echo "Downloading..." diff --git a/data/ilsvrc12/get_ilsvrc_aux.sh b/data/ilsvrc12/get_ilsvrc_aux.sh index 90935f25099..dc0d0a72790 100755 --- a/data/ilsvrc12/get_ilsvrc_aux.sh +++ b/data/ilsvrc12/get_ilsvrc_aux.sh @@ -8,7 +8,7 @@ # - the training splits with labels DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR +cd "$DIR" echo "Downloading..." diff --git a/data/mnist/get_mnist.sh b/data/mnist/get_mnist.sh index 6d875219489..ecadffa44f7 100755 --- a/data/mnist/get_mnist.sh +++ b/data/mnist/get_mnist.sh @@ -2,7 +2,7 @@ # This scripts downloads the mnist data and unzips it. DIR="$( cd "$(dirname "$0")" ; pwd -P )" -cd $DIR +cd "$DIR" echo "Downloading..." diff --git a/docker/Makefile b/docker/Makefile deleted file mode 100644 index 3a6575b0c43..00000000000 --- a/docker/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# A makefile to build the docker images for caffe. -# Two caffe images will be built: -# caffe:cpu --> A CPU-only build of caffe. -# caffe:gpu --> A GPU-enabled build using the latest CUDA and CUDNN versions. - -DOCKER ?= docker - -all: docker_files standalone - -.PHONY: standalone devel - -standalone: cpu_standalone gpu_standalone - - -cpu_standalone: standalone/cpu/Dockerfile - $(DOCKER) build -t caffe:cpu standalone/cpu - -gpu_standalone: standalone/gpu/Dockerfile - $(DOCKER) build -t caffe:gpu standalone/gpu - -docker_files: standalone_files - -standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile - -FROM_GPU = "nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04" -FROM_CPU = "ubuntu:14.04" -GPU_CMAKE_ARGS = -DUSE_CUDNN=1 -CPU_CMAKE_ARGS = -DCPU_ONLY=1 - -# A make macro to select the CPU or GPU base image. -define from_image -$(if $(strip $(findstring gpu,$@)),$(FROM_GPU),$(FROM_CPU)) -endef - -# A make macro to select the CPU or GPU build args. 
-define build_args -$(if $(strip $(findstring gpu,$@)),$(GPU_CMAKE_ARGS),$(CPU_CMAKE_ARGS)) -endef - -# A make macro to construct the CPU or GPU Dockerfile from the template -define create_docker_file - @echo creating $@ - @echo "FROM "$(from_image) > $@ - @cat $^ | sed 's/$${CMAKE_ARGS}/$(build_args)/' >> $@ -endef - - -standalone/%/Dockerfile: templates/Dockerfile.template - $(create_docker_file) - diff --git a/docker/README.md b/docker/README.md index fdab641bdca..f9c7c756fe6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,52 +1,47 @@ -# Caffe standalone Dockerfiles. +### Running an official image -The `standalone` subfolder contains docker files for generating both CPU and GPU executable images for Caffe. The images can be built using make, or by running: +You can run one of the automatic [builds](https://hub.docker.com/r/bvlc/caffe). E.g. for the CPU version: -``` -docker build -t caffe:cpu standalone/cpu -``` -for example. (Here `gpu` can be substituted for `cpu`, but to keep the readme simple, only the `cpu` case will be discussed in detail). +`docker run -ti bvlc/caffe:cpu caffe --version` -Note that the GPU standalone requires a CUDA 7.5 capable driver to be installed on the system and [nvidia-docker] for running the Docker containers. Here it is generally sufficient to use `nvidia-docker` instead of `docker` in any of the commands mentioned. +or for GPU support (You need a CUDA 8.0 capable driver and +[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)): -# Running Caffe using the docker image +`nvidia-docker run -ti bvlc/caffe:gpu caffe --version` -In order to test the Caffe image, run: -``` -docker run -ti caffe:cpu caffe --version -``` -which should show a message like: -``` -libdc1394 error: Failed to initialize libdc1394 -caffe version 1.0.0-rc3 -``` +You might see an error about libdc1394, ignore it. -One can also build and run the Caffe tests in the image using: -``` -docker run -ti caffe:cpu bash -c "cd /opt/caffe/build; make runtest" -``` +### Docker run options -In order to get the most out of the caffe image, some more advanced `docker run` options could be used. For example, running: -``` -docker run -ti --volume=$(pwd):/workspace caffe:cpu caffe train --solver=example_solver.prototxt -``` -will train a network defined in the `example_solver.prototxt` file in the current directory (`$(pwd)` is maped to the container volume `/workspace` using the `--volume=` Docker flag). +By default caffe runs as root, thus any output files, e.g. snapshots, will be owned +by root. It also runs by default in a container-private folder. -Note that docker runs all commands as root by default, and thus any output files (e.g. snapshots) generated will be owned by the root user. In order to ensure that the current user is used instead, the following command can be used: -``` -docker run -ti --volume=$(pwd):/workspace -u $(id -u):$(id -g) caffe:cpu caffe train --solver=example_solver.prototxt -``` -where the `-u` Docker command line option runs the commands in the container as the specified user, and the shell command `id` is used to determine the user and group ID of the current user. Note that the Caffe docker images have `/workspace` defined as the default working directory. This can be overridden using the `--workdir=` Docker command line option. +You can change this using flags, like user (-u), current directory, and volumes (-w and -v). +E.g. 
this behaves like the usual caffe executable: -# Other use-cases +`docker run --rm -u $(id -u):$(id -g) -v $(pwd):$(pwd) -w $(pwd) bvlc/caffe:cpu caffe train --solver=example_solver.prototxt` -Although running the `caffe` command in the docker containers as described above serves many purposes, the container can also be used for more interactive use cases. For example, specifying `bash` as the command instead of `caffe` yields a shell that can be used for interactive tasks. (Since the caffe build requirements are included in the container, this can also be used to build and run local versions of caffe). +Containers can also be used interactively, specifying e.g. `bash` or `ipython` +instead of `caffe`. -Another use case is to run python scripts that depend on `caffe`'s Python modules. Using the `python` command instead of `bash` or `caffe` will allow this, and an interactive interpreter can be started by running: ``` -docker run -ti caffe:cpu python +docker run -ti bvlc/caffe:cpu ipython +import caffe +... ``` -(`ipython` is also available in the container). -Since the `caffe/python` folder is also added to the path, the utility executable scripts defined there can also be used as executables. This includes `draw_net.py`, `classify.py`, and `detect.py` +The caffe build requirements are included in the container, so this can be used to +build and run custom versions of caffe. Also, `caffe/python` is in PATH, so python +utilities can be used directly, e.g. `draw_net.py`, `classify.py`, or `detect.py`. + +### Building images yourself + +Examples: + +`docker build -t caffe:cpu cpu` + +`docker build -t caffe:gpu gpu` + +You can also build Caffe and run the tests in the image: +`docker run -ti caffe:cpu bash -c "cd /opt/caffe/build; make runtest"` diff --git a/docker/standalone/cpu/Dockerfile b/docker/cpu/Dockerfile similarity index 73% rename from docker/standalone/cpu/Dockerfile rename to docker/cpu/Dockerfile index 4fef25aa6a1..67e2e61bd57 100644 --- a/docker/standalone/cpu/Dockerfile +++ b/docker/cpu/Dockerfile @@ -1,5 +1,5 @@ -FROM ubuntu:14.04 -MAINTAINER caffe-maint@googlegroups.com +FROM ubuntu:16.04 +LABEL maintainer caffe-maint@googlegroups.com RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ @@ -20,17 +20,20 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ python-dev \ python-numpy \ python-pip \ + python-setuptools \ python-scipy && \ rm -rf /var/lib/apt/lists/* ENV CAFFE_ROOT=/opt/caffe WORKDIR $CAFFE_ROOT -# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. -ENV CLONE_TAG=master +# FIXME: use ARG instead of ENV once DockerHub supports this +# https://github.com/docker/hub-feedback/issues/460 +ENV CLONE_TAG=1.0 RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ - for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ + pip install --upgrade pip && \ + cd python && for req in $(cat requirements.txt) pydot; do pip install $req; done && cd .. && \ mkdir build && cd build && \ cmake -DCPU_ONLY=1 .. 
&& \ make -j"$(nproc)" diff --git a/docker/standalone/gpu/Dockerfile b/docker/gpu/Dockerfile similarity index 64% rename from docker/standalone/gpu/Dockerfile rename to docker/gpu/Dockerfile index daf6a7223ff..dcdbdf326fb 100644 --- a/docker/standalone/gpu/Dockerfile +++ b/docker/gpu/Dockerfile @@ -1,5 +1,5 @@ -FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 -MAINTAINER caffe-maint@googlegroups.com +FROM nvidia/cuda:8.0-cudnn6-devel-ubuntu16.04 +LABEL maintainer caffe-maint@googlegroups.com RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ @@ -20,19 +20,23 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ python-dev \ python-numpy \ python-pip \ + python-setuptools \ python-scipy && \ rm -rf /var/lib/apt/lists/* ENV CAFFE_ROOT=/opt/caffe WORKDIR $CAFFE_ROOT -# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. -ENV CLONE_TAG=master +# FIXME: use ARG instead of ENV once DockerHub supports this +# https://github.com/docker/hub-feedback/issues/460 +ENV CLONE_TAG=1.0 RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ - for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ + pip install --upgrade pip && \ + cd python && for req in $(cat requirements.txt) pydot; do pip install $req; done && cd .. && \ + git clone https://github.com/NVIDIA/nccl.git && cd nccl && make -j install && cd .. && rm -rf nccl && \ mkdir build && cd build && \ - cmake -DUSE_CUDNN=1 .. && \ + cmake -DUSE_CUDNN=1 -DUSE_NCCL=1 .. && \ make -j"$(nproc)" ENV PYCAFFE_ROOT $CAFFE_ROOT/python diff --git a/docker/templates/Dockerfile.template b/docker/templates/Dockerfile.template deleted file mode 100644 index 8834f057968..00000000000 --- a/docker/templates/Dockerfile.template +++ /dev/null @@ -1,42 +0,0 @@ -MAINTAINER caffe-maint@googlegroups.com - -RUN apt-get update && apt-get install -y --no-install-recommends \ - build-essential \ - cmake \ - git \ - wget \ - libatlas-base-dev \ - libboost-all-dev \ - libgflags-dev \ - libgoogle-glog-dev \ - libhdf5-serial-dev \ - libleveldb-dev \ - liblmdb-dev \ - libopencv-dev \ - libprotobuf-dev \ - libsnappy-dev \ - protobuf-compiler \ - python-dev \ - python-numpy \ - python-pip \ - python-scipy && \ - rm -rf /var/lib/apt/lists/* - -ENV CAFFE_ROOT=/opt/caffe -WORKDIR $CAFFE_ROOT - -# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. -ENV CLONE_TAG=master - -RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ - for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ - mkdir build && cd build && \ - cmake ${CMAKE_ARGS} .. && \ - make -j"$(nproc)" - -ENV PYCAFFE_ROOT $CAFFE_ROOT/python -ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH -ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH -RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig - -WORKDIR /workspace diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html index b8efe60bc3b..3799e95afde 100644 --- a/docs/_layouts/default.html +++ b/docs/_layouts/default.html @@ -36,7 +36,7 @@

Caffe - Deep learning framework by the BVLC + Deep learning framework by BAIR Created by diff --git a/docs/development.md b/docs/development.md index 107c2c3b281..36cd399512e 100644 --- a/docs/development.md +++ b/docs/development.md @@ -4,7 +4,7 @@ title: Developing and Contributing # Development and Contributing Caffe is developed with active participation of the community.
-The [BVLC](http://bvlc.eecs.berkeley.edu/) brewers welcome all contributions! +The [BAIR](http://bair.berkeley.edu/)/BVLC brewers welcome all contributions! The exact details of contributions are recorded by versioning and cited in our [acknowledgements](http://caffe.berkeleyvision.org/#acknowledgements). This method is impartial and always up-to-date. @@ -37,7 +37,7 @@ We absolutely appreciate any contribution to this effort! The `master` branch receives all new development including community contributions. We try to keep it in a reliable state, but it is the bleeding edge, and things do get broken every now and then. -BVLC maintainers will periodically make releases by marking stable checkpoints as tags and maintenance branches. [Past releases](https://github.com/BVLC/caffe/releases) are catalogued online. +BAIR maintainers will periodically make releases by marking stable checkpoints as tags and maintenance branches. [Past releases](https://github.com/BVLC/caffe/releases) are catalogued online. #### Issues & Pull Request Protocol @@ -116,5 +116,5 @@ To get a list of all options `googletest` provides, simply pass the `--help` fla - **Run `make lint` to check C++ code.** - Wrap lines at 80 chars. -- Follow [Google C++ style](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml) and [Google python style](http://google-styleguide.googlecode.com/svn/trunk/pyguide.html) + [PEP 8](http://legacy.python.org/dev/peps/pep-0008/). +- Follow [Google C++ style](https://google.github.io/styleguide/cppguide.html) and [Google python style](https://google.github.io/styleguide/pyguide.html) + [PEP 8](http://legacy.python.org/dev/peps/pep-0008/). - Remember that “a foolish consistency is the hobgoblin of little minds,” so use your best judgement to write the clearest code for your particular case. diff --git a/docs/index.md b/docs/index.md index 932b3b58d1d..b633f7cfddc 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,7 +5,7 @@ title: Deep Learning Framework # Caffe Caffe is a deep learning framework made with expression, speed, and modularity in mind. -It is developed by the Berkeley Vision and Learning Center ([BVLC](http://bvlc.eecs.berkeley.edu)) and by community contributors. +It is developed by Berkeley AI Research ([BAIR](http://bair.berkeley.edu)) and by community contributors. [Yangqing Jia](http://daggerfs.com) created the project during his PhD at UC Berkeley. Caffe is released under the [BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE). @@ -23,21 +23,20 @@ Thanks to these contributors the framework tracks the state-of-the-art in both c **Speed** makes Caffe perfect for research experiments and industry deployment. Caffe can process **over 60M images per day** with a single NVIDIA K40 GPU\*. -That's 1 ms/image for inference and 4 ms/image for learning. -We believe that Caffe is the fastest convnet implementation available. +That's 1 ms/image for inference and 4 ms/image for learning and more recent library versions and hardware are faster still. +We believe that Caffe is among the fastest convnet implementations available. **Community**: Caffe already powers academic research projects, startup prototypes, and even large-scale industrial applications in vision, speech, and multimedia. Join our community of brewers on the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users) and [Github](https://github.com/BVLC/caffe/).

-\* With the ILSVRC2012-winning [SuperVision](http://www.image-net.org/challenges/LSVRC/2012/supervision.pdf) model and caching IO. -Consult performance [details](/performance_hardware.html). +\* With the ILSVRC2012-winning [SuperVision](http://www.image-net.org/challenges/LSVRC/2012/supervision.pdf) model and prefetching IO.

## Documentation -- [DIY Deep Learning for Vision with Caffe](https://docs.google.com/presentation/d/1UeKXVgRvvxg9OUdh_UiC5G71UMscNPlvArsWER41PsU/edit#slide=id.p)
-Tutorial presentation. +- [DIY Deep Learning for Vision with Caffe](https://docs.google.com/presentation/d/1UeKXVgRvvxg9OUdh_UiC5G71UMscNPlvArsWER41PsU/edit#slide=id.p) and [Caffe in a Day](https://docs.google.com/presentation/d/1HxGdeq8MPktHaPb-rlmYYQ723iWzq9ur6Gjo71YiG0Y/edit#slide=id.gc2fcdcce7_216_0)
+Tutorial presentation of the framework and a full-day crash course. - [Tutorial Documentation](/tutorial)
Practical guide and framework reference. - [arXiv / ACM MM '14 paper](http://arxiv.org/abs/1408.5093)
@@ -45,18 +44,13 @@ A 4-page report for the ACM Multimedia Open Source competition (arXiv:1408.5093v - [Installation instructions](/installation.html)
Tested on Ubuntu, Red Hat, OS X. * [Model Zoo](/model_zoo.html)
-BVLC suggests a standard distribution format for Caffe models, and provides trained models. +BAIR suggests a standard distribution format for Caffe models, and provides trained models. * [Developing & Contributing](/development.html)
Guidelines for development and contributing to Caffe. * [API Documentation](/doxygen/annotated.html)
Developer documentation automagically generated from code comments. - -### Examples - -{% assign examples = site.pages | where:'category','example' | sort: 'priority' %} -{% for page in examples %} --
{{page.title}}
{{page.description}}
-{% endfor %} +* [Benchmarking](https://docs.google.com/spreadsheets/d/1Yp4rqHpT7mKxOPbpzYeUfEFLnELDAgxSSBQKp5uKDGQ/edit#gid=0)
+Comparison of inference and learning for different networks and GPUs. ### Notebook Examples @@ -65,6 +59,13 @@ Developer documentation automagically generated from code comments. -
{{page.title}}
{{page.description}}
{% endfor %} +### Command Line Examples + +{% assign examples = site.pages | where:'category','example' | sort: 'priority' %} +{% for page in examples %} +-
{{page.title}}
{{page.description}}
+{% endfor %} + ## Citing Caffe Please cite Caffe in your publications if it helps your research: @@ -76,8 +77,7 @@ Please cite Caffe in your publications if it helps your research: Year = {2014} } -If you do publish a paper where Caffe helped your research, we encourage you to update the [publications wiki](https://github.com/BVLC/caffe/wiki/Publications). -Citations are also tracked automatically by [Google Scholar](http://scholar.google.com/scholar?oi=bibs&hl=en&cites=17333247995453974016). +If you do publish a paper where Caffe helped your research, we encourage you to cite the framework for tracking by [Google Scholar](https://scholar.google.com/citations?view_op=view_citation&hl=en&citation_for_view=-ltRSM0AAAAJ:u5HHmVD_uO8C). ## Contacting Us @@ -85,17 +85,12 @@ Join the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users Framework development discussions and thorough bug reports are collected on [Issues](https://github.com/BVLC/caffe/issues). -Contact [caffe-dev](mailto:caffe-dev@googlegroups.com) if you have a confidential proposal for the framework *and the ability to act on it*. -Requests for features, explanations, or personal help will be ignored; post to [caffe-users](https://groups.google.com/forum/#!forum/caffe-users) instead. - -The core Caffe developers offer [consulting services](mailto:caffe-coldpress@googlegroups.com) for appropriate projects. - ## Acknowledgements -The BVLC Caffe developers would like to thank NVIDIA for GPU donation, A9 and Amazon Web Services for a research grant in support of Caffe development and reproducible research in deep learning, and BVLC PI [Trevor Darrell](http://www.eecs.berkeley.edu/~trevor/) for guidance. +The BAIR Caffe developers would like to thank NVIDIA for GPU donation, A9 and Amazon Web Services for a research grant in support of Caffe development and reproducible research in deep learning, and BAIR PI [Trevor Darrell](http://www.eecs.berkeley.edu/~trevor/) for guidance. -The BVLC members who have contributed to Caffe are (alphabetical by first name): -[Eric Tzeng](https://github.com/erictzeng), [Evan Shelhamer](http://imaginarynumber.net/), [Jeff Donahue](http://jeffdonahue.com/), [Jon Long](https://github.com/longjon), [Ross Girshick](http://www.cs.berkeley.edu/~rbg/), [Sergey Karayev](http://sergeykarayev.com/), [Sergio Guadarrama](http://www.eecs.berkeley.edu/~sguada/), and [Yangqing Jia](http://daggerfs.com/). +The BAIR members who have contributed to Caffe are (alphabetical by first name): +[Carl Doersch](http://www.carldoersch.com/), [Eric Tzeng](https://github.com/erictzeng), [Evan Shelhamer](http://imaginarynumber.net/), [Jeff Donahue](http://jeffdonahue.com/), [Jon Long](https://github.com/longjon), [Philipp Krähenbühl](http://www.philkr.net/), [Ronghang Hu](http://ronghanghu.com/), [Ross Girshick](http://www.cs.berkeley.edu/~rbg/), [Sergey Karayev](http://sergeykarayev.com/), [Sergio Guadarrama](http://www.eecs.berkeley.edu/~sguada/), [Takuya Narihira](https://github.com/tnarihi), and [Yangqing Jia](http://daggerfs.com/). The open-source community plays an important and growing role in Caffe's development. Check out the Github [project pulse](https://github.com/BVLC/caffe/pulse) for recent activity and the [contributors](https://github.com/BVLC/caffe/graphs/contributors) for the full list. @@ -103,4 +98,4 @@ Check out the Github [project pulse](https://github.com/BVLC/caffe/pulse) for re We sincerely appreciate your interest and contributions! 
If you'd like to contribute, please read the [developing & contributing](development.html) guide. -Yangqing would like to give a personal thanks to the NVIDIA Academic program for providing GPUs, [Oriol Vinyals](http://www1.icsi.berkeley.edu/~vinyals/) for discussions along the journey, and BVLC PI [Trevor Darrell](http://www.eecs.berkeley.edu/~trevor/) for advice. +Yangqing would like to give a personal thanks to the NVIDIA Academic program for providing GPUs, [Oriol Vinyals](http://www1.icsi.berkeley.edu/~vinyals/) for discussions along the journey, and BAIR PI [Trevor Darrell](http://www.eecs.berkeley.edu/~trevor/) for advice. diff --git a/docs/install_apt.md b/docs/install_apt.md index 2976e3cd07c..e361a92da3c 100644 --- a/docs/install_apt.md +++ b/docs/install_apt.md @@ -1,35 +1,67 @@ --- -title: Installation: Ubuntu +title: "Installation: Ubuntu" --- # Ubuntu Installation +### For Ubuntu (>= 17.04) + +**Installing pre-compiled Caffe** + +Everything including caffe itself is packaged in 17.04 and higher versions. +To install the pre-compiled Caffe package, just run + + sudo apt install caffe-cpu + +for the CPU-only version, or + + sudo apt install caffe-cuda + +for the CUDA version. Note that the CUDA version may break if your NVIDIA driver +and CUDA toolkit are not installed by APT. + +[Package status of CPU-only version](https://launchpad.net/ubuntu/+source/caffe) + +[Package status of CUDA version](https://launchpad.net/ubuntu/+source/caffe-contrib) + +**Installing Caffe from source** + +The dependencies can be installed with a single line: + + sudo apt build-dep caffe-cpu # dependencies for CPU-only version + sudo apt build-dep caffe-cuda # dependencies for CUDA version + +This requires a `deb-src` line in your `sources.list`. +Continue with [compilation](installation.html#compilation). + +### For Ubuntu (\< 17.04) + **General dependencies** sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler sudo apt-get install --no-install-recommends libboost-all-dev + sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev -**CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions. -Install the library and latest driver separately; the driver bundled with the library is usually out-of-date. +**CUDA**: Install by `apt-get` or the NVIDIA `.run` package. +The NVIDIA package tends to follow more recent library and driver versions, but the installation is more manual. +If installing from packages, install the library and latest driver separately; the driver bundled with the library is usually out-of-date. This can be skipped for CPU-only installation. -**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS or MKL for better CPU performance. +**BLAS**: install ATLAS by `sudo apt-get install libatlas-base-dev` or install OpenBLAS by `sudo apt-get install libopenblas-dev` or MKL for better CPU performance. **Python** (optional): if you use the default Python you will need to `sudo apt-get install` the `python-dev` package to have the Python headers for building the pycaffe interface. -**Remaining dependencies, 14.04** +**Compatibility notes, 16.04** -Everything is packaged in 14.04. - - sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev +CUDA 8 is required on Ubuntu 16.04. **Remaining dependencies, 12.04** These dependencies need manual installation in 12.04.
# glog - wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz - tar zxvf glog-0.3.3.tar.gz + wget https://github.com/google/glog/archive/v0.3.3.tar.gz + tar zxvf v0.3.3.tar.gz cd glog-0.3.3 ./configure make && make install diff --git a/docs/install_apt_debian.md b/docs/install_apt_debian.md new file mode 100644 index 00000000000..0a6a3b962e5 --- /dev/null +++ b/docs/install_apt_debian.md @@ -0,0 +1,163 @@ +--- +title: "Installation: Debian" +--- + +# Debian Installation + +Caffe packages are available for several Debian versions, as shown in the +following chart: + +``` +Your Distro | CPU_ONLY | CUDA | Codename +----------------+------------+--------+------------------- +Debian/oldstable| ✘ | ✘ | Jessie (8.0) +Debian/stable | ✔ | ✔ | Stretch (9.0) +Debian/testing | ✔ | ✔ | Buster +Debian/unstable | ✔ | ✔ | Buster +``` + +* `✘ ` You should take a look at the [Ubuntu installation instructions](install_apt.html). + +* `✔ ` You can install caffe with a single command line following this guide. + +* [Package status of CPU-only version](https://tracker.debian.org/pkg/caffe) + +* [Package status of CUDA version](https://tracker.debian.org/pkg/caffe-contrib) + +Last update: 2017-07-08 + +## Binary installation with APT + +Apart from the installation methods based on source, Debian users can install +pre-compiled Caffe packages from the official archive with APT. + +Make sure that your `/etc/apt/sources.list` contains `contrib` and `non-free` +sections if you want to install the CUDA version, for instance: + +``` +deb http://ftp2.cn.debian.org/debian sid main contrib non-free +``` + +Then update the APT cache and install Caffe directly. Note that the CPU version and +the CUDA version cannot coexist. + +``` +$ sudo apt update +$ sudo apt install [ caffe-cpu | caffe-cuda ] +$ caffe # command line interface working +$ python3 -c 'import caffe; print(caffe.__path__)' # python3 interface working +``` + +These Caffe packages should work for you out of the box. However, the CUDA version +may break if your NVIDIA driver and CUDA toolkit are not installed with APT. + +#### Customizing caffe packages + +Some users may need to customize the Caffe package. Full customization +is beyond the scope of this guide, so here is only a brief outline of producing +customized `.deb` packages. + +Make sure that there is a `deb-src` source in your `/etc/apt/sources.list`, +for instance: + +``` +deb http://ftp2.cn.debian.org/debian sid main contrib non-free +deb-src http://ftp2.cn.debian.org/debian sid main contrib non-free +``` + +Then we build caffe deb files with the following commands: + +``` +$ sudo apt update +$ sudo apt install build-essential debhelper devscripts # standard package building tools +$ sudo apt build-dep [ caffe-cpu | caffe-cuda ] # the most elegant way to pull caffe build dependencies +$ apt source [ caffe-cpu | caffe-cuda ] # download the source tarball and extract +$ cd caffe-XXXX +[ ... optional, customizing caffe code/build ... ] +$ dch --local "Modified XXX" # bump package version and write changelog +$ debuild -B -j4 # build caffe with 4 parallel jobs (similar to make -j4) +[ ... building ...] +$ debc # optional, if you want to check the package contents +$ sudo debi # optional, install the generated packages +$ ls ../ # optional, you will see the resulting packages +``` + +It is a bug if the package fails to build without any changes. +The changelog will be installed at e.g. `/usr/share/doc/caffe-cpu/changelog.Debian.gz`.
+ +## Source installation + +Source installation under Debian/unstable and Debian/testing is similar to that of Ubuntu, but +here is a more elegant way to pull caffe build dependencies: + +``` +$ sudo apt build-dep [ caffe-cpu | caffe-cuda ] +``` + +Note that this requires a `deb-src` entry in your `/etc/apt/sources.list`. + +#### Compiler Combinations + +Some users may find their favorite compiler doesn't work with CUDA. + +``` +CXX compiler | CUDA 7.5 | CUDA 8.0 | CUDA 9.0 | +-------------+------------+------------+------------+ +GCC-8 | ? | ? | ? | +GCC-7 | ? | ? | ? | +GCC-6 | ✘ | ✘ | ✔ | +GCC-5 | ✔ [1] | ✔ | ✔ | +-------------+------------+------------+------------+ +CLANG-4.0 | ? | ? | ? | +CLANG-3.9 | ✘ | ✘ | ✔ | +CLANG-3.8 | ? | ✔ | ✔ | +``` + +`[1]` CUDA 7.5's `host_config.h` must be patched before working with GCC-5. + +`[2]` CUDA 9.0: https://devblogs.nvidia.com/parallelforall/cuda-9-features-revealed/ + +Also, avoid the GCC-4.X series, since its `libstdc++` ABI is not compatible with GCC-5's. +You may encounter failures linking GCC-4.X object files against GCC-5 libraries. +(See https://wiki.debian.org/GCC5 ) + +## Notes + +* Consider re-compiling OpenBLAS locally with optimization flags for the sake of +performance. This is highly recommended for any kind of production use, including +academic research. + +* If you are installing `caffe-cuda`, APT will automatically pull some of the +CUDA packages and the nvidia driver packages. Please be careful if you have +manually installed or hacked the NVIDIA driver or CUDA toolkit or any other +related components, because in this case APT may fail. + +* Additionally, a manpage (`man caffe`) and a bash completion script +(`caffe `, `caffe train `) are provided. +Neither file has been merged into caffe master yet. + +* The Python interface is the Python 3 version: `python3-caffe-{cpu,cuda}`. +There is no plan to support Python 2. + +* If you encounter any problem related to the packaging system (e.g. failure to install `caffe-*`), +please report the bug to Debian via Debian's bug tracking system. See https://www.debian.org/Bugs/ . +Patches and suggestions are also welcome. + +## FAQ + +* Where is caffe-cudnn? + +The cuDNN library does not currently appear to be redistributable. If you really want +caffe-cudnn deb packages, the workaround is to install cuDNN yourself, +hack the packaging scripts, and build your customized package. + +* I installed the CPU version. How can I switch to the CUDA version? + +Just run `sudo apt install caffe-cuda`; APT's dependency resolver is smart enough to deal with this. + +* Where are the examples, the models and other documentation stuff?
+ +``` +$ sudo apt install caffe-doc +$ dpkg -L caffe-doc +``` diff --git a/docs/install_osx.md b/docs/install_osx.md index 6405d8ad046..a2da82f0fb2 100644 --- a/docs/install_osx.md +++ b/docs/install_osx.md @@ -1,5 +1,5 @@ --- -title: Installation: OS X +title: "Installation: OS X" --- # OS X Installation diff --git a/docs/install_yum.md b/docs/install_yum.md index 2104912e482..842fbd64177 100644 --- a/docs/install_yum.md +++ b/docs/install_yum.md @@ -1,5 +1,5 @@ --- -title: Installation: RHEL / Fedora / CentOS +title: "Installation: RHEL / Fedora / CentOS" --- # RHEL / Fedora / CentOS Installation @@ -15,7 +15,7 @@ title: Installation: RHEL / Fedora / CentOS **Remaining dependencies, if not found** # glog - wget https://google-glog.googlecode.com/files/glog-0.3.3.tar.gz + wget https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/google-glog/glog-0.3.3.tar.gz tar zxvf glog-0.3.3.tar.gz cd glog-0.3.3 ./configure diff --git a/docs/installation.md b/docs/installation.md index 4aac7c42d27..2c4b30d392d 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -12,10 +12,12 @@ The official Makefile and `Makefile.config` build are complemented by a [communi - [Docker setup](https://github.com/BVLC/caffe/tree/master/docker) *out-of-the-box brewing* - [Ubuntu installation](install_apt.html) *the standard platform* +- [Debian installation](install_apt_debian.html) *install caffe with a single command* - [OS X installation](install_osx.html) - [RHEL / CentOS / Fedora installation](install_yum.html) -- [Windows](https://github.com/BVLC/caffe/tree/windows) *see the Windows branch led by Microsoft* +- [Windows](https://github.com/BVLC/caffe/tree/windows) *see the Windows branch led by Guillaume Dumont* - [OpenCL](https://github.com/BVLC/caffe/tree/opencl) *see the OpenCL branch led by Fabian Tschopp* +- [AWS AMI](https://aws.amazon.com/marketplace/pp/B01M0AXXQB) *official deep learning amazon machine image from AWS* **Overview**: @@ -40,14 +42,14 @@ Optional dependencies: * [OpenCV](http://opencv.org/) >= 2.4 including 3.0 * IO libraries: `lmdb`, `leveldb` (note: leveldb requires `snappy`) -* cuDNN for GPU acceleration (v5) +* cuDNN for GPU acceleration (v7) Pycaffe and Matcaffe interfaces have their own natural needs. * For Python Caffe: `Python 2.7` or `Python 3.3+`, `numpy (>= 1.7)`, boost-provided `boost.python` * For MATLAB Caffe: MATLAB with the `mex` compiler. -**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v5; older versions are supported in older Caffe. +**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v7; older versions are supported in older Caffe. **CPU-only Caffe**: for cold-brewed CPU-only Caffe uncomment the `CPU_ONLY := 1` flag in `Makefile.config` to configure and build Caffe without CUDA. This is helpful for cloud or cluster deployment. @@ -62,7 +64,7 @@ Caffe requires BLAS as the backend of its matrix and vector computations. There are several implementations of this library. 
The choice is yours: * [ATLAS](http://math-atlas.sourceforge.net/): free, open source, and so the default for Caffe. -* [Intel MKL](http://software.intel.com/en-us/intel-mkl): commercial and optimized for Intel CPUs, with a free trial and [student](http://software.intel.com/en-us/intel-education-offerings) licenses. +* [Intel MKL](http://software.intel.com/en-us/intel-mkl): commercial and optimized for Intel CPUs, with [free](https://registrationcenter.intel.com/en/forms/?productid=2558) licenses. 1. Install MKL. 2. Set up MKL environment (Details: [Linux](https://software.intel.com/en-us/node/528499), [OS X](https://software.intel.com/en-us/node/528659)). Example: *source /opt/intel/mkl/bin/mklvars.sh intel64* 3. Set `BLAS := mkl` in `Makefile.config` diff --git a/docs/model_zoo.md b/docs/model_zoo.md index 06dc0a49ec7..3f77e82572c 100644 --- a/docs/model_zoo.md +++ b/docs/model_zoo.md @@ -3,7 +3,7 @@ title: Model Zoo --- # Caffe Model Zoo -Lots of researchers and engineers have made Caffe models for different tasks with all kinds of architectures and data. +Lots of researchers and engineers have made Caffe models for different tasks with all kinds of architectures and data: check out the [model zoo](https://github.com/BVLC/caffe/wiki/Model-Zoo)! These models are learned and applied for problems ranging from simple regression, to large-scale visual classification, to Siamese networks for image similarity, to speech and robotics applications. To help share these models, we introduce the model zoo framework: @@ -14,17 +14,17 @@ To help share these models, we introduce the model zoo framework: ## Where to get trained models -First of all, we bundle BVLC-trained models for unrestricted, out of the box use. +First of all, we bundle BAIR-trained models for unrestricted, out of the box use.
-See the [BVLC model license](#bvlc-model-license) for details. +See the [BAIR model license](#bair-model-license) for details. Each one of these can be downloaded by running `scripts/download_model_binary.py <dirname>` where `<dirname>` is specified below: -- **BVLC Reference CaffeNet** in `models/bvlc_reference_caffenet`: AlexNet trained on ILSVRC 2012, with a minor variation from the version as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Jeff Donahue @jeffdonahue) -- **BVLC AlexNet** in `models/bvlc_alexnet`: AlexNet trained on ILSVRC 2012, almost exactly as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Evan Shelhamer @shelhamer) -- **BVLC Reference R-CNN ILSVRC-2013** in `models/bvlc_reference_rcnn_ilsvrc13`: pure Caffe implementation of [R-CNN](https://github.com/rbgirshick/rcnn) as described by Girshick et al. in CVPR 2014. (Trained by Ross Girshick @rbgirshick) -- **BVLC GoogLeNet** in `models/bvlc_googlenet`: GoogLeNet trained on ILSVRC 2012, almost exactly as described in [Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842) by Szegedy et al. in ILSVRC 2014. (Trained by Sergio Guadarrama @sguada) +- **BAIR Reference CaffeNet** in `models/bvlc_reference_caffenet`: AlexNet trained on ILSVRC 2012, with a minor variation from the version as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Jeff Donahue @jeffdonahue) +- **BAIR AlexNet** in `models/bvlc_alexnet`: AlexNet trained on ILSVRC 2012, almost exactly as described in [ImageNet classification with deep convolutional neural networks](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) by Krizhevsky et al. in NIPS 2012. (Trained by Evan Shelhamer @shelhamer) +- **BAIR Reference R-CNN ILSVRC-2013** in `models/bvlc_reference_rcnn_ilsvrc13`: pure Caffe implementation of [R-CNN](https://github.com/rbgirshick/rcnn) as described by Girshick et al. in CVPR 2014. (Trained by Ross Girshick @rbgirshick) +- **BAIR GoogLeNet** in `models/bvlc_googlenet`: GoogLeNet trained on ILSVRC 2012, almost exactly as described in [Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842) by Szegedy et al. in ILSVRC 2014. (Trained by Sergio Guadarrama @sguada) -**Community models** made by Caffe users are posted to a publicly editable [wiki page](https://github.com/BVLC/caffe/wiki/Model-Zoo). +**Community models** made by Caffe users are posted to a publicly editable [model zoo wiki page](https://github.com/BVLC/caffe/wiki/Model-Zoo). These models are subject to conditions of their respective authors such as citation and license. Thank you for sharing your models! @@ -42,6 +42,8 @@ A caffe model is distributed as a directory containing: - License information. - [optional] Other helpful scripts. +This simple format can be handled through bundled scripts or manually if need be. ### Hosting model info Github Gist is a good format for model info distribution because it can contain multiple files, is versionable, and has in-browser syntax highlighting and markdown rendering.
@@ -55,14 +57,14 @@ Downloading model info is done just as easily with `scripts/download_model_from_ ### Hosting trained models It is up to the user where to host the `.caffemodel` file. -We host our BVLC-provided models on our own server. +We host our BAIR-provided models on our own server. Dropbox also works fine (tip: make sure that `?dl=1` is appended to the end of the URL). `scripts/download_model_binary.py <dirname>` downloads the `.caffemodel` from the URL specified in the `<dirname>/readme.md` frontmatter and confirms SHA1. -## BVLC model license +## BAIR model license -The Caffe models bundled by the BVLC are released for unrestricted use. +The Caffe models bundled by the BAIR are released for unrestricted use. These models are trained on data from the [ImageNet project](http://www.image-net.org/) and training data includes internet photos that may be subject to copyright. diff --git a/docs/multigpu.md b/docs/multigpu.md index d91acef980d..e04ebb0b7c8 100644 --- a/docs/multigpu.md +++ b/docs/multigpu.md @@ -13,7 +13,7 @@ The GPUs to be used for training can be set with the "-gpu" flag on the command # Hardware Configuration Assumptions The current implementation uses a tree reduction strategy. e.g. if there are 4 GPUs in the system, 0:1, 2:3 will exchange gradients, then 0:2 (top of the tree) will exchange gradients, 0 will calculate -updated model, 0\-\>2, and then 0\-\>1, 2\-\>3. +updated model, 0\-\>2, and then 0\-\>1, 2\-\>3. For best performance, P2P DMA access between devices is needed. Without P2P access, for example crossing PCIe root complex, data is copied through host and effective exchange bandwidth is greatly reduced. @@ -23,4 +23,4 @@ Current implementation has a "soft" assumption that the devices being used are h # Scaling Performance -Performance is **heavily** dependent on the PCIe topology of the system, the configuration of the neural network you are training, and the speed of each of the layers. Systems like the DIGITS DevBox have an optimized PCIe topology (X99-E WS chipset). In general, scaling on 2 GPUs tends to be ~1.8X on average for networks like AlexNet, CaffeNet, VGG, GoogleNet. 4 GPUs begins to have falloff in scaling. Generally with "weak scaling" where the batchsize increases with the number of GPUs you will see 3.5x scaling or so. With "strong scaling", the system can become communication bound, especially with layer performance optimizations like those in [cuDNNv3](http://nvidia.com/cudnn), and you will likely see closer to mid 2.x scaling in performance. Networks that have heavy computation compared to the number of parameters tend to have the best scaling performance. \ No newline at end of file +Performance is **heavily** dependent on the PCIe topology of the system, the configuration of the neural network you are training, and the speed of each of the layers. Systems like the DIGITS DevBox have an optimized PCIe topology (X99-E WS chipset). In general, scaling on 2 GPUs tends to be ~1.8X on average for networks like AlexNet, CaffeNet, VGG, GoogleNet. 4 GPUs begins to have falloff in scaling. Generally with "weak scaling" where the batchsize increases with the number of GPUs you will see 3.5x scaling or so. With "strong scaling", the system can become communication bound, especially with layer performance optimizations like those in [cuDNNv3](http://nvidia.com/cudnn), and you will likely see closer to mid 2.x scaling in performance. Networks that have heavy computation compared to the number of parameters tend to have the best scaling performance.
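To make the tree-reduction strategy from the Hardware Configuration Assumptions section above concrete, here is a minimal Python sketch of the pairing schedule; it is an illustration only, not Caffe's actual implementation, and the function name is made up:

```python
# Illustrative sketch of the tree-reduction schedule described above;
# not Caffe's code. For GPUs [0, 1, 2, 3]: round 1 exchanges 1->0 and
# 3->2, round 2 exchanges 2->0, GPU 0 computes the updated model, and
# the broadcast reverses the order: 0->2, then 0->1 and 2->3.
def tree_schedule(gpus):
    """Return (reduce_rounds, broadcast_rounds) as lists of (src, dst) pairs."""
    reduce_rounds, step = [], 1
    while step < len(gpus):
        reduce_rounds.append([(gpus[i + step], gpus[i])
                              for i in range(0, len(gpus), 2 * step)
                              if i + step < len(gpus)])
        step *= 2
    broadcast_rounds = [[(dst, src) for src, dst in rnd]
                        for rnd in reversed(reduce_rounds)]
    return reduce_rounds, broadcast_rounds

print(tree_schedule([0, 1, 2, 3]))
# ([[(1, 0), (3, 2)], [(2, 0)]], [[(0, 2)], [(0, 1), (2, 3)]])
```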
diff --git a/docs/performance_hardware.md b/docs/performance_hardware.md deleted file mode 100644 index cdd4b361dea..00000000000 --- a/docs/performance_hardware.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Performance and Hardware Configuration ---- - -# Performance and Hardware Configuration - -To measure performance on different NVIDIA GPUs we use CaffeNet, the Caffe reference ImageNet model. - -For training, each time point is 20 iterations/minibatches of 256 images for 5,120 images total. For testing, a 50,000 image validation set is classified. - -**Acknowledgements**: BVLC members are very grateful to NVIDIA for providing several GPUs to conduct this research. - -## NVIDIA K40 - -Performance is best with ECC off and boost clock enabled. While ECC makes a negligible difference in speed, disabling it frees ~1 GB of GPU memory. - -Best settings with ECC off and maximum clock speed in standard Caffe: - -* Training is 26.5 secs / 20 iterations (5,120 images) -* Testing is 100 secs / validation set (50,000 images) - -Best settings with Caffe + [cuDNN acceleration](http://nvidia.com/cudnn): - -* Training is 19.2 secs / 20 iterations (5,120 images) -* Testing is 60.7 secs / validation set (50,000 images) - -Other settings: - -* ECC on, max speed: training 26.7 secs / 20 iterations, test 101 secs / validation set -* ECC on, default speed: training 31 secs / 20 iterations, test 117 secs / validation set -* ECC off, default speed: training 31 secs / 20 iterations, test 118 secs / validation set - -### K40 configuration tips - -For maximum K40 performance, turn off ECC and boost the clock speed (at your own risk). - -To turn off ECC, do - - sudo nvidia-smi -i 0 --ecc-config=0 # repeat with -i x for each GPU ID - -then reboot. - -Set the "persistence" mode of the GPU settings by - - sudo nvidia-smi -pm 1 - -and then set the clock speed with - - sudo nvidia-smi -i 0 -ac 3004,875 # repeat with -i x for each GPU ID - -but note that this configuration resets across driver reloading / rebooting. Include these commands in a boot script to initialize these settings. For a simple fix, add these commands to `/etc/rc.local` (on Ubuntu). - -## NVIDIA Titan - -Training: 26.26 secs / 20 iterations (5,120 images). -Testing: 100 secs / validation set (50,000 images). - -cuDNN Training: 20.25 secs / 20 iterations (5,120 images). -cuDNN Testing: 66.3 secs / validation set (50,000 images). - - -## NVIDIA K20 - -Training: 36.0 secs / 20 iterations (5,120 images). -Testing: 133 secs / validation set (50,000 images). - -## NVIDIA GTX 770 - -Training: 33.0 secs / 20 iterations (5,120 images). -Testing: 129 secs / validation set (50,000 images). - -cuDNN Training: 24.3 secs / 20 iterations (5,120 images). -cuDNN Testing: 104 secs / validation set (50,000 images). diff --git a/docs/tutorial/interfaces.md b/docs/tutorial/interfaces.md index d7ff378239d..b5a4f1ad069 100644 --- a/docs/tutorial/interfaces.md +++ b/docs/tutorial/interfaces.md @@ -91,7 +91,7 @@ In MatCaffe, you can * Run for a certain number of iterations and give back control to Matlab * Intermingle arbitrary Matlab code with gradient steps -An ILSVRC image classification demo is in caffe/matlab/demo/classification_demo.m (you need to download BVLC CaffeNet from [Model Zoo](http://caffe.berkeleyvision.org/model_zoo.html) to run it). +An ILSVRC image classification demo is in caffe/matlab/demo/classification_demo.m (you need to download BAIR CaffeNet from [Model Zoo](http://caffe.berkeleyvision.org/model_zoo.html) to run it). 
### Build MatCaffe @@ -114,7 +114,7 @@ You can save your Matlab search PATH by running `savepath` so that you don't hav MatCaffe is very similar to PyCaffe in usage. -Examples below shows detailed usages and assumes you have downloaded BVLC CaffeNet from [Model Zoo](http://caffe.berkeleyvision.org/model_zoo.html) and started `matlab` from caffe root folder. +The examples below show detailed usage and assume you have downloaded BAIR CaffeNet from [Model Zoo](http://caffe.berkeleyvision.org/model_zoo.html) and started `matlab` from the caffe root folder. model = './models/bvlc_reference_caffenet/deploy.prototxt'; weights = './models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'; diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md index 7362aac298a..78a46f3a7ee 100644 --- a/docs/tutorial/layers.md +++ b/docs/tutorial/layers.md @@ -1,186 +1,77 @@ --- title: Layer Catalogue --- + # Layers To create a Caffe model you need to define the model architecture in a protocol buffer definition file (prototxt). Caffe layers and their parameters are defined in the protocol buffer definitions for the project in [caffe.proto](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto). -### Vision Layers - -* Header: `./include/caffe/vision_layers.hpp` - -Vision layers usually take *images* as input and produce other *images* as output. -A typical "image" in the real-world may have one color channel ($$c = 1$$), as in a grayscale image, or three color channels ($$c = 3$$) as in an RGB (red, green, blue) image. -But in this context, the distinguishing characteristic of an image is its spatial structure: usually an image has some non-trivial height $$h > 1$$ and width $$w > 1$$. -This 2D geometry naturally lends itself to certain decisions about how to process the input. -In particular, most of the vision layers work by applying a particular operation to some region of the input to produce a corresponding region of the output. -In contrast, other layers (with few exceptions) ignore the spatial structure of the input, effectively treating it as "one big vector" with dimension $$chw$$. - - -#### Convolution - -* Layer type: `Convolution` -* CPU implementation: `./src/caffe/layers/convolution_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/convolution_layer.cu` -* Parameters (`ConvolutionParameter convolution_param`) - - Required - - `num_output` (`c_o`): the number of filters - - `kernel_size` (or `kernel_h` and `kernel_w`): specifies height and width of each filter - - Strongly Recommended - - `weight_filler` [default `type: 'constant' value: 0`] - - Optional - - `bias_term` [default `true`]: specifies whether to learn and apply a set of additive biases to the filter outputs - - `pad` (or `pad_h` and `pad_w`) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input - - `stride` (or `stride_h` and `stride_w`) [default 1]: specifies the intervals at which to apply the filters to the input - - `group` (g) [default 1]: If g > 1, we restrict the connectivity of each filter to a subset of the input. Specifically, the input and output channels are separated into g groups, and the $$i$$th output group channels will be only connected to the $$i$$th input group channels. -* Input - - `n * c_i * h_i * w_i` -* Output - - `n * c_o * h_o * w_o`, where `h_o = (h_i + 2 * pad_h - kernel_h) / stride_h + 1` and `w_o` likewise.
-* Sample (as seen in `./models/bvlc_reference_caffenet/train_val.prototxt`) - - layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - # learning rate and decay multipliers for the filters - param { lr_mult: 1 decay_mult: 1 } - # learning rate and decay multipliers for the biases - param { lr_mult: 2 decay_mult: 0 } - convolution_param { - num_output: 96 # learn 96 filters - kernel_size: 11 # each filter is 11x11 - stride: 4 # step 4 pixels between each filter application - weight_filler { - type: "gaussian" # initialize the filters from a Gaussian - std: 0.01 # distribution with stdev 0.01 (default mean: 0) - } - bias_filler { - type: "constant" # initialize the biases to zero (0) - value: 0 - } - } - } - -The `Convolution` layer convolves the input image with a set of learnable filters, each producing one feature map in the output image. - -#### Pooling - -* Layer type: `Pooling` -* CPU implementation: `./src/caffe/layers/pooling_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/pooling_layer.cu` -* Parameters (`PoolingParameter pooling_param`) - - Required - - `kernel_size` (or `kernel_h` and `kernel_w`): specifies height and width of each filter - - Optional - - `pool` [default MAX]: the pooling method. Currently MAX, AVE, or STOCHASTIC - - `pad` (or `pad_h` and `pad_w`) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input - - `stride` (or `stride_h` and `stride_w`) [default 1]: specifies the intervals at which to apply the filters to the input -* Input - - `n * c * h_i * w_i` -* Output - - `n * c * h_o * w_o`, where h_o and w_o are computed in the same way as convolution. -* Sample (as seen in `./models/bvlc_reference_caffenet/train_val.prototxt`) - - layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 3 # pool over a 3x3 region - stride: 2 # step two pixels (in the bottom blob) between pooling regions - } - } - -#### Local Response Normalization (LRN) - -* Layer type: `LRN` -* CPU Implementation: `./src/caffe/layers/lrn_layer.cpp` -* CUDA GPU Implementation: `./src/caffe/layers/lrn_layer.cu` -* Parameters (`LRNParameter lrn_param`) - - Optional - - `local_size` [default 5]: the number of channels to sum over (for cross channel LRN) or the side length of the square region to sum over (for within channel LRN) - - `alpha` [default 1]: the scaling parameter (see below) - - `beta` [default 5]: the exponent (see below) - - `norm_region` [default `ACROSS_CHANNELS`]: whether to sum over adjacent channels (`ACROSS_CHANNELS`) or nearby spatial locaitons (`WITHIN_CHANNEL`) +## Data Layers -The local response normalization layer performs a kind of "lateral inhibition" by normalizing over local input regions. In `ACROSS_CHANNELS` mode, the local regions extend across nearby channels, but have no spatial extent (i.e., they have shape `local_size x 1 x 1`). In `WITHIN_CHANNEL` mode, the local regions extend spatially, but are in separate channels (i.e., they have shape `1 x local_size x local_size`). Each input value is divided by $$(1 + (\alpha/n) \sum_i x_i^2)^\beta$$, where $$n$$ is the size of each local region, and the sum is taken over the region centered at that value (zero padding is added where necessary). - -#### im2col - -`Im2col` is a helper for doing the image-to-column transformation that you most likely do not need to know about. This is used in Caffe's original convolution to do matrix multiplication by laying out all patches into a matrix. 
- -### Loss Layers +Data enters Caffe through data layers: they lie at the bottom of nets. Data can come from efficient databases (LevelDB or LMDB), directly from memory, or, when efficiency is not critical, from files on disk in HDF5 or common image formats. -Loss drives learning by comparing an output to a target and assigning cost to minimize. The loss itself is computed by the forward pass and the gradient w.r.t. to the loss is computed by the backward pass. +Common input preprocessing (mean subtraction, scaling, random cropping, and mirroring) is available by specifying `TransformationParameter`s in some of the layers. +The [bias](layers/bias.html), [scale](layers/scale.html), and [crop](layers/crop.html) layers can be helpful for transforming the inputs when `TransformationParameter` isn't available. -#### Softmax +Layers: -* Layer type: `SoftmaxWithLoss` +* [Image Data](layers/imagedata.html) - read raw images. +* [Database](layers/data.html) - read data from LEVELDB or LMDB. +* [HDF5 Input](layers/hdf5data.html) - read HDF5 data, allows data of arbitrary dimensions. +* [HDF5 Output](layers/hdf5output.html) - write data as HDF5. +* [Input](layers/input.html) - typically used for networks that are being deployed. +* [Window Data](layers/windowdata.html) - read window data file. +* [Memory Data](layers/memorydata.html) - read data directly from memory. +* [Dummy Data](layers/dummydata.html) - for static data and debugging. The softmax loss layer computes the multinomial logistic loss of the softmax of its inputs. It's conceptually identical to a softmax layer followed by a multinomial logistic loss layer, but provides a more numerically stable gradient. +Note that the [Python](layers/python.html) Layer can be useful for creating custom data layers; a minimal pycaffe sketch of a typical data layer appears later in this section. -#### Sum-of-Squares / Euclidean +## Vision Layers -* Layer type: `EuclideanLoss` +Vision layers usually take *images* as input and produce other *images* as output, although they can take data of other types and dimensions. A typical "image" in the real-world may have one color channel ($$c = 1$$), as in a grayscale image, or three color channels ($$c = 3$$) as in an RGB (red, green, blue) image. -The Euclidean loss layer computes the sum of squares of differences of its two inputs, $$\frac 1 {2N} \sum_{i=1}^N \| x^1_i - x^2_i \|_2^2$$. +But in this context, the distinguishing characteristic of an image is its spatial structure: usually an image has some non-trivial height $$h > 1$$ and width $$w > 1$$. +This 2D geometry naturally lends itself to certain decisions about how to process the input. +In particular, most of the vision layers work by applying a particular operation to some region of the input to produce a corresponding region of the output. +In contrast, other layers (with few exceptions) ignore the spatial structure of the input, effectively treating it as "one big vector" with dimension $$chw$$. -#### Hinge / Margin +Layers: +* [Convolution Layer](layers/convolution.html) - convolves the input image with a set of learnable filters, each producing one feature map in the output image. +* [Pooling Layer](layers/pooling.html) - max, average, or stochastic pooling. +* [Spatial Pyramid Pooling (SPP)](layers/spp.html) +* [Crop](layers/crop.html) - perform cropping transformation. +* [Deconvolution Layer](layers/deconvolution.html) - transposed convolution.
-* Layer type: `HingeLoss` -* CPU implementation: `./src/caffe/layers/hinge_loss_layer.cpp` -* CUDA GPU implementation: none yet -* Parameters (`HingeLossParameter hinge_loss_param`) - - Optional - - `norm` [default L1]: the norm used. Currently L1, L2 -* Inputs - - `n * c * h * w` Predictions - - `n * 1 * 1 * 1` Labels -* Output - - `1 * 1 * 1 * 1` Computed Loss -* Samples +* [Im2Col](layers/im2col.html) - relic helper layer that is not used much anymore. - # L1 Norm - layer { - name: "loss" - type: "HingeLoss" - bottom: "pred" - bottom: "label" - } ## Recurrent Layers - # L2 Norm - layer { - name: "loss" - type: "HingeLoss" - bottom: "pred" - bottom: "label" - top: "loss" - hinge_loss_param { - norm: L2 - } - } Layers: -The hinge loss layer computes a one-vs-all hinge or squared hinge loss. +* [Recurrent](layers/recurrent.html) +* [RNN](layers/rnn.html) +* [Long Short-Term Memory (LSTM)](layers/lstm.html) -#### Sigmoid Cross-Entropy ## Common Layers -`SigmoidCrossEntropyLoss` Layers: -#### Infogain +* [Inner Product](layers/innerproduct.html) - fully connected layer. +* [Dropout](layers/dropout.html) +* [Embed](layers/embed.html) - for learning embeddings of one-hot encoded vectors (takes indices as input). -`InfogainLoss` ## Normalization Layers -#### Accuracy and Top-k +* [Local Response Normalization (LRN)](layers/lrn.html) - performs a kind of "lateral inhibition" by normalizing over local input regions. +* [Mean Variance Normalization (MVN)](layers/mvn.html) - performs contrast normalization / instance normalization. +* [Batch Normalization](layers/batchnorm.html) - performs normalization over mini-batches. -`Accuracy` scores the output as the accuracy of output with respect to target -- it is not actually a loss and has no backward step. +The [bias](layers/bias.html) and [scale](layers/scale.html) layers can be helpful in combination with normalization. -### Activation / Neuron Layers +## Activation / Neuron Layers In general, activation / Neuron layers are element-wise operators, taking one bottom blob and producing one top blob of the same size. In the layers below, we will ignore the input and out sizes as they are identical: @@ -189,337 +80,56 @@ In general, activation / Neuron layers are element-wise operators, taking one bo * Output - n * c * h * w -#### ReLU / Rectified-Linear and Leaky-ReLU - -* Layer type: `ReLU` -* CPU implementation: `./src/caffe/layers/relu_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/relu_layer.cu` -* Parameters (`ReLUParameter relu_param`) - - Optional - - `negative_slope` [default 0]: specifies whether to leak the negative part by multiplying it with the slope value rather than setting it to 0. -* Sample (as seen in `./models/bvlc_reference_caffenet/train_val.prototxt`) - - layer { - name: "relu1" - type: "ReLU" - bottom: "conv1" - top: "conv1" - } - -Given an input value x, The `ReLU` layer computes the output as x if x > 0 and negative_slope * x if x <= 0. When the negative slope parameter is not set, it is equivalent to the standard ReLU function of taking max(x, 0). It also supports in-place computation, meaning that the bottom and the top blob could be the same to preserve memory consumption.
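As a concrete illustration of the in-place computation described above, here is a minimal pycaffe NetSpec sketch; it is illustrative only, and the layer and blob names are made up:

```python
# Illustrative pycaffe sketch of in-place ReLU; layer/blob names are made up.
# With in_place=True the ReLU's bottom and top are the same blob ("conv1"),
# which saves memory.
import caffe
from caffe import layers as L

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 227, 227]))
n.conv1 = L.Convolution(n.data, num_output=96, kernel_size=11, stride=4)
n.relu1 = L.ReLU(n.conv1, in_place=True)
print(n.to_proto())  # the generated prototxt lists bottom and top both as "conv1"
```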
- -#### Sigmoid - -* Layer type: `Sigmoid` -* CPU implementation: `./src/caffe/layers/sigmoid_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/sigmoid_layer.cu` -* Sample (as seen in `./examples/mnist/mnist_autoencoder.prototxt`) - - layer { - name: "encode1neuron" - bottom: "encode1" - top: "encode1neuron" - type: "Sigmoid" - } - -The `Sigmoid` layer computes the output as sigmoid(x) for each input element x. - -#### TanH / Hyperbolic Tangent - -* Layer type: `TanH` -* CPU implementation: `./src/caffe/layers/tanh_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/tanh_layer.cu` -* Sample - - layer { - name: "layer" - bottom: "in" - top: "out" - type: "TanH" - } - -The `TanH` layer computes the output as tanh(x) for each input element x. - -#### Absolute Value - -* Layer type: `AbsVal` -* CPU implementation: `./src/caffe/layers/absval_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/absval_layer.cu` -* Sample - - layer { - name: "layer" - bottom: "in" - top: "out" - type: "AbsVal" - } - -The `AbsVal` layer computes the output as abs(x) for each input element x. - -#### Power - -* Layer type: `Power` -* CPU implementation: `./src/caffe/layers/power_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/power_layer.cu` -* Parameters (`PowerParameter power_param`) - - Optional - - `power` [default 1] - - `scale` [default 1] - - `shift` [default 0] -* Sample - - layer { - name: "layer" - bottom: "in" - top: "out" - type: "Power" - power_param { - power: 1 - scale: 1 - shift: 0 - } - } - -The `Power` layer computes the output as (shift + scale * x) ^ power for each input element x. - -#### BNLL - -* Layer type: `BNLL` -* CPU implementation: `./src/caffe/layers/bnll_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/bnll_layer.cu` -* Sample - - layer { - name: "layer" - bottom: "in" - top: "out" - type: BNLL - } - -The `BNLL` (binomial normal log likelihood) layer computes the output as log(1 + exp(x)) for each input element x. - - -### Data Layers - -Data enters Caffe through data layers: they lie at the bottom of nets. Data can come from efficient databases (LevelDB or LMDB), directly from memory, or, when efficiency is not critical, from files on disk in HDF5 or common image formats. - -Common input preprocessing (mean subtraction, scaling, random cropping, and mirroring) is available by specifying `TransformationParameter`s. - -#### Database +Layers: -* Layer type: `Data` -* Parameters - - Required - - `source`: the name of the directory containing the database - - `batch_size`: the number of inputs to process at one time - - Optional - - `rand_skip`: skip up to this number of inputs at the beginning; useful for asynchronous sgd - - `backend` [default `LEVELDB`]: choose whether to use a `LEVELDB` or `LMDB` +* [ReLU / Rectified-Linear and Leaky-ReLU](layers/relu.html) - ReLU and Leaky-ReLU rectification. +* [PReLU](layers/prelu.html) - parametric ReLU. +* [ELU](layers/elu.html) - exponential linear rectification. +* [Sigmoid](layers/sigmoid.html) +* [TanH](layers/tanh.html) +* [Absolute Value](layers/absval.html) +* [Power](layers/power.html) - f(x) = (shift + scale * x) ^ power. +* [Exp](layers/exp.html) - f(x) = base ^ (shift + scale * x). +* [Log](layers/log.html) - f(x) = log(x). +* [BNLL](layers/bnll.html) - f(x) = log(1 + exp(x)). +* [Threshold](layers/threshold.html) - performs a step function at a user-defined threshold. +* [Bias](layers/bias.html) - adds a bias to a blob that can either be learned or fixed.
+* [Scale](layers/scale.html) - scales a blob by an amount that can either be learned or fixed. +## Utility Layers +Layers: -#### In-Memory +* [Flatten](layers/flatten.html) +* [Reshape](layers/reshape.html) +* [Batch Reindex](layers/batchreindex.html) -* Layer type: `MemoryData` -* Parameters - - Required - - `batch_size`, `channels`, `height`, `width`: specify the size of input chunks to read from memory +* [Split](layers/split.html) +* [Concat](layers/concat.html) +* [Slicing](layers/slice.html) +* [Eltwise](layers/eltwise.html) - element-wise operations such as product or sum between two blobs. +* [Filter / Mask](layers/filter.html) - mask or select output using last blob. +* [Parameter](layers/parameter.html) - enable parameters to be shared between layers. +* [Reduction](layers/reduction.html) - reduce input blob to scalar blob using operations such as sum or mean. +* [Silence](layers/silence.html) - prevent top-level blobs from being printed during training. -The memory data layer reads data directly from memory, without copying it. In order to use it, one must call `MemoryDataLayer::Reset` (from C++) or `Net.set_input_arrays` (from Python) in order to specify a source of contiguous data (as 4D row major array), which is read one batch-sized chunk at a time. +* [ArgMax](layers/argmax.html) +* [Softmax](layers/softmax.html) -#### HDF5 Input +* [Python](layers/python.html) - allows custom Python layers. -* Layer type: `HDF5Data` -* Parameters - - Required - - `source`: the name of the file to read from - - `batch_size` +## Loss Layers -#### HDF5 Output - -* Layer type: `HDF5Output` -* Parameters - - Required - - `file_name`: name of file to write to - -The HDF5 output layer performs the opposite function of the other layers in this section: it writes its input blobs to disk. - -#### Images - -* Layer type: `ImageData` -* Parameters - - Required - - `source`: name of a text file, with each line giving an image filename and label - - `batch_size`: number of images to batch together - - Optional - - `rand_skip` - - `shuffle` [default false] - - `new_height`, `new_width`: if provided, resize all images to this size - -#### Windows - -`WindowData` - -#### Dummy - -`DummyData` is for development and debugging. See `DummyDataParameter`. 
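To connect the data-layer catalogue with the preprocessing note at the start of this section, here is the minimal pycaffe NetSpec sketch referred to there: an LMDB-backed Data layer with a `TransformationParameter`. It is illustrative only; the database path, mean file, and parameter values are made up:

```python
# Illustrative sketch only: an LMDB-backed Data layer with common input
# preprocessing (mean subtraction, scaling, random cropping, mirroring).
# The database path, mean file, and parameter values are hypothetical.
import caffe
from caffe import layers as L, params as P

n = caffe.NetSpec()
n.data, n.label = L.Data(
    batch_size=64,
    backend=P.Data.LMDB,
    source='path/to/train_lmdb',               # hypothetical database
    transform_param=dict(
        mean_file='path/to/mean.binaryproto',  # mean subtraction
        scale=1. / 255,                        # rescale pixel values
        crop_size=227,                         # random crops during training
        mirror=True),                          # random horizontal flips
    ntop=2)                                    # two tops: data and label
print(n.to_proto())
```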
- -### Common Layers - -#### Inner Product - -* Layer type: `InnerProduct` -* CPU implementation: `./src/caffe/layers/inner_product_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/inner_product_layer.cu` -* Parameters (`InnerProductParameter inner_product_param`) - - Required - - `num_output` (`c_o`): the number of filters - - Strongly recommended - - `weight_filler` [default `type: 'constant' value: 0`] - - Optional - - `bias_filler` [default `type: 'constant' value: 0`] - - `bias_term` [default `true`]: specifies whether to learn and apply a set of additive biases to the filter outputs -* Input - - `n * c_i * h_i * w_i` -* Output - - `n * c_o * 1 * 1` -* Sample - - layer { - name: "fc8" - type: "InnerProduct" - # learning rate and decay multipliers for the weights - param { lr_mult: 1 decay_mult: 1 } - # learning rate and decay multipliers for the biases - param { lr_mult: 2 decay_mult: 0 } - inner_product_param { - num_output: 1000 - weight_filler { - type: "gaussian" - std: 0.01 - } - bias_filler { - type: "constant" - value: 0 - } - } - bottom: "fc7" - top: "fc8" - } - -The `InnerProduct` layer (also usually referred to as the fully connected layer) treats the input as a simple vector and produces an output in the form of a single vector (with the blob's height and width set to 1). - -#### Splitting - -The `Split` layer is a utility layer that splits an input blob to multiple output blobs. This is used when a blob is fed into multiple output layers. - -#### Flattening - -The `Flatten` layer is a utility layer that flattens an input of shape `n * c * h * w` to a simple vector output of shape `n * (c*h*w)` - -#### Reshape - -* Layer type: `Reshape` -* Implementation: `./src/caffe/layers/reshape_layer.cpp` -* Parameters (`ReshapeParameter reshape_param`) - - Optional: (also see detailed description below) - - `shape` - -* Input - - a single blob with arbitrary dimensions -* Output - - the same blob, with modified dimensions, as specified by `reshape_param` - -* Sample - - layer { - name: "reshape" - type: "Reshape" - bottom: "input" - top: "output" - reshape_param { - shape { - dim: 0 # copy the dimension from below - dim: 2 - dim: 3 - dim: -1 # infer it from the other dimensions - } - } - } - -The `Reshape` layer can be used to change the dimensions of its input, without changing its data. Just like the `Flatten` layer, only the dimensions are changed; no data is copied in the process. - -Output dimensions are specified by the `ReshapeParam` proto. Positive numbers are used directly, setting the corresponding dimension of the output blob. In addition, two special values are accepted for any of the target dimension values: - -* **0** means "copy the respective dimension of the bottom layer". That is, if the bottom has 2 as its 1st dimension, the top will have 2 as its 1st dimension as well, given `dim: 0` as the 1st target dimension. -* **-1** stands for "infer this from the other dimensions". This behavior is similar to that of -1 in *numpy*'s or `[]` for *MATLAB*'s reshape: this dimension is calculated to keep the overall element count the same as in the bottom layer. At most one -1 can be used in a reshape operation. - -As another example, specifying `reshape_param { shape { dim: 0 dim: -1 } }` makes the layer behave in exactly the same way as the `Flatten` layer. 
-#### Concatenation - -* Layer type: `Concat` -* CPU implementation: `./src/caffe/layers/concat_layer.cpp` -* CUDA GPU implementation: `./src/caffe/layers/concat_layer.cu` -* Parameters (`ConcatParameter concat_param`) - - Optional - - `axis` [default 1]: 0 for concatenation along num and 1 for channels. -* Input - - `n_i * c_i * h * w` for each input blob i from 1 to K. -* Output - - if `axis = 0`: `(n_1 + n_2 + ... + n_K) * c_1 * h * w`, and all input `c_i` should be the same. - - if `axis = 1`: `n_1 * (c_1 + c_2 + ... + c_K) * h * w`, and all input `n_i` should be the same. -* Sample - - layer { - name: "concat" - bottom: "in1" - bottom: "in2" - top: "out" - type: "Concat" - concat_param { - axis: 1 - } - } - -The `Concat` layer is a utility layer that concatenates its multiple input blobs to one single output blob. - -#### Slicing - -The `Slice` layer is a utility layer that slices an input layer to multiple output layers along a given dimension (currently num or channel only) with given slice indices. - -* Sample - - layer { - name: "slicer_label" - type: "Slice" - bottom: "label" - ## Example of label with a shape N x 3 x 1 x 1 - top: "label1" - top: "label2" - top: "label3" - slice_param { - axis: 1 - slice_point: 1 - slice_point: 2 - } - } - -`axis` indicates the target axis; `slice_point` indicates indexes in the selected dimension (the number of indices must be equal to the number of top blobs minus one). - - -#### Elementwise Operations - -`Eltwise` - -#### Argmax - -`ArgMax` - -#### Softmax +Loss drives learning by comparing an output to a target and assigning cost to minimize. The loss itself is computed by the forward pass and the gradient w.r.t. the loss is computed by the backward pass. -`Softmax` +Layers: -#### Mean-Variance Normalization +* [Multinomial Logistic Loss](layers/multinomiallogisticloss.html) +* [Infogain Loss](layers/infogainloss.html) - a generalization of MultinomialLogisticLossLayer. +* [Softmax with Loss](layers/softmaxwithloss.html) - computes the multinomial logistic loss of the softmax of its inputs. It's conceptually identical to a softmax layer followed by a multinomial logistic loss layer, but provides a more numerically stable gradient. +* [Sum-of-Squares / Euclidean](layers/euclideanloss.html) - computes the sum of squares of differences of its two inputs, $$\frac 1 {2N} \sum_{i=1}^N \| x^1_i - x^2_i \|_2^2$$. +* [Hinge / Margin](layers/hingeloss.html) - The hinge loss layer computes a one-vs-all hinge (L1) or squared hinge loss (L2). +* [Sigmoid Cross-Entropy Loss](layers/sigmoidcrossentropyloss.html) - computes the cross-entropy (logistic) loss, often used for predicting targets interpreted as probabilities. +* [Accuracy / Top-k layer](layers/accuracy.html) - scores the output as an accuracy with respect to the target -- it is not actually a loss and has no backward step.
+* [Contrastive Loss](layers/contrastiveloss.html) -`MVN` diff --git a/docs/tutorial/layers/absval.md b/docs/tutorial/layers/absval.md new file mode 100644 index 00000000000..220c41189be --- /dev/null +++ b/docs/tutorial/layers/absval.md @@ -0,0 +1,22 @@ +--- +title: Absolute Value Layer +--- + +# Absolute Value Layer + +* Layer type: `AbsVal` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1AbsValLayer.html) +* Header: [`./include/caffe/layers/absval_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/absval_layer.hpp) +* CPU implementation: [`./src/caffe/layers/absval_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/absval_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/absval_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/absval_layer.cu) + +* Sample + + layer { + name: "layer" + bottom: "in" + top: "out" + type: "AbsVal" + } + +The `AbsVal` layer computes the output as abs(x) for each input element x. diff --git a/docs/tutorial/layers/accuracy.md b/docs/tutorial/layers/accuracy.md new file mode 100644 index 00000000000..80293b1c6bf --- /dev/null +++ b/docs/tutorial/layers/accuracy.md @@ -0,0 +1,20 @@ +--- +title: Accuracy and Top-k +--- + +# Accuracy and Top-k + +`Accuracy` scores the output as the accuracy of the output with respect to the target -- it is not actually a loss and has no backward step. + +* Layer type: `Accuracy` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1AccuracyLayer.html) +* Header: [`./include/caffe/layers/accuracy_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/accuracy_layer.hpp) +* CPU implementation: [`./src/caffe/layers/accuracy_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/accuracy_layer.cpp) + +## Parameters +* Parameters (`AccuracyParameter accuracy_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/AccuracyParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/argmax.md b/docs/tutorial/layers/argmax.md new file mode 100644 index 00000000000..9eb8b7739f5 --- /dev/null +++ b/docs/tutorial/layers/argmax.md @@ -0,0 +1,18 @@ +--- +title: ArgMax Layer +--- + +# ArgMax Layer + +* Layer type: `ArgMax` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ArgMaxLayer.html) +* Header: [`./include/caffe/layers/argmax_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/argmax_layer.hpp) +* CPU implementation: [`./src/caffe/layers/argmax_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/argmax_layer.cpp) + +## Parameters
* Parameters (`ArgMaxParameter argmax_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ArgMaxParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/batchnorm.md b/docs/tutorial/layers/batchnorm.md new file mode 100644 index 00000000000..a5be5ce08bf --- /dev/null +++ b/docs/tutorial/layers/batchnorm.md @@ -0,0 +1,20 @@ +--- +title: Batch Norm Layer +--- + +# Batch Norm Layer + +* Layer type: `BatchNorm` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1BatchNormLayer.html) +* Header:
[`./include/caffe/layers/batch_norm_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/batch_norm_layer.hpp) +* CPU implementation: [`./src/caffe/layers/batch_norm_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/batch_norm_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cu) + +## Parameters + +* Parameters (`BatchNormParameter batch_norm_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/BatchNormParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/batchreindex.md b/docs/tutorial/layers/batchreindex.md new file mode 100644 index 00000000000..21b36c39ba5 --- /dev/null +++ b/docs/tutorial/layers/batchreindex.md @@ -0,0 +1,16 @@ +--- +title: Batch Reindex Layer +--- + +# Batch Reindex Layer + +* Layer type: `BatchReindex` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1BatchReindexLayer.html) +* Header: [`./include/caffe/layers/batch_reindex_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/batch_reindex_layer.hpp) +* CPU implementation: [`./src/caffe/layers/batch_reindex_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_reindex_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/batch_reindex_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_reindex_layer.cu) + + +## Parameters + +No parameters. diff --git a/docs/tutorial/layers/bias.md b/docs/tutorial/layers/bias.md new file mode 100644 index 00000000000..d3a00c2fc78 --- /dev/null +++ b/docs/tutorial/layers/bias.md @@ -0,0 +1,19 @@ +--- +title: Bias Layer +--- + +# Bias Layer + +* Layer type: `Bias` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1BiasLayer.html) +* Header: [`./include/caffe/layers/bias_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/bias_layer.hpp) +* CPU implementation: [`./src/caffe/layers/bias_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/bias_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/bias_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/bias_layer.cu) + +## Parameters +* Parameters (`BiasParameter bias_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/BiasParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/bnll.md b/docs/tutorial/layers/bnll.md new file mode 100644 index 00000000000..2b68b79ff83 --- /dev/null +++ b/docs/tutorial/layers/bnll.md @@ -0,0 +1,25 @@ +--- +title: BNLL Layer +--- + +# BNLL Layer + +* Layer type: `BNLL` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1BNLLLayer.html) +* Header: [`./include/caffe/layers/bnll_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/bnll_layer.hpp) +* CPU implementation: [`./src/caffe/layers/bnll_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/bnll_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/bnll_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/bnll_layer.cu) + +The `BNLL` (binomial normal log likelihood) layer computes the output as log(1 + exp(x)) for each input element x. 
+ +## Parameters +No parameters. + +## Sample + + layer { + name: "layer" + bottom: "in" + top: "out" + type: "BNLL" + } diff --git a/docs/tutorial/layers/concat.md b/docs/tutorial/layers/concat.md new file mode 100644 index 00000000000..c7b253953d7 --- /dev/null +++ b/docs/tutorial/layers/concat.md @@ -0,0 +1,40 @@ +--- +title: Concat Layer +--- + +# Concat Layer + +* Layer type: `Concat` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ConcatLayer.html) +* Header: [`./include/caffe/layers/concat_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/concat_layer.hpp) +* CPU implementation: [`./src/caffe/layers/concat_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/concat_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/concat_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/concat_layer.cu) +* Input + - `n_i * c_i * h * w` for each input blob i from 1 to K. +* Output + - if `axis = 0`: `(n_1 + n_2 + ... + n_K) * c_1 * h * w`, and all input `c_i` should be the same. + - if `axis = 1`: `n_1 * (c_1 + c_2 + ... + c_K) * h * w`, and all input `n_i` should be the same. +* Sample + + layer { + name: "concat" + bottom: "in1" + bottom: "in2" + top: "out" + type: "Concat" + concat_param { + axis: 1 + } + } + +The `Concat` layer is a utility layer that concatenates its multiple input blobs to one single output blob. + +## Parameters +* Parameters (`ConcatParameter concat_param`) + - Optional + - `axis` [default 1]: 0 for concatenation along num and 1 for channels. +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ConcatParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/contrastiveloss.md b/docs/tutorial/layers/contrastiveloss.md new file mode 100644 index 00000000000..bb1859d9f37 --- /dev/null +++ b/docs/tutorial/layers/contrastiveloss.md @@ -0,0 +1,20 @@ +--- +title: Contrastive Loss Layer +--- + +# Contrastive Loss Layer + +* Layer type: `ContrastiveLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ContrastiveLossLayer.html) +* Header: [`./include/caffe/layers/contrastive_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/contrastive_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/contrastive_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/contrastive_loss_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/contrastive_loss_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/contrastive_loss_layer.cu) + +## Parameters + +* Parameters (`ContrastiveLossParameter contrastive_loss_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ContrastiveLossParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/convolution.md b/docs/tutorial/layers/convolution.md new file mode 100644 index 00000000000..cc9f4fd0449 --- /dev/null +++ b/docs/tutorial/layers/convolution.md @@ -0,0 +1,63 @@ +--- +title: Convolution Layer +--- + +# Convolution Layer + +* Layer type: `Convolution` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ConvolutionLayer.html) +* Header: [`./include/caffe/layers/conv_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/conv_layer.hpp) +* CPU 
implementation: [`./src/caffe/layers/conv_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/conv_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu) +* Input + - `n * c_i * h_i * w_i` +* Output + - `n * c_o * h_o * w_o`, where `h_o = (h_i + 2 * pad_h - kernel_h) / stride_h + 1` and `w_o` likewise. + +The `Convolution` layer convolves the input image with a set of learnable filters, each producing one feature map in the output image. + +## Sample + +Sample (as seen in [`./models/bvlc_reference_caffenet/train_val.prototxt`](https://github.com/BVLC/caffe/blob/master/models/bvlc_reference_caffenet/train_val.prototxt)): + + layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + # learning rate and decay multipliers for the filters + param { lr_mult: 1 decay_mult: 1 } + # learning rate and decay multipliers for the biases + param { lr_mult: 2 decay_mult: 0 } + convolution_param { + num_output: 96 # learn 96 filters + kernel_size: 11 # each filter is 11x11 + stride: 4 # step 4 pixels between each filter application + weight_filler { + type: "gaussian" # initialize the filters from a Gaussian + std: 0.01 # distribution with stdev 0.01 (default mean: 0) + } + bias_filler { + type: "constant" # initialize the biases to zero (0) + value: 0 + } + } + } + +## Parameters +* Parameters (`ConvolutionParameter convolution_param`) + - Required + - `num_output` (`c_o`): the number of filters + - `kernel_size` (or `kernel_h` and `kernel_w`): specifies height and width of each filter + - Strongly recommended + - `weight_filler` [default `type: 'constant' value: 0`] + - Optional + - `bias_term` [default `true`]: specifies whether to learn and apply a set of additive biases to the filter outputs + - `pad` (or `pad_h` and `pad_w`) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input + - `stride` (or `stride_h` and `stride_w`) [default 1]: specifies the intervals at which to apply the filters to the input + - `group` (g) [default 1]: If g > 1, we restrict the connectivity of each filter to a subset of the input. Specifically, the input and output channels are separated into g groups, and the $$i$$th output group channels will only be connected to the $$i$$th input group channels. 
+* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ConvolutionParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/crop.md b/docs/tutorial/layers/crop.md new file mode 100644 index 00000000000..28f91241f74 --- /dev/null +++ b/docs/tutorial/layers/crop.md @@ -0,0 +1,20 @@ +--- +title: Crop Layer +--- + +# Crop Layer + +* Layer type: `Crop` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1CropLayer.html) +* Header: [`./include/caffe/layers/crop_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/crop_layer.hpp) +* CPU implementation: [`./src/caffe/layers/crop_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/crop_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/crop_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/crop_layer.cu) + +## Parameters + +* Parameters (`CropParameter crop_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/CropParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/data.md b/docs/tutorial/layers/data.md new file mode 100644 index 00000000000..58e0dcaab22 --- /dev/null +++ b/docs/tutorial/layers/data.md @@ -0,0 +1,29 @@ +--- +title: Database Layer +--- + +# Database Layer + +* Layer type: `Data` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1DataLayer.html) +* Header: [`./include/caffe/layers/data_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/data_layer.hpp) +* CPU implementation: [`./src/caffe/layers/data_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/data_layer.cpp) + + +## Parameters + +* Parameters (`DataParameter data_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/DataParameter.txt %} +{% endhighlight %} + +* Parameters + - Required + - `source`: the name of the directory containing the database + - `batch_size`: the number of inputs to process at one time + - Optional + - `rand_skip`: skip up to this number of inputs at the beginning; useful for asynchronous SGD + - `backend` [default `LEVELDB`]: choose whether to use a `LEVELDB` or `LMDB` backend + diff --git a/docs/tutorial/layers/deconvolution.md b/docs/tutorial/layers/deconvolution.md new file mode 100644 index 00000000000..2eff967d613 --- /dev/null +++ b/docs/tutorial/layers/deconvolution.md @@ -0,0 +1,22 @@ +--- +title: Deconvolution Layer +--- + +# Deconvolution Layer + +* Layer type: `Deconvolution` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1DeconvolutionLayer.html) +* Header: [`./include/caffe/layers/deconv_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/deconv_layer.hpp) +* CPU implementation: [`./src/caffe/layers/deconv_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/deconv_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/deconv_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/deconv_layer.cu) + +## Parameters + +Uses the same parameters as the Convolution layer. 
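+For example, a sketch of a common use, 2x upsampling; the blob names and the bilinear-filler configuration here are illustrative assumptions, not taken from a reference model:
+
+    layer {
+      name: "upsample"                      # hypothetical name
+      type: "Deconvolution"
+      bottom: "score"                       # hypothetical bottom blob
+      top: "upscore"
+      param { lr_mult: 0 }                  # freeze the weights for fixed bilinear upsampling
+      convolution_param {
+        num_output: 21                      # e.g. one channel per class
+        group: 21                           # one filter per channel, so channels stay independent
+        kernel_size: 4
+        stride: 2                           # roughly doubles h and w
+        bias_term: false
+        weight_filler { type: "bilinear" }  # initialize as bilinear interpolation
+      }
+    }
+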
+ +* Parameters (`ConvolutionParameter convolution_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ConvolutionParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/dropout.md b/docs/tutorial/layers/dropout.md new file mode 100644 index 00000000000..d8c6f9556be --- /dev/null +++ b/docs/tutorial/layers/dropout.md @@ -0,0 +1,20 @@ +--- +title: Dropout Layer +--- + +# Dropout Layer + +* Layer type: `Dropout` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1DropoutLayer.html) +* Header: [`./include/caffe/layers/dropout_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/dropout_layer.hpp) +* CPU implementation: [`./src/caffe/layers/dropout_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/dropout_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/dropout_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/dropout_layer.cu) + +## Parameters + +* Parameters (`DropoutParameter dropout_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/DropoutParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/dummydata.md b/docs/tutorial/layers/dummydata.md new file mode 100644 index 00000000000..d069f9c595e --- /dev/null +++ b/docs/tutorial/layers/dummydata.md @@ -0,0 +1,20 @@ +--- +title: Dummy Data Layer +--- + +# Dummy Data Layer + +* Layer type: `DummyData` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1DummyDataLayer.html) +* Header: [`./include/caffe/layers/dummy_data_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/dummy_data_layer.hpp) +* CPU implementation: [`./src/caffe/layers/dummy_data_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/dummy_data_layer.cpp) + + +## Parameters + +* Parameters (`DummyDataParameter dummy_data_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/DummyDataParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/eltwise.md b/docs/tutorial/layers/eltwise.md new file mode 100644 index 00000000000..70fe7910c5a --- /dev/null +++ b/docs/tutorial/layers/eltwise.md @@ -0,0 +1,20 @@ +--- +title: Eltwise Layer +--- + +# Eltwise Layer + +* Layer type: `Eltwise` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1EltwiseLayer.html) +* Header: [`./include/caffe/layers/eltwise_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/eltwise_layer.hpp) +* CPU implementation: [`./src/caffe/layers/eltwise_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/eltwise_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/eltwise_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/eltwise_layer.cu) + +## Parameters + +* Parameters (`EltwiseParameter eltwise_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/EltwiseParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/elu.md b/docs/tutorial/layers/elu.md new file mode 100644 index 00000000000..11db0f0e3d6 --- /dev/null +++ 
b/docs/tutorial/layers/elu.md @@ -0,0 +1,25 @@ +--- +title: ELU Layer +--- + +# ELU Layer + +* Layer type: `ELU` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ELULayer.html) +* Header: [`./include/caffe/layers/elu_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/elu_layer.hpp) +* CPU implementation: [`./src/caffe/layers/elu_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/elu_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/elu_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/elu_layer.cu) + +## References + +* Clevert, Djork-Arne, Thomas Unterthiner, and Sepp Hochreiter. + "Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)" [arXiv:1511.07289](https://arxiv.org/abs/1511.07289). (2015). + +## Parameters + +* Parameters (`ELUParameter elu_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ELUParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/embed.md b/docs/tutorial/layers/embed.md new file mode 100644 index 00000000000..271636d8d97 --- /dev/null +++ b/docs/tutorial/layers/embed.md @@ -0,0 +1,20 @@ +--- +title: Embed Layer +--- + +# Embed Layer + +* Layer type: `Embed` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1EmbedLayer.html) +* Header: [`./include/caffe/layers/embed_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/embed_layer.hpp) +* CPU implementation: [`./src/caffe/layers/embed_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/embed_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/embed_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/embed_layer.cu) + +## Parameters + +* Parameters (`EmbedParameter embed_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/EmbedParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/euclideanloss.md b/docs/tutorial/layers/euclideanloss.md new file mode 100644 index 00000000000..c1b72084c14 --- /dev/null +++ b/docs/tutorial/layers/euclideanloss.md @@ -0,0 +1,16 @@ +--- +title: Euclidean Loss Layer +--- +# Sum-of-Squares / Euclidean Loss Layer + +* Layer type: `EuclideanLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1EuclideanLossLayer.html) +* Header: [`./include/caffe/layers/euclidean_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/euclidean_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/euclidean_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/euclidean_loss_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/euclidean_loss_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/euclidean_loss_layer.cu) + +The Euclidean loss layer computes the sum of squares of differences of its two inputs, $$\frac 1 {2N} \sum_{i=1}^N \| x^1_i - x^2_i \|_2^2$$. + +## Parameters + +Does not take any parameters. 
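+
+## Sample
+
+A minimal usage sketch (the blob names are hypothetical): the first bottom carries the predictions $$x^1$$ and the second the regression targets $$x^2$$.
+
+    layer {
+      name: "loss"
+      type: "EuclideanLoss"
+      bottom: "pred"      # hypothetical: predictions
+      bottom: "target"    # hypothetical: regression targets
+      top: "loss"
+    }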
diff --git a/docs/tutorial/layers/exp.md b/docs/tutorial/layers/exp.md new file mode 100644 index 00000000000..ef2500ec214 --- /dev/null +++ b/docs/tutorial/layers/exp.md @@ -0,0 +1,24 @@ +--- +title: Exponential Layer +--- + +# Exponential Layer + +* Layer type: `Exp` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ExpLayer.html) +* Header: [`./include/caffe/layers/exp_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/exp_layer.hpp) +* CPU implementation: [`./src/caffe/layers/exp_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/exp_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/exp_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/exp_layer.cu) + +## Parameters + +* Parameters (`ExpParameter exp_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ExpParameter.txt %} +{% endhighlight %} + +## See also + +* [Power layer](power.html) diff --git a/docs/tutorial/layers/filter.md b/docs/tutorial/layers/filter.md new file mode 100644 index 00000000000..aeda9ee66f8 --- /dev/null +++ b/docs/tutorial/layers/filter.md @@ -0,0 +1,15 @@ +--- +title: Filter Layer +--- + +# Filter Layer + +* Layer type: `Filter` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1FilterLayer.html) +* Header: [`./include/caffe/layers/filter_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/filter_layer.hpp) +* CPU implementation: [`./src/caffe/layers/filter_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/filter_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/filter_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/filter_layer.cu) + +## Parameters + +Does not take any parameters. diff --git a/docs/tutorial/layers/flatten.md b/docs/tutorial/layers/flatten.md new file mode 100644 index 00000000000..ecf08262707 --- /dev/null +++ b/docs/tutorial/layers/flatten.md @@ -0,0 +1,21 @@ +--- +title: Flatten Layer +--- + +# Flatten Layer + +* Layer type: `Flatten` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1FlattenLayer.html) +* Header: [`./include/caffe/layers/flatten_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/flatten_layer.hpp) +* CPU implementation: [`./src/caffe/layers/flatten_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/flatten_layer.cpp) + +The `Flatten` layer is a utility layer that flattens an input of shape `n * c * h * w` to a simple vector output of shape `n * (c*h*w)`. 
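+
+## Sample
+
+A minimal sketch (hypothetical blob names):
+
+    layer {
+      name: "flatten"
+      type: "Flatten"
+      bottom: "conv_out"   # hypothetical: shape n * c * h * w
+      top: "flat"          # shape n * (c*h*w)
+    }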
+ +## Parameters + +* Parameters (`FlattenParameter flatten_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/FlattenParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/hdf5data.md b/docs/tutorial/layers/hdf5data.md new file mode 100644 index 00000000000..d6b7ea24d2e --- /dev/null +++ b/docs/tutorial/layers/hdf5data.md @@ -0,0 +1,20 @@ +--- +title: HDF5 Data Layer +--- + +# HDF5 Data Layer + +* Layer type: `HDF5Data` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1HDF5DataLayer.html) +* Header: [`./include/caffe/layers/hdf5_data_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/hdf5_data_layer.hpp) +* CPU implementation: [`./src/caffe/layers/hdf5_data_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hdf5_data_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/hdf5_data_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hdf5_data_layer.cu) + +## Parameters + +* Parameters (`HDF5DataParameter hdf5_data_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/HDF5DataParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/hdf5output.md b/docs/tutorial/layers/hdf5output.md new file mode 100644 index 00000000000..cfbe4ddb771 --- /dev/null +++ b/docs/tutorial/layers/hdf5output.md @@ -0,0 +1,25 @@ +--- +title: HDF5 Output Layer +--- + +# HDF5 Output Layer + +* Layer type: `HDF5Output` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1HDF5OutputLayer.html) +* Header: [`./include/caffe/layers/hdf5_output_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/hdf5_output_layer.hpp) +* CPU implementation: [`./src/caffe/layers/hdf5_output_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hdf5_output_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/hdf5_output_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hdf5_output_layer.cu) + +The HDF5 output layer performs the opposite function of the other layers in this section: it writes its input blobs to disk. 
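+
+## Sample
+
+A minimal sketch, assuming the usual configuration of two bottoms written out as the "data" and "label" datasets; the layer name and file name are hypothetical:
+
+    layer {
+      name: "hdf5_output"
+      type: "HDF5Output"
+      bottom: "data"
+      bottom: "label"
+      hdf5_output_param {
+        file_name: "output.h5"   # hypothetical output path
+      }
+    }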
+ +## Parameters + +* Parameters (`HDF5OutputParameter hdf5_output_param`) + - Required + - `file_name`: name of file to write to + +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/HDF5OutputParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/hingeloss.md b/docs/tutorial/layers/hingeloss.md new file mode 100644 index 00000000000..ef4fd95e29d --- /dev/null +++ b/docs/tutorial/layers/hingeloss.md @@ -0,0 +1,19 @@ +--- +title: Hinge Loss Layer +--- + +# Hinge (L1, L2) Loss Layer + +* Layer type: `HingeLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1HingeLossLayer.html) +* Header: [`./include/caffe/layers/hinge_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/hinge_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/hinge_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/hinge_loss_layer.cpp) + +## Parameters + +* Parameters (`HingeLossParameter hinge_loss_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/HingeLossParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/im2col.md b/docs/tutorial/layers/im2col.md new file mode 100644 index 00000000000..0badc1cdd93 --- /dev/null +++ b/docs/tutorial/layers/im2col.md @@ -0,0 +1,16 @@ +--- +title: Im2col Layer +--- + +# im2col + +* Layer type: `Im2col` +* Header: [`./include/caffe/layers/im2col_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/im2col_layer.hpp) +* CPU implementation: [`./src/caffe/layers/im2col_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/im2col_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/im2col_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/im2col_layer.cu) + +`Im2col` is a helper for doing the image-to-column transformation that you most +likely do not need to know about. This is used in Caffe's original convolution +to do matrix multiplication by laying out all patches into a matrix. 
+ + diff --git a/docs/tutorial/layers/imagedata.md b/docs/tutorial/layers/imagedata.md new file mode 100644 index 00000000000..82c8a600be3 --- /dev/null +++ b/docs/tutorial/layers/imagedata.md @@ -0,0 +1,27 @@ +--- +title: ImageData Layer +--- + +# ImageData Layer + +* Layer type: `ImageData` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ImageDataLayer.html) +* Header: [`./include/caffe/layers/image_data_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/image_data_layer.hpp) +* CPU implementation: [`./src/caffe/layers/image_data_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/image_data_layer.cpp) + +## Parameters + +* Parameters (`ImageDataParameter image_data_param`) + - Required + - `source`: name of a text file, with each line giving an image filename and label + - `batch_size`: number of images to batch together + - Optional + - `rand_skip` + - `shuffle` [default false] + - `new_height`, `new_width`: if provided, resize all images to this size + +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ImageDataParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/infogainloss.md b/docs/tutorial/layers/infogainloss.md new file mode 100644 index 00000000000..b3b690d2621 --- /dev/null +++ b/docs/tutorial/layers/infogainloss.md @@ -0,0 +1,23 @@ +--- +title: Infogain Loss Layer +--- + +# Infogain Loss Layer + +* Layer type: `InfogainLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1InfogainLossLayer.html) +* Header: [`./include/caffe/layers/infogain_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/infogain_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/infogain_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/infogain_loss_layer.cpp) + +A generalization of [MultinomialLogisticLossLayer](multinomiallogisticloss.html) that takes an "information gain" (infogain) matrix specifying the "value" of all label pairs. + +Equivalent to the [MultinomialLogisticLossLayer](multinomiallogisticloss.html) if the infogain matrix is the identity. 
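+
+## Sample
+
+A usage sketch, assuming the infogain matrix is supplied as a serialized blob through the `source` field; the blob names and the file path are hypothetical:
+
+    layer {
+      name: "loss"
+      type: "InfogainLoss"
+      bottom: "prob"       # hypothetical: predicted probabilities
+      bottom: "label"
+      top: "infogain_loss"
+      infogain_loss_param {
+        source: "infogain_H.binaryproto"   # hypothetical path to the infogain matrix
+      }
+    }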
+ +## Parameters + +* Parameters (`InfogainLossParameter infogain_loss_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/InfogainLossParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/innerproduct.md b/docs/tutorial/layers/innerproduct.md new file mode 100644 index 00000000000..98b9bea81f5 --- /dev/null +++ b/docs/tutorial/layers/innerproduct.md @@ -0,0 +1,59 @@ +--- +title: Inner Product / Fully Connected Layer +--- + +# Inner Product / Fully Connected Layer + +* Layer type: `InnerProduct` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1InnerProductLayer.html) +* Header: [`./include/caffe/layers/inner_product_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/inner_product_layer.hpp) +* CPU implementation: [`./src/caffe/layers/inner_product_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/inner_product_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/inner_product_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/inner_product_layer.cu) + +* Input + - `n * c_i * h_i * w_i` +* Output + - `n * c_o * 1 * 1` +* Sample + + layer { + name: "fc8" + type: "InnerProduct" + # learning rate and decay multipliers for the weights + param { lr_mult: 1 decay_mult: 1 } + # learning rate and decay multipliers for the biases + param { lr_mult: 2 decay_mult: 0 } + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } + bottom: "fc7" + top: "fc8" + } + +The `InnerProduct` layer (also usually referred to as the fully connected layer) treats the input as a simple vector and produces an output in the form of a single vector (with the blob's height and width set to 1). 
+ + +## Parameters + +* Parameters (`InnerProductParameter inner_product_param`) + - Required + - `num_output` (`c_o`): the number of outputs + - Strongly recommended + - `weight_filler` [default `type: 'constant' value: 0`] + - Optional + - `bias_filler` [default `type: 'constant' value: 0`] + - `bias_term` [default `true`]: specifies whether to learn and apply a set of additive biases to the outputs +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/InnerProductParameter.txt %} +{% endhighlight %} + diff --git a/docs/tutorial/layers/input.md b/docs/tutorial/layers/input.md new file mode 100644 index 00000000000..b74c35d2fb5 --- /dev/null +++ b/docs/tutorial/layers/input.md @@ -0,0 +1,19 @@ +--- +title: Input Layer +--- + +# Input Layer + +* Layer type: `Input` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1InputLayer.html) +* Header: [`./include/caffe/layers/input_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/input_layer.hpp) +* CPU implementation: [`./src/caffe/layers/input_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/input_layer.cpp) + +## Parameters + +* Parameters (`InputParameter input_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/InputParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/log.md b/docs/tutorial/layers/log.md new file mode 100644 index 00000000000..df52037489c --- /dev/null +++ b/docs/tutorial/layers/log.md @@ -0,0 +1,20 @@ +--- +title: Log Layer +--- + +# Log Layer + +* Layer type: `Log` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1LogLayer.html) +* Header: [`./include/caffe/layers/log_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/log_layer.hpp) +* CPU implementation: [`./src/caffe/layers/log_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/log_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/log_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/log_layer.cu) + +## Parameters + +* Parameters (`LogParameter log_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/LogParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/lrn.md b/docs/tutorial/layers/lrn.md new file mode 100644 index 00000000000..f5e4829279d --- /dev/null +++ b/docs/tutorial/layers/lrn.md @@ -0,0 +1,28 @@ +--- +title: Local Response Normalization (LRN) +--- + +# Local Response Normalization (LRN) + +* Layer type: `LRN` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1LRNLayer.html) +* Header: [`./include/caffe/layers/lrn_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/lrn_layer.hpp) +* CPU Implementation: [`./src/caffe/layers/lrn_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lrn_layer.cpp) +* CUDA GPU Implementation: [`./src/caffe/layers/lrn_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lrn_layer.cu) +* Parameters (`LRNParameter lrn_param`) + - Optional + - `local_size` [default 5]: the number of channels to sum over (for cross channel LRN) or the side length of the square region to sum over (for within channel LRN) 
+ - `alpha` [default 1]: the scaling parameter (see below) + - `beta` [default 0.75]: the exponent (see below) + - `norm_region` [default `ACROSS_CHANNELS`]: whether to sum over adjacent channels (`ACROSS_CHANNELS`) or nearby spatial locations (`WITHIN_CHANNEL`) + +The local response normalization layer performs a kind of "lateral inhibition" by normalizing over local input regions. In `ACROSS_CHANNELS` mode, the local regions extend across nearby channels, but have no spatial extent (i.e., they have shape `local_size x 1 x 1`). In `WITHIN_CHANNEL` mode, the local regions extend spatially, but are in separate channels (i.e., they have shape `1 x local_size x local_size`). Each input value is divided by $$(1 + (\alpha/n) \sum_i x_i^2)^\beta$$, where $$n$$ is the size of each local region, and the sum is taken over the region centered at that value (zero padding is added where necessary). + +## Parameters + +* Parameters (`LRNParameter lrn_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/LRNParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/lstm.md b/docs/tutorial/layers/lstm.md new file mode 100644 index 00000000000..8e4095e950b --- /dev/null +++ b/docs/tutorial/layers/lstm.md @@ -0,0 +1,21 @@ +--- +title: LSTM Layer +--- + +# LSTM Layer + +* Layer type: `LSTM` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1LSTMLayer.html) +* Header: [`./include/caffe/layers/lstm_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/lstm_layer.hpp) +* CPU implementation: [`./src/caffe/layers/lstm_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_layer.cpp) +* CPU implementation (helper): [`./src/caffe/layers/lstm_unit_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_unit_layer.cpp) +* CUDA GPU implementation (helper): [`./src/caffe/layers/lstm_unit_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/lstm_unit_layer.cu) + +## Parameters + +* Parameters (`RecurrentParameter recurrent_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/RecurrentParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/memorydata.md b/docs/tutorial/layers/memorydata.md new file mode 100644 index 00000000000..afce4a24a28 --- /dev/null +++ b/docs/tutorial/layers/memorydata.md @@ -0,0 +1,25 @@ +--- +title: Memory Data Layer +--- + +# Memory Data Layer + +* Layer type: `MemoryData` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1MemoryDataLayer.html) +* Header: [`./include/caffe/layers/memory_data_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/memory_data_layer.hpp) +* CPU implementation: [`./src/caffe/layers/memory_data_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/memory_data_layer.cpp) + +The memory data layer reads data directly from memory, without copying it. In order to use it, one must call `MemoryDataLayer::Reset` (from C++) or `Net.set_input_arrays` (from Python) in order to specify a source of contiguous data (as 4D row major array), which is read one batch-sized chunk at a time. 
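+
+## Sample
+
+A minimal sketch (the sizes are hypothetical); note that the arrays themselves are supplied via `Reset`/`set_input_arrays` as described above, not in the prototxt:
+
+    layer {
+      name: "data"
+      type: "MemoryData"
+      top: "data"
+      top: "label"
+      memory_data_param {
+        batch_size: 32   # hypothetical chunk size
+        channels: 3
+        height: 227
+        width: 227
+      }
+    }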
+ +## Parameters + +* Parameters (`MemoryDataParameter memory_data_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/MemoryDataParameter.txt %} +{% endhighlight %} + +* Parameters + - Required + - `batch_size`, `channels`, `height`, `width`: specify the size of input chunks to read from memory diff --git a/docs/tutorial/layers/multinomiallogisticloss.md b/docs/tutorial/layers/multinomiallogisticloss.md new file mode 100644 index 00000000000..5eab74a8a69 --- /dev/null +++ b/docs/tutorial/layers/multinomiallogisticloss.md @@ -0,0 +1,19 @@ +--- +title: Multinomial Logistic Loss Layer +--- + +# Multinomial Logistic Loss Layer + +* Layer type: `MultinomialLogisticLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1MultinomialLogisticLossLayer.html) +* Header: [`./include/caffe/layers/multinomial_logistic_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/multinomial_logistic_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/multinomial_logistic_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/multinomial_logistic_loss_layer.cpp) + +## Parameters + +* Parameters (`LossParameter loss_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/LossParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/mvn.md b/docs/tutorial/layers/mvn.md new file mode 100644 index 00000000000..08e44887d22 --- /dev/null +++ b/docs/tutorial/layers/mvn.md @@ -0,0 +1,20 @@ +--- +title: Mean-Variance Normalization (MVN) Layer +--- + +# Mean-Variance Normalization (MVN) Layer + +* Layer type: `MVN` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1MVNLayer.html) +* Header: [`./include/caffe/layers/mvn_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/mvn_layer.hpp) +* CPU implementation: [`./src/caffe/layers/mvn_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/mvn_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/mvn_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/mvn_layer.cu) + +## Parameters + +* Parameters (`MVNParameter mvn_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/MVNParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/parameter.md b/docs/tutorial/layers/parameter.md new file mode 100644 index 00000000000..b7e85ec5c9a --- /dev/null +++ b/docs/tutorial/layers/parameter.md @@ -0,0 +1,21 @@ +--- +title: Parameter Layer +--- + +# Parameter Layer + +* Layer type: `Parameter` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ParameterLayer.html) +* Header: [`./include/caffe/layers/parameter_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/parameter_layer.hpp) +* CPU implementation: [`./src/caffe/layers/parameter_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/parameter_layer.cpp) + +See [https://github.com/BVLC/caffe/pull/2079](https://github.com/BVLC/caffe/pull/2079). 
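+
+## Sample
+
+A minimal sketch (the layer and top names are hypothetical), assuming `parameter_param` specifies the shape of the learnable blob exposed as the top, per the pull request linked above:
+
+    layer {
+      name: "weights"
+      type: "Parameter"
+      top: "w"                     # the learnable parameter blob, usable as a bottom elsewhere
+      parameter_param {
+        shape { dim: 16 dim: 8 }   # hypothetical parameter shape
+      }
+    }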
+ +## Parameters + +* Parameters (`ParameterParameter parameter_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ParameterParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/pooling.md b/docs/tutorial/layers/pooling.md new file mode 100644 index 00000000000..12669ee8d45 --- /dev/null +++ b/docs/tutorial/layers/pooling.md @@ -0,0 +1,47 @@ +--- +title: Pooling Layer +--- +# Pooling + +* Layer type: `Pooling` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1PoolingLayer.html) +* Header: [`./include/caffe/layers/pooling_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/pooling_layer.hpp) +* CPU implementation: [`./src/caffe/layers/pooling_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/pooling_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu) + +* Input + - `n * c * h_i * w_i` +* Output + - `n * c * h_o * w_o`, where h_o and w_o are computed in the same way as convolution. + +## Parameters + +* Parameters (`PoolingParameter pooling_param`) + - Required + - `kernel_size` (or `kernel_h` and `kernel_w`): specifies height and width of each filter + - Optional + - `pool` [default MAX]: the pooling method. Currently MAX, AVE, or STOCHASTIC + - `pad` (or `pad_h` and `pad_w`) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input + - `stride` (or `stride_h` and `stride_w`) [default 1]: specifies the intervals at which to apply the filters to the input + + +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/PoolingParameter.txt %} +{% endhighlight %} + +## Sample +* Sample (as seen in [`./models/bvlc_reference_caffenet/train_val.prototxt`](https://github.com/BVLC/caffe/blob/master/models/bvlc_reference_caffenet/train_val.prototxt)) + + layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 # pool over a 3x3 region + stride: 2 # step two pixels (in the bottom blob) between pooling regions + } + } diff --git a/docs/tutorial/layers/power.md b/docs/tutorial/layers/power.md new file mode 100644 index 00000000000..d6617529b7d --- /dev/null +++ b/docs/tutorial/layers/power.md @@ -0,0 +1,46 @@ +--- +title: Power Layer +--- + +# Power Layer + +* Layer type: `Power` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1PowerLayer.html) +* Header: [`./include/caffe/layers/power_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/power_layer.hpp) +* CPU implementation: [`./src/caffe/layers/power_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/power_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/power_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/power_layer.cu) + +The `Power` layer computes the output as (shift + scale * x) ^ power for each input element x. 
+ +## Parameters +* Parameters (`PowerParameter power_param`) + - Optional + - `power` [default 1] + - `scale` [default 1] + - `shift` [default 0] + +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/PowerParameter.txt %} +{% endhighlight %} + + + +## Sample + + layer { + name: "layer" + bottom: "in" + top: "out" + type: "Power" + power_param { + power: 1 + scale: 1 + shift: 0 + } + } + +## See also + +* [Exponential layer](exp.html) diff --git a/docs/tutorial/layers/prelu.md b/docs/tutorial/layers/prelu.md new file mode 100644 index 00000000000..e7b7b44acb6 --- /dev/null +++ b/docs/tutorial/layers/prelu.md @@ -0,0 +1,20 @@ +--- +title: PReLU Layer +--- + +# PReLU Layer + +* Layer type: `PReLU` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1PReLULayer.html) +* Header: [`./include/caffe/layers/prelu_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/prelu_layer.hpp) +* CPU implementation: [`./src/caffe/layers/prelu_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/prelu_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/prelu_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/prelu_layer.cu) + +## Parameters + +* Parameters (`PReLUParameter prelu_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/PReLUParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/python.md b/docs/tutorial/layers/python.md new file mode 100644 index 00000000000..2e30b3a79f6 --- /dev/null +++ b/docs/tutorial/layers/python.md @@ -0,0 +1,27 @@ +--- +title: Python Layer +--- + +# Python Layer + +* Layer type: `Python` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1PythonLayer.html) +* Header: [`./include/caffe/layers/python_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/python_layer.hpp) + +The Python layer allows users to add customized layers without modifying the Caffe core code. 
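+
+## Sample
+
+A usage sketch based on the Euclidean loss example linked below, assuming `pyloss.py` (which defines an `EuclideanLossLayer` class) is on the `PYTHONPATH`; the blob names are hypothetical:
+
+    layer {
+      name: "loss"
+      type: "Python"
+      bottom: "pred"     # hypothetical: predictions
+      bottom: "target"   # hypothetical: targets
+      top: "loss"
+      python_param {
+        module: "pyloss"             # the Python module to import
+        layer: "EuclideanLossLayer"  # the layer class within that module
+      }
+      loss_weight: 1     # make the top contribute to the objective
+    }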
+ +## Parameters + +* Parameters (`PythonParameter python_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/PythonParameter.txt %} +{% endhighlight %} + +## Examples and tutorials + +* Simple Euclidean loss example + * [Python code](https://github.com/BVLC/caffe/blob/master/examples/pycaffe/layers/pyloss.py) + * [Prototxt](https://github.com/BVLC/caffe/blob/master/examples/pycaffe/linreg.prototxt) +* [Tutorial for writing Python layers with DIGITS](https://github.com/NVIDIA/DIGITS/tree/master/examples/python-layer) diff --git a/docs/tutorial/layers/recurrent.md b/docs/tutorial/layers/recurrent.md new file mode 100644 index 00000000000..a882b722f8a --- /dev/null +++ b/docs/tutorial/layers/recurrent.md @@ -0,0 +1,20 @@ +--- +title: Recurrent Layer +--- + +# Recurrent Layer + +* Layer type: `Recurrent` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1RecurrentLayer.html) +* Header: [`./include/caffe/layers/recurrent_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/recurrent_layer.hpp) +* CPU implementation: [`./src/caffe/layers/recurrent_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/recurrent_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/recurrent_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/recurrent_layer.cu) + +## Parameters + +* Parameters (`RecurrentParameter recurrent_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/RecurrentParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/reduction.md b/docs/tutorial/layers/reduction.md new file mode 100644 index 00000000000..db55414b0af --- /dev/null +++ b/docs/tutorial/layers/reduction.md @@ -0,0 +1,20 @@ +--- +title: Reduction Layer +--- + +# Reduction Layer + +* Layer type: `Reduction` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ReductionLayer.html) +* Header: [`./include/caffe/layers/reduction_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/reduction_layer.hpp) +* CPU implementation: [`./src/caffe/layers/reduction_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/reduction_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/reduction_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/reduction_layer.cu) + +## Parameters + +* Parameters (`ReductionParameter reduction_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ReductionParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/relu.md b/docs/tutorial/layers/relu.md new file mode 100644 index 00000000000..01aab0af4fa --- /dev/null +++ b/docs/tutorial/layers/relu.md @@ -0,0 +1,32 @@ +--- +title: ReLU / Rectified-Linear and Leaky-ReLU Layer +--- + +# ReLU / Rectified-Linear and Leaky-ReLU Layer + +* Layer type: `ReLU` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ReLULayer.html) +* Header: [`./include/caffe/layers/relu_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/relu_layer.hpp) +* CPU implementation: [`./src/caffe/layers/relu_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/relu_layer.cpp) +* CUDA 
GPU implementation: [`./src/caffe/layers/relu_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/relu_layer.cu) +* Sample (as seen in [`./models/bvlc_reference_caffenet/train_val.prototxt`](https://github.com/BVLC/caffe/blob/master/models/bvlc_reference_caffenet/train_val.prototxt)) + + layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" + } + +Given an input value x, the `ReLU` layer computes the output as x if x > 0 and negative_slope * x if x <= 0. When the negative slope parameter is not set, it is equivalent to the standard ReLU function of taking max(x, 0). It also supports in-place computation, meaning that the bottom and the top blob can be the same to reduce memory consumption. + +## Parameters + +* Parameters (`ReLUParameter relu_param`) + - Optional + - `negative_slope` [default 0]: specifies whether to leak the negative part by multiplying it with the slope value rather than setting it to 0. +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ReLUParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/reshape.md b/docs/tutorial/layers/reshape.md new file mode 100644 index 00000000000..92d23f2c73e --- /dev/null +++ b/docs/tutorial/layers/reshape.md @@ -0,0 +1,51 @@ +--- +title: Reshape Layer +--- + +# Reshape Layer +* Layer type: `Reshape` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ReshapeLayer.html) +* Header: [`./include/caffe/layers/reshape_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/reshape_layer.hpp) +* Implementation: [`./src/caffe/layers/reshape_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/reshape_layer.cpp) + +* Input + - a single blob with arbitrary dimensions +* Output + - the same blob, with modified dimensions, as specified by `reshape_param` + +* Sample + + layer { + name: "reshape" + type: "Reshape" + bottom: "input" + top: "output" + reshape_param { + shape { + dim: 0 # copy the dimension from below + dim: 2 + dim: 3 + dim: -1 # infer it from the other dimensions + } + } + } + +The `Reshape` layer can be used to change the dimensions of its input, without changing its data. Just like the `Flatten` layer, only the dimensions are changed; no data is copied in the process. + +Output dimensions are specified by the `ReshapeParameter` proto. Positive numbers are used directly, setting the corresponding dimension of the output blob. In addition, two special values are accepted for any of the target dimension values: + +* **0** means "copy the respective dimension of the bottom layer". That is, if the bottom has 2 as its 1st dimension, the top will have 2 as its 1st dimension as well, given `dim: 0` as the 1st target dimension. +* **-1** stands for "infer this from the other dimensions". This behavior is similar to that of -1 in *numpy*'s reshape or of `[]` in *MATLAB*'s reshape: this dimension is calculated to keep the overall element count the same as in the bottom layer. At most one -1 can be used in a reshape operation. + +As another example, specifying `reshape_param { shape { dim: 0 dim: -1 } }` makes the layer behave in exactly the same way as the `Flatten` layer. 
+ +## Parameters + +* Parameters (`ReshapeParameter reshape_param`) + - Optional: (also see detailed description below) + - `shape` +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ReshapeParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/rnn.md b/docs/tutorial/layers/rnn.md new file mode 100644 index 00000000000..b6fcf47133f --- /dev/null +++ b/docs/tutorial/layers/rnn.md @@ -0,0 +1,19 @@ +--- +title: RNN Layer +--- + +# RNN Layer + +* Layer type: `RNN` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1RNNLayer.html) +* Header: [`./include/caffe/layers/rnn_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/rnn_layer.hpp) +* CPU implementation: [`./src/caffe/layers/rnn_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/rnn_layer.cpp) + +## Parameters + +* Parameters (`RecurrentParameter recurrent_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/RecurrentParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/scale.md b/docs/tutorial/layers/scale.md new file mode 100644 index 00000000000..0e27549ad52 --- /dev/null +++ b/docs/tutorial/layers/scale.md @@ -0,0 +1,20 @@ +--- +title: Scale Layer +--- + +# Scale Layer + +* Layer type: `Scale` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1ScaleLayer.html) +* Header: [`./include/caffe/layers/scale_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/scale_layer.hpp) +* CPU implementation: [`./src/caffe/layers/scale_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/scale_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/scale_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/scale_layer.cu) + +## Parameters + +* Parameters (`ScaleParameter scale_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ScaleParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/sigmoid.md b/docs/tutorial/layers/sigmoid.md new file mode 100644 index 00000000000..f18ac4b84ec --- /dev/null +++ b/docs/tutorial/layers/sigmoid.md @@ -0,0 +1,30 @@ +--- +title: Sigmoid Layer +--- + +# Sigmoid Layer + +* Layer type: `Sigmoid` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SigmoidLayer.html) +* Header: [`./include/caffe/layers/sigmoid_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/sigmoid_layer.hpp) +* CPU implementation: [`./src/caffe/layers/sigmoid_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/sigmoid_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/sigmoid_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/sigmoid_layer.cu) +* Example (from [`./examples/mnist/mnist_autoencoder.prototxt`](https://github.com/BVLC/caffe/blob/master/examples/mnist/mnist_autoencoder.prototxt)): + + layer { + name: "encode1neuron" + bottom: "encode1" + top: "encode1neuron" + type: "Sigmoid" + } + +The `Sigmoid` layer computes `sigmoid(x)` for each element `x` in the bottom blob. 
+ +## Parameters + +* Parameters (`SigmoidParameter sigmoid_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/SigmoidParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/sigmoidcrossentropyloss.md b/docs/tutorial/layers/sigmoidcrossentropyloss.md new file mode 100644 index 00000000000..a6e42cadfa9 --- /dev/null +++ b/docs/tutorial/layers/sigmoidcrossentropyloss.md @@ -0,0 +1,13 @@ +--- +title: Sigmoid Cross-Entropy Loss Layer +--- + +# Sigmoid Cross-Entropy Loss Layer + +* Layer type: `SigmoidCrossEntropyLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SigmoidCrossEntropyLossLayer.html) +* Header: [`./include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu) + +To-do. diff --git a/docs/tutorial/layers/silence.md b/docs/tutorial/layers/silence.md new file mode 100644 index 00000000000..8b4579a9935 --- /dev/null +++ b/docs/tutorial/layers/silence.md @@ -0,0 +1,17 @@ +--- +title: Silence Layer +--- + +# Silence Layer + +* Layer type: `Silence` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SilenceLayer.html) +* Header: [`./include/caffe/layers/silence_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/silence_layer.hpp) +* CPU implementation: [`./src/caffe/layers/silence_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/silence_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/silence_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/silence_layer.cu) + +Silences a blob, so that it is not printed. + +## Parameters + +No parameters. diff --git a/docs/tutorial/layers/slice.md b/docs/tutorial/layers/slice.md new file mode 100644 index 00000000000..a492f1e82b9 --- /dev/null +++ b/docs/tutorial/layers/slice.md @@ -0,0 +1,42 @@ +--- +title: Slice Layer +--- + +# Slice Layer + +* Layer type: `Slice` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SliceLayer.html) +* Header: [`./include/caffe/layers/slice_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/slice_layer.hpp) +* CPU implementation: [`./src/caffe/layers/slice_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/slice_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/slice_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/slice_layer.cu) + +The `Slice` layer is a utility layer that slices an input blob into multiple output blobs along a given dimension (currently num or channel only) with given slice indices.
+ +* Sample + + layer { + name: "slicer_label" + type: "Slice" + bottom: "label" + ## Example of label with a shape N x 3 x 1 x 1 + top: "label1" + top: "label2" + top: "label3" + slice_param { + axis: 1 + slice_point: 1 + slice_point: 2 + } + } + +`axis` indicates the target axis; `slice_point` indicates indices in the selected dimension (the number of indices must be equal to the number of top blobs minus one). + +## Parameters + +* Parameters (`SliceParameter slice_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/SliceParameter.txt %} +{% endhighlight %} + diff --git a/docs/tutorial/layers/softmax.md b/docs/tutorial/layers/softmax.md new file mode 100644 index 00000000000..e5d53425141 --- /dev/null +++ b/docs/tutorial/layers/softmax.md @@ -0,0 +1,24 @@ +--- +title: Softmax Layer +--- + +# Softmax Layer + +* Layer type: `Softmax` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SoftmaxLayer.html) +* Header: [`./include/caffe/layers/softmax_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/softmax_layer.hpp) +* CPU implementation: [`./src/caffe/layers/softmax_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/softmax_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/softmax_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/softmax_layer.cu) + +## Parameters + +* Parameters (`SoftmaxParameter softmax_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/SoftmaxParameter.txt %} +{% endhighlight %} + +## See also + +* [Softmax loss layer](softmaxwithloss.html) diff --git a/docs/tutorial/layers/softmaxwithloss.md b/docs/tutorial/layers/softmaxwithloss.md new file mode 100644 index 00000000000..d9a6774a0ed --- /dev/null +++ b/docs/tutorial/layers/softmaxwithloss.md @@ -0,0 +1,33 @@ +--- +title: Softmax with Loss Layer +--- + +# Softmax with Loss Layer + +* Layer type: `SoftmaxWithLoss` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SoftmaxWithLossLayer.html) +* Header: [`./include/caffe/layers/softmax_loss_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/softmax_loss_layer.hpp) +* CPU implementation: [`./src/caffe/layers/softmax_loss_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/softmax_loss_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/softmax_loss_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/softmax_loss_layer.cu) + +The softmax loss layer computes the multinomial logistic loss of the softmax of its inputs. It's conceptually identical to a softmax layer followed by a multinomial logistic loss layer, but provides a more numerically stable gradient.
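+
+To see where the stability comes from, here is a minimal NumPy sketch of the fused computation (a hand-rolled illustration under assumed names, not the Caffe code): subtracting the per-row maximum before exponentiating prevents overflow, and the log-probability is formed directly instead of passing a possibly-underflowed probability through a separate log:
+
+    import numpy as np
+
+    def softmax_log_loss(scores, labels):
+        # scores: (N, K) raw predictions; labels: (N,) integer class ids
+        shifted = scores - scores.max(axis=1, keepdims=True)  # exp() cannot overflow
+        log_prob = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
+        return -log_prob[np.arange(labels.shape[0]), labels].mean()
+
+    # softmax_log_loss(np.array([[2., 1., 0.]]), np.array([0])) -> approx. 0.4076
+
+The gradient of the fused form is also simple and well behaved: the softmax of the scores minus the one-hot labels, scaled by 1/N.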
+ +## Parameters + +* Parameters (`SoftmaxParameter softmax_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/SoftmaxParameter.txt %} +{% endhighlight %} + +* Parameters (`LossParameter loss_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/LossParameter.txt %} +{% endhighlight %} + +## See also + +* [Softmax layer](softmax.html) diff --git a/docs/tutorial/layers/split.md b/docs/tutorial/layers/split.md new file mode 100644 index 00000000000..4fb71d1f26b --- /dev/null +++ b/docs/tutorial/layers/split.md @@ -0,0 +1,17 @@ +--- +title: Split Layer +--- + +# Split Layer + +* Layer type: `Split` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SplitLayer.html) +* Header: [`./include/caffe/layers/split_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/split_layer.hpp) +* CPU implementation: [`./src/caffe/layers/split_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/split_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/split_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/split_layer.cu) + +The `Split` layer is a utility layer that splits an input blob into multiple output blobs. This is used when a blob is fed into multiple layers. + +## Parameters + +Does not take any parameters. diff --git a/docs/tutorial/layers/spp.md b/docs/tutorial/layers/spp.md new file mode 100644 index 00000000000..26e5862023e --- /dev/null +++ b/docs/tutorial/layers/spp.md @@ -0,0 +1,20 @@ +--- +title: Spatial Pyramid Pooling Layer +--- + +# Spatial Pyramid Pooling Layer + +* Layer type: `SPP` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1SPPLayer.html) +* Header: [`./include/caffe/layers/spp_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/spp_layer.hpp) +* CPU implementation: [`./src/caffe/layers/spp_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/spp_layer.cpp) + + +## Parameters + +* Parameters (`SPPParameter spp_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/SPPParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/tanh.md b/docs/tutorial/layers/tanh.md new file mode 100644 index 00000000000..360634596f9 --- /dev/null +++ b/docs/tutorial/layers/tanh.md @@ -0,0 +1,18 @@ +--- +title: TanH Layer +--- + +# TanH Layer + +* Header: [`./include/caffe/layers/tanh_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/tanh_layer.hpp) +* CPU implementation: [`./src/caffe/layers/tanh_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/tanh_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/tanh_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/tanh_layer.cu) + +## Parameters + +* Parameters (`TanHParameter tanh_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/TanHParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/threshold.md b/docs/tutorial/layers/threshold.md new file mode 100644 index 00000000000..819e9e6f96d --- /dev/null +++
b/docs/tutorial/layers/threshold.md @@ -0,0 +1,18 @@ +--- +title: Threshold Layer +--- + +# Threshold Layer + +* Header: [`./include/caffe/layers/threshold_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/threshold_layer.hpp) +* CPU implementation: [`./src/caffe/layers/threshold_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/threshold_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/threshold_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/threshold_layer.cu) + +## Parameters + +* Parameters (`ThresholdParameter threshold_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/ThresholdParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/tile.md b/docs/tutorial/layers/tile.md new file mode 100644 index 00000000000..ea03aaa43af --- /dev/null +++ b/docs/tutorial/layers/tile.md @@ -0,0 +1,20 @@ +--- +title: Tile Layer +--- + +# Tile Layer + +* Layer type: `Tile` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1TileLayer.html) +* Header: [`./include/caffe/layers/tile_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/tile_layer.hpp) +* CPU implementation: [`./src/caffe/layers/tile_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/tile_layer.cpp) +* CUDA GPU implementation: [`./src/caffe/layers/tile_layer.cu`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/tile_layer.cu) + +## Parameters + +* Parameters (`TileParameter tile_param`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/TileParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/layers/windowdata.md b/docs/tutorial/layers/windowdata.md new file mode 100644 index 00000000000..0cb4a8dfeb7 --- /dev/null +++ b/docs/tutorial/layers/windowdata.md @@ -0,0 +1,19 @@ +--- +title: WindowData Layer +--- + +# WindowData Layer + +* Layer type: `WindowData` +* [Doxygen Documentation](http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1WindowDataLayer.html) +* Header: [`./include/caffe/layers/window_data_layer.hpp`](https://github.com/BVLC/caffe/blob/master/include/caffe/layers/window_data_layer.hpp) +* CPU implementation: [`./src/caffe/layers/window_data_layer.cpp`](https://github.com/BVLC/caffe/blob/master/src/caffe/layers/window_data_layer.cpp) + +## Parameters + +* Parameters (`WindowDataParameter`) +* From [`./src/caffe/proto/caffe.proto`](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto): + +{% highlight Protobuf %} +{% include proto/WindowDataParameter.txt %} +{% endhighlight %} diff --git a/docs/tutorial/solver.md b/docs/tutorial/solver.md index b719f715a4b..81c626386a2 100644 --- a/docs/tutorial/solver.md +++ b/docs/tutorial/solver.md @@ -209,18 +209,11 @@ What distinguishes the method from SGD is the weight setting $$ W $$ on which we The **RMSprop** (`type: "RMSProp"`), suggested by Tieleman in a Coursera course lecture, is a gradient-based optimization method (like SGD). 
The update formulas are $$ -(v_t)_i = -\begin{cases} -(v_{t-1})_i + \delta, &(\nabla L(W_t))_i(\nabla L(W_{t-1}))_i > 0\\ -(v_{t-1})_i \cdot (1-\delta), & \text{else} -\end{cases} +\operatorname{MS}((W_t)_i)= \delta\operatorname{MS}((W_{t-1})_i)+ (1-\delta)(\nabla L(W_t))_i^2 \\ +(W_{t+1})_i= (W_{t})_i -\alpha\frac{(\nabla L(W_t))_i}{\sqrt{\operatorname{MS}((W_t)_i)}} $$ -$$ -(W_{t+1})_i =(W_t)_i - \alpha (v_t)_i, -$$ - -If the gradient updates results in oscillations the gradient is reduced by times $$1-\delta$$. Otherwise it will be increased by $$\delta$$. The default value of $$\delta$$ (`rms_decay`) is set to $$\delta = 0.02$$. +The default value of $$\delta$$ (`rms_decay`) is set to $$\delta=0.99$$. [1] T. Tieleman, and G. Hinton. [RMSProp: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf). diff --git a/examples/02-fine-tuning.ipynb b/examples/02-fine-tuning.ipynb index 07ca8df4d74..422259de424 100644 --- a/examples/02-fine-tuning.ipynb +++ b/examples/02-fine-tuning.ipynb @@ -70,7 +70,7 @@ "\n", "- `get_ilsvrc_aux.sh` to download the ImageNet data mean, labels, etc.\n", "- `download_model_binary.py` to download the pretrained reference model\n", - "- `finetune_flickr_style/assemble_data.py` downloadsd the style training and testing data\n", + "- `finetune_flickr_style/assemble_data.py` downloads the style training and testing data\n", "\n", "We'll download just a small subset of the full dataset for this exercise: just 2000 of the 80K images, from 5 of the 20 style categories. (To download the full dataset, set `full_dataset = True` in the cell below.)" ] @@ -146,7 +146,7 @@ "outputs": [], "source": [ "import os\n", - "weights = caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'\n", + "weights = os.path.join(caffe_root, 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')\n", "assert os.path.exists(weights)" ] }, @@ -1141,7 +1141,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "So we did finetuning and it is awesome. Let's take a look at what kind of results we are able to get with a longer, more complete run of the style recognition dataset. Note: the below URL might be occassionally down because it is run on a research machine.\n", + "So we did finetuning and it is awesome. Let's take a look at what kind of results we are able to get with a longer, more complete run of the style recognition dataset. 
Note: the below URL might be occasionally down because it is run on a research machine.\n", "\n", "http://demo.vislab.berkeleyvision.org/" ] diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 663d7360b7d..43bbcb83789 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -19,11 +19,12 @@ foreach(source_file ${examples_srcs}) caffe_set_solution_folder(${name} examples) # install - install(TARGETS ${name} DESTINATION bin) + install(TARGETS ${name} DESTINATION ${CMAKE_INSTALL_BINDIR}) + if(UNIX OR APPLE) # Funny command to make tutorials work - # TODO: remove in future as soon as naming is standartaized everywhere + # TODO: remove in future as soon as naming is standardized everywhere set(__outname ${PROJECT_BINARY_DIR}/examples/${folder}/${name}${Caffe_POSTFIX}) add_custom_command(TARGET ${name} POST_BUILD COMMAND ln -sf "${__outname}" "${__outname}.bin") diff --git a/examples/brewing-logreg.ipynb b/examples/brewing-logreg.ipynb index c053b73b39f..0f87185a35b 100644 --- a/examples/brewing-logreg.ipynb +++ b/examples/brewing-logreg.ipynb @@ -73,12 +73,12 @@ ")\n", "\n", "# Split into train and test\n", - "X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)\n", + "X, Xt, y, yt = sklearn.model_selection.train_test_split(X, y)\n", "\n", "# Visualize sample of the data\n", "ind = np.random.permutation(X.shape[0])[:1000]\n", "df = pd.DataFrame(X[ind])\n", - "_ = pd.scatter_matrix(df, figsize=(9, 9), diagonal='kde', marker='o', s=40, alpha=.4, c=y[ind])" + "_ = pd.plotting.scatter_matrix(df, figsize=(9, 9), diagonal='kde', marker='o', s=40, alpha=.4, c=y[ind])" ] }, { @@ -111,7 +111,7 @@ "%%timeit\n", "# Train and test the scikit-learn SGD logistic regression.\n", "clf = sklearn.linear_model.SGDClassifier(\n", - " loss='log', n_iter=1000, penalty='l2', alpha=5e-4, class_weight='auto')\n", + " loss='log', n_iter=1000, penalty='l2', alpha=5e-4, class_weight='balanced')\n", "\n", "clf.fit(X, y)\n", "yt_pred = clf.predict(Xt)\n", diff --git a/examples/cifar10/cifar10_full_sigmoid_solver.prototxt b/examples/cifar10/cifar10_full_sigmoid_solver.prototxt index 7dd3ecb9d8e..a8e5539937d 100644 --- a/examples/cifar10/cifar10_full_sigmoid_solver.prototxt +++ b/examples/cifar10/cifar10_full_sigmoid_solver.prototxt @@ -17,7 +17,7 @@ momentum: 0.9 lr_policy: "step" gamma: 1 stepsize: 5000 -# Display every 200 iterations +# Display every 100 iterations display: 100 # The maximum number of iterations max_iter: 60000 diff --git a/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt b/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt index a57b280fd1e..a4dabd67ca0 100644 --- a/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt +++ b/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt @@ -17,7 +17,7 @@ momentum: 0.9 lr_policy: "step" gamma: 1 stepsize: 5000 -# Display every 200 iterations +# Display every 100 iterations display: 100 # The maximum number of iterations max_iter: 60000 diff --git a/examples/cifar10/cifar10_quick_solver.prototxt b/examples/cifar10/cifar10_quick_solver.prototxt index 5de276f722f..14b4401ba16 100644 --- a/examples/cifar10/cifar10_quick_solver.prototxt +++ b/examples/cifar10/cifar10_quick_solver.prototxt @@ -20,7 +20,6 @@ display: 100 max_iter: 4000 # snapshot intermediate results snapshot: 4000 -snapshot_format: HDF5 snapshot_prefix: "examples/cifar10/cifar10_quick" # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/cifar10/create_cifar10.sh b/examples/cifar10/create_cifar10.sh index a42725cb610..7ee1d6ad0a0 
100755 --- a/examples/cifar10/create_cifar10.sh +++ b/examples/cifar10/create_cifar10.sh @@ -1,5 +1,6 @@ #!/usr/bin/env sh # This script converts the cifar data into leveldb format. +set -e EXAMPLE=examples/cifar10 DATA=data/cifar10 diff --git a/examples/cifar10/train_full.sh b/examples/cifar10/train_full.sh index ef112e1f6db..fe46e60d795 100755 --- a/examples/cifar10/train_full.sh +++ b/examples/cifar10/train_full.sh @@ -1,16 +1,17 @@ #!/usr/bin/env sh +set -e TOOLS=./build/tools $TOOLS/caffe train \ - --solver=examples/cifar10/cifar10_full_solver.prototxt + --solver=examples/cifar10/cifar10_full_solver.prototxt $@ # reduce learning rate by factor of 10 $TOOLS/caffe train \ --solver=examples/cifar10/cifar10_full_solver_lr1.prototxt \ - --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate.h5 + --snapshot=examples/cifar10/cifar10_full_iter_60000.solverstate $@ # reduce learning rate by factor of 10 $TOOLS/caffe train \ --solver=examples/cifar10/cifar10_full_solver_lr2.prototxt \ - --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate.h5 + --snapshot=examples/cifar10/cifar10_full_iter_65000.solverstate $@ diff --git a/examples/cifar10/train_full_sigmoid.sh b/examples/cifar10/train_full_sigmoid.sh index 9cff06d3e34..9b5d5213b2a 100755 --- a/examples/cifar10/train_full_sigmoid.sh +++ b/examples/cifar10/train_full_sigmoid.sh @@ -1,7 +1,8 @@ #!/usr/bin/env sh +set -e TOOLS=./build/tools $TOOLS/caffe train \ - --solver=examples/cifar10/cifar10_full_sigmoid_solver.prototxt + --solver=examples/cifar10/cifar10_full_sigmoid_solver.prototxt $@ diff --git a/examples/cifar10/train_full_sigmoid_bn.sh b/examples/cifar10/train_full_sigmoid_bn.sh index 011387c996e..05547f3a104 100755 --- a/examples/cifar10/train_full_sigmoid_bn.sh +++ b/examples/cifar10/train_full_sigmoid_bn.sh @@ -1,7 +1,8 @@ #!/usr/bin/env sh +set -e TOOLS=./build/tools $TOOLS/caffe train \ - --solver=examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt + --solver=examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt $@ diff --git a/examples/cifar10/train_quick.sh b/examples/cifar10/train_quick.sh index 6b7d228879b..257479e0d77 100755 --- a/examples/cifar10/train_quick.sh +++ b/examples/cifar10/train_quick.sh @@ -1,11 +1,12 @@ #!/usr/bin/env sh +set -e TOOLS=./build/tools $TOOLS/caffe train \ - --solver=examples/cifar10/cifar10_quick_solver.prototxt + --solver=examples/cifar10/cifar10_quick_solver.prototxt $@ # reduce learning rate by factor of 10 after 8 epochs $TOOLS/caffe train \ --solver=examples/cifar10/cifar10_quick_solver_lr1.prototxt \ - --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate.h5 + --snapshot=examples/cifar10/cifar10_quick_iter_4000.solverstate $@ diff --git a/examples/cpp_classification/readme.md b/examples/cpp_classification/readme.md index 0de2885b53c..4f683aa623f 100644 --- a/examples/cpp_classification/readme.md +++ b/examples/cpp_classification/readme.md @@ -10,7 +10,7 @@ priority: 10 Caffe, at its core, is written in C++. It is possible to use the C++ API of Caffe to implement an image classification application similar -to the Python code presented in one of the Notebook example. To look +to the Python code presented in one of the Notebook examples. To look at a more general-purpose example of the Caffe C++ API, you should study the source code of the command line tool `caffe` in `tools/caffe.cpp`. @@ -19,7 +19,7 @@ study the source code of the command line tool `caffe` in `tools/caffe.cpp`. 
A simple C++ code is proposed in `examples/cpp_classification/classification.cpp`. For the sake of simplicity, this example does not support oversampling of a single -sample nor batching of multiple independant samples. This example is +sample nor batching of multiple independent samples. This example is not trying to reach the maximum possible classification throughput on a system, but special care was given to avoid unnecessary pessimization while keeping the code readable. diff --git a/examples/finetune_flickr_style/readme.md b/examples/finetune_flickr_style/readme.md index 188dedf1b9a..dacfd01c8e1 100644 --- a/examples/finetune_flickr_style/readme.md +++ b/examples/finetune_flickr_style/readme.md @@ -9,7 +9,7 @@ priority: 5 # Fine-tuning CaffeNet for Style Recognition on "Flickr Style" Data Fine-tuning takes an already learned model, adapts the architecture, and resumes training from the already learned model weights. -Let's fine-tune the BVLC-distributed CaffeNet model on a different dataset, [Flickr Style](http://sergeykarayev.com/files/1311.3715v3.pdf), to predict image style instead of object category. +Let's fine-tune the BAIR-distributed CaffeNet model on a different dataset, [Flickr Style](http://sergeykarayev.com/files/1311.3715v3.pdf), to predict image style instead of object category. ## Explanation diff --git a/examples/imagenet/create_imagenet.sh b/examples/imagenet/create_imagenet.sh index e912ac43cd7..1bf08b1aa8f 100755 --- a/examples/imagenet/create_imagenet.sh +++ b/examples/imagenet/create_imagenet.sh @@ -1,6 +1,7 @@ #!/usr/bin/env sh # Create the imagenet lmdb inputs # N.B. set the path to the imagenet train + val data dirs +set -e EXAMPLE=examples/imagenet DATA=data/ilsvrc12 diff --git a/examples/imagenet/resume_training.sh b/examples/imagenet/resume_training.sh index bf7945c0fd0..4aef204368e 100755 --- a/examples/imagenet/resume_training.sh +++ b/examples/imagenet/resume_training.sh @@ -1,5 +1,7 @@ #!/usr/bin/env sh +set -e ./build/tools/caffe train \ --solver=models/bvlc_reference_caffenet/solver.prototxt \ - --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate.h5 + --snapshot=models/bvlc_reference_caffenet/caffenet_train_10000.solverstate.h5 \ + $@ diff --git a/examples/imagenet/train_caffenet.sh b/examples/imagenet/train_caffenet.sh index 94558ec5466..a5094d44ae0 100755 --- a/examples/imagenet/train_caffenet.sh +++ b/examples/imagenet/train_caffenet.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +set -e ./build/tools/caffe train \ - --solver=models/bvlc_reference_caffenet/solver.prototxt + --solver=models/bvlc_reference_caffenet/solver.prototxt $@ diff --git a/examples/mnist/create_mnist.sh b/examples/mnist/create_mnist.sh index 06ecc27de63..f5e2e7960c5 100755 --- a/examples/mnist/create_mnist.sh +++ b/examples/mnist/create_mnist.sh @@ -1,6 +1,7 @@ #!/usr/bin/env sh # This script converts the mnist data into lmdb/leveldb format, # depending on the value assigned to $BACKEND. 
+set -e EXAMPLE=examples/mnist DATA=data/mnist diff --git a/examples/mnist/train_lenet.sh b/examples/mnist/train_lenet.sh index 1b6bf7d978d..f7f9b86198d 100755 --- a/examples/mnist/train_lenet.sh +++ b/examples/mnist/train_lenet.sh @@ -1,3 +1,4 @@ #!/usr/bin/env sh +set -e -./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt +./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt $@ diff --git a/examples/mnist/train_lenet_adam.sh b/examples/mnist/train_lenet_adam.sh index a32ecf2d9c2..7b4e905681b 100755 --- a/examples/mnist/train_lenet_adam.sh +++ b/examples/mnist/train_lenet_adam.sh @@ -1,3 +1,4 @@ #!/usr/bin/env sh +set -e -./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt +./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt $@ diff --git a/examples/mnist/train_lenet_consolidated.sh b/examples/mnist/train_lenet_consolidated.sh index c855467897e..c5f02666822 100755 --- a/examples/mnist/train_lenet_consolidated.sh +++ b/examples/mnist/train_lenet_consolidated.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +set -e ./build/tools/caffe train \ - --solver=examples/mnist/lenet_consolidated_solver.prototxt + --solver=examples/mnist/lenet_consolidated_solver.prototxt $@ diff --git a/examples/mnist/train_lenet_docker.sh b/examples/mnist/train_lenet_docker.sh index 32cf1c8e4a3..e946ba0f4ad 100755 --- a/examples/mnist/train_lenet_docker.sh +++ b/examples/mnist/train_lenet_docker.sh @@ -25,7 +25,7 @@ set -e # executed. # # In order to provide additional flexibility, the following shell (environment) -# variables can be used to controll the execution of each of the phases: +# variables can be used to control the execution of each of the phases: # # DOWNLOAD_DATA: Enable (1) or disable (0) the downloading of the MNIST dataset # CREATE_LMDB: Enable (1) or disable (0) the creation of the LMDB database diff --git a/examples/mnist/train_lenet_rmsprop.sh b/examples/mnist/train_lenet_rmsprop.sh index 621cab238bf..adfa7ab0fca 100755 --- a/examples/mnist/train_lenet_rmsprop.sh +++ b/examples/mnist/train_lenet_rmsprop.sh @@ -1,3 +1,5 @@ #!/usr/bin/env sh +set -e -./build/tools/caffe train --solver=examples/mnist/lenet_solver_rmsprop.prototxt +./build/tools/caffe train \ + --solver=examples/mnist/lenet_solver_rmsprop.prototxt $@ diff --git a/examples/mnist/train_mnist_autoencoder.sh b/examples/mnist/train_mnist_autoencoder.sh index cfd67e82fda..724a0f14a49 100755 --- a/examples/mnist/train_mnist_autoencoder.sh +++ b/examples/mnist/train_mnist_autoencoder.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +set -e ./build/tools/caffe train \ - --solver=examples/mnist/mnist_autoencoder_solver.prototxt + --solver=examples/mnist/mnist_autoencoder_solver.prototxt $@ diff --git a/examples/mnist/train_mnist_autoencoder_adadelta.sh b/examples/mnist/train_mnist_autoencoder_adadelta.sh index 4be0ebddedc..a660dbb9ed2 100755 --- a/examples/mnist/train_mnist_autoencoder_adadelta.sh +++ b/examples/mnist/train_mnist_autoencoder_adadelta.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -e ./build/tools/caffe train \ - --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt + --solver=examples/mnist/mnist_autoencoder_solver_adadelta.prototxt $@ diff --git a/examples/mnist/train_mnist_autoencoder_adagrad.sh b/examples/mnist/train_mnist_autoencoder_adagrad.sh index 95fe1b17bd5..4c11dfa67ac 100755 --- a/examples/mnist/train_mnist_autoencoder_adagrad.sh +++ b/examples/mnist/train_mnist_autoencoder_adagrad.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -e ./build/tools/caffe train \ 
- --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt + --solver=examples/mnist/mnist_autoencoder_solver_adagrad.prototxt $@ diff --git a/examples/mnist/train_mnist_autoencoder_nesterov.sh b/examples/mnist/train_mnist_autoencoder_nesterov.sh index cf19ea749b3..fd0559d2488 100755 --- a/examples/mnist/train_mnist_autoencoder_nesterov.sh +++ b/examples/mnist/train_mnist_autoencoder_nesterov.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -e ./build/tools/caffe train \ - --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt + --solver=examples/mnist/mnist_autoencoder_solver_nesterov.prototxt $@ diff --git a/examples/net_surgery.ipynb b/examples/net_surgery.ipynb index d50d503bfe0..217c2d1a742 100644 --- a/examples/net_surgery.ipynb +++ b/examples/net_surgery.ipynb @@ -5479,7 +5479,7 @@ "\n", "Let's take the standard Caffe Reference ImageNet model \"CaffeNet\" and transform it into a fully convolutional net for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\\times$ 8 classification map on a 451 $\\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional network (convnet) structure by amortizing the computation of overlapping receptive fields.\n", "\n", - "To do so we translate the `InnerProduct` matrix multiplication layers of CaffeNet into `Convolutional` layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 \\times 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding." + "To do so we translate the `InnerProduct` matrix multiplication layers of CaffeNet into `Convolutional` layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 $\\times$ 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding." ] }, { diff --git a/examples/pycaffe/layers/pascal_multilabel_datalayers.py b/examples/pycaffe/layers/pascal_multilabel_datalayers.py index 68e4fa7960a..9420cb328ce 100644 --- a/examples/pycaffe/layers/pascal_multilabel_datalayers.py +++ b/examples/pycaffe/layers/pascal_multilabel_datalayers.py @@ -20,7 +20,7 @@ class PascalMultilabelDataLayerSync(caffe.Layer): """ - This is a simple syncronous datalayer for training a multilabel model on + This is a simple synchronous datalayer for training a multilabel model on PASCAL. """ @@ -33,7 +33,7 @@ def setup(self, bottom, top): # params is a python dictionary with layer parameters. params = eval(self.param_str) - # Check the paramameters for validity. + # Check the parameters for validity. 
check_params(params) # store input as class variables @@ -207,7 +207,7 @@ def check_params(params): def print_info(name, params): """ - Ouput some info regarding the class + Output some info regarding the class """ print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format( name, diff --git a/examples/pycaffe/tools.py b/examples/pycaffe/tools.py index 88b1834af1e..7f6c2d835fb 100644 --- a/examples/pycaffe/tools.py +++ b/examples/pycaffe/tools.py @@ -26,7 +26,7 @@ def set_scale(self, scale): def preprocess(self, im): """ - preprocess() emulate the pre-processing occuring in the vgg16 caffe + preprocess() emulates the pre-processing occurring in the vgg16 caffe prototxt. """ @@ -75,7 +75,7 @@ def __init__(self, testnet_prototxt_path="testnet.prototxt", # looks: self.sp['display'] = '25' self.sp['snapshot'] = '2500' - self.sp['snapshot_prefix'] = '"snapshot"' # string withing a string! + self.sp['snapshot_prefix'] = '"snapshot"' # string within a string! # learning rate policy self.sp['lr_policy'] = '"fixed"' diff --git a/examples/siamese/create_mnist_siamese.sh b/examples/siamese/create_mnist_siamese.sh index 43ad6b184a7..03adce54d9b 100755 --- a/examples/siamese/create_mnist_siamese.sh +++ b/examples/siamese/create_mnist_siamese.sh @@ -1,5 +1,6 @@ #!/usr/bin/env sh # This script converts the mnist data into leveldb format. +set -e EXAMPLES=./build/examples/siamese DATA=./data/mnist diff --git a/examples/siamese/train_mnist_siamese.sh b/examples/siamese/train_mnist_siamese.sh index 84a30a8ac44..e01ac2ceefd 100755 --- a/examples/siamese/train_mnist_siamese.sh +++ b/examples/siamese/train_mnist_siamese.sh @@ -1,5 +1,6 @@ #!/usr/bin/env sh +set -e TOOLS=./build/tools -$TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt +$TOOLS/caffe train --solver=examples/siamese/mnist_siamese_solver.prototxt $@ diff --git a/examples/web_demo/readme.md b/examples/web_demo/readme.md index fe74b9ef7d3..e50c4f101ee 100644 --- a/examples/web_demo/readme.md +++ b/examples/web_demo/readme.md @@ -11,7 +11,7 @@ priority: 10 ## Requirements The demo server requires Python with some dependencies. -To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](/installation.html)). +To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](http://caffe.berkeleyvision.org/installation.html)). Make sure that you have obtained the Reference CaffeNet Model and the ImageNet Auxiliary Data: diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index af360ac24bd..2f59471c29e 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -220,6 +220,7 @@ class Blob { void set_cpu_data(Dtype* data); const int* gpu_shape() const; const Dtype* gpu_data() const; + void set_gpu_data(Dtype* data); const Dtype* cpu_diff() const; const Dtype* gpu_diff() const; Dtype* mutable_cpu_data(); diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 3c6a076ec2f..4904d1d8661 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -158,11 +158,14 @@ class Caffe { // Search from start_id to the highest possible device ordinal, // return the ordinal of the first available device.
static int FindDevice(const int start_id = 0); - // Parallel training info + // Parallel training inline static int solver_count() { return Get().solver_count_; } inline static void set_solver_count(int val) { Get().solver_count_ = val; } - inline static bool root_solver() { return Get().root_solver_; } - inline static void set_root_solver(bool val) { Get().root_solver_ = val; } + inline static int solver_rank() { return Get().solver_rank_; } + inline static void set_solver_rank(int val) { Get().solver_rank_ = val; } + inline static bool multiprocess() { return Get().multiprocess_; } + inline static void set_multiprocess(bool val) { Get().multiprocess_ = val; } + inline static bool root_solver() { return Get().solver_rank_ == 0; } protected: #ifndef CPU_ONLY @@ -172,8 +175,11 @@ class Caffe { shared_ptr random_generator_; Brew mode_; + + // Parallel training int solver_count_; - bool root_solver_; + int solver_rank_; + bool multiprocess_; private: // The private constructor to avoid duplicate instantiation. diff --git a/include/caffe/data_reader.hpp b/include/caffe/data_reader.hpp deleted file mode 100644 index 8ed5542cb8d..00000000000 --- a/include/caffe/data_reader.hpp +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef CAFFE_DATA_READER_HPP_ -#define CAFFE_DATA_READER_HPP_ - -#include -#include -#include - -#include "caffe/common.hpp" -#include "caffe/internal_thread.hpp" -#include "caffe/util/blocking_queue.hpp" -#include "caffe/util/db.hpp" - -namespace caffe { - -/** - * @brief Reads data from a source to queues available to data layers. - * A single reading thread is created per source, even if multiple solvers - * are running in parallel, e.g. for multi-GPU training. This makes sure - * databases are read sequentially, and that each solver accesses a different - * subset of the database. Data is distributed to solvers in a round-robin - * way to keep parallel training deterministic. - */ -class DataReader { - public: - explicit DataReader(const LayerParameter& param); - ~DataReader(); - - inline BlockingQueue& free() const { - return queue_pair_->free_; - } - inline BlockingQueue& full() const { - return queue_pair_->full_; - } - - protected: - // Queue pairs are shared between a body and its readers - class QueuePair { - public: - explicit QueuePair(int size); - ~QueuePair(); - - BlockingQueue free_; - BlockingQueue full_; - - DISABLE_COPY_AND_ASSIGN(QueuePair); - }; - - // A single body is created per source - class Body : public InternalThread { - public: - explicit Body(const LayerParameter& param); - virtual ~Body(); - - protected: - void InternalThreadEntry(); - void read_one(db::Cursor* cursor, QueuePair* qp); - - const LayerParameter param_; - BlockingQueue > new_queue_pairs_; - - friend class DataReader; - - DISABLE_COPY_AND_ASSIGN(Body); - }; - - // A source is uniquely identified by its layer name + path, in case - // the same database is read from two different locations in the net. 
- static inline string source_key(const LayerParameter& param) { - return param.name() + ":" + param.data_param().source(); - } - - const shared_ptr queue_pair_; - shared_ptr body_; - - static map > bodies_; - -DISABLE_COPY_AND_ASSIGN(DataReader); -}; - -} // namespace caffe - -#endif // CAFFE_DATA_READER_HPP_ diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index dad9ad46b3b..bb92ded780f 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -250,10 +250,10 @@ class BilinearFiller : public Filler { CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; Dtype* data = blob->mutable_cpu_data(); int f = ceil(blob->width() / 2.); - float c = (2 * f - 1 - f % 2) / (2. * f); + Dtype c = (blob->width() - 1) / (2. * f); for (int i = 0; i < blob->count(); ++i) { - float x = i % blob->width(); - float y = (i / blob->width()) % blob->height(); + Dtype x = i % blob->width(); + Dtype y = (i / blob->width()) % blob->height(); data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); } CHECK_EQ(this->filler_param_.sparse(), -1) diff --git a/include/caffe/internal_thread.hpp b/include/caffe/internal_thread.hpp index 6a8c5a02892..0ba67665035 100644 --- a/include/caffe/internal_thread.hpp +++ b/include/caffe/internal_thread.hpp @@ -42,8 +42,8 @@ class InternalThread { bool must_stop(); private: - void entry(int device, Caffe::Brew mode, int rand_seed, int solver_count, - bool root_solver); + void entry(int device, Caffe::Brew mode, int rand_seed, + int solver_count, int solver_rank, bool multiprocess); shared_ptr thread_; }; diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 10f353f94f9..30dbfd53758 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -38,7 +38,7 @@ class Layer { * layer. */ explicit Layer(const LayerParameter& param) - : layer_param_(param), is_shared_(false) { + : layer_param_(param) { // Set phase and copy blobs (if there are any). phase_ = param.phase(); if (layer_param_.blobs_size() > 0) { @@ -66,7 +66,6 @@ class Layer { */ void SetUp(const vector*>& bottom, const vector*>& top) { - InitMutex(); CheckBlobCounts(bottom, top); LayerSetUp(bottom, top); Reshape(bottom, top); @@ -92,30 +91,6 @@ class Layer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) {} - /** - * @brief Whether a layer should be shared by multiple nets during data - * parallelism. By default, all layers except for data layers should - * not be shared. data layers should be shared to ensure each worker - * solver access data sequentially during data parallelism. - */ - virtual inline bool ShareInParallel() const { return false; } - - /** @brief Return whether this layer is actually shared by other nets. - * If ShareInParallel() is true and using more than one GPU and the - * net has TRAIN phase, then this function is expected return true. - */ - inline bool IsShared() const { return is_shared_; } - - /** @brief Set whether this layer is actually shared by other nets - * If ShareInParallel() is true and using more than one GPU and the - * net has TRAIN phase, then is_shared should be set true. - */ - inline void SetShared(bool is_shared) { - CHECK(ShareInParallel() || !is_shared) - << type() << "Layer does not support sharing."; - is_shared_ = is_shared; - } - /** * @brief Adjust the shapes of top blobs and internal buffers to accommodate * the shapes of the bottom blobs. 
@@ -428,19 +403,6 @@ class Layer { } private: - /** Whether this layer is actually shared by other nets*/ - bool is_shared_; - - /** The mutex for sequential forward if this layer is shared */ - shared_ptr forward_mutex_; - - /** Initialize forward_mutex_ */ - void InitMutex(); - /** Lock forward_mutex_ if this layer is shared */ - void Lock(); - /** Unlock forward_mutex_ if this layer is shared */ - void Unlock(); - DISABLE_COPY_AND_ASSIGN(Layer); }; // class Layer @@ -450,8 +412,6 @@ class Layer { template inline Dtype Layer::Forward(const vector*>& bottom, const vector*>& top) { - // Lock during forward to ensure sequential forward - Lock(); Dtype loss = 0; Reshape(bottom, top); switch (Caffe::mode()) { @@ -482,7 +442,6 @@ inline Dtype Layer::Forward(const vector*>& bottom, default: LOG(FATAL) << "Unknown caffe mode."; } - Unlock(); return loss; } diff --git a/include/caffe/layer_factory.hpp b/include/caffe/layer_factory.hpp index f385afccfee..2369c132911 100644 --- a/include/caffe/layer_factory.hpp +++ b/include/caffe/layer_factory.hpp @@ -1,6 +1,6 @@ /** * @brief A layer factory that allows one to register layers. - * During runtime, registered layers could be called by passing a LayerParameter + * During runtime, registered layers can be called by passing a LayerParameter * protobuffer to the CreateLayer function: * * LayerRegistry::CreateLayer(param); diff --git a/include/caffe/layers/accuracy_layer.hpp b/include/caffe/layers/accuracy_layer.hpp index fe2adb939e4..dd2247b9e4d 100644 --- a/include/caffe/layers/accuracy_layer.hpp +++ b/include/caffe/layers/accuracy_layer.hpp @@ -39,7 +39,7 @@ class AccuracyLayer : public Layer { // If there are two top blobs, then the second blob will contain // accuracies per class. virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlos() const { return 2; } + virtual inline int MaxTopBlobs() const { return 2; } protected: /** @@ -68,6 +68,8 @@ class AccuracyLayer : public Layer { */ virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. @@ -77,6 +79,8 @@ class AccuracyLayer : public Layer { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); int label_axis_, outer_num_, inner_num_; diff --git a/include/caffe/layers/base_data_layer.hpp b/include/caffe/layers/base_data_layer.hpp index 2c49b73184b..c8b6998c8f2 100644 --- a/include/caffe/layers/base_data_layer.hpp +++ b/include/caffe/layers/base_data_layer.hpp @@ -26,8 +26,6 @@ class BaseDataLayer : public Layer { // This method may not be overridden except by the BasePrefetchingDataLayer. virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } virtual void DataLayerSetUp(const vector*>& bottom, const vector*>& top) {} // Data layers have no bottoms, so reshaping is trivial. 
@@ -67,16 +65,14 @@ class BasePrefetchingDataLayer : virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); - // Prefetches batches (asynchronously if to GPU memory) - static const int PREFETCH_COUNT = 3; - protected: virtual void InternalThreadEntry(); virtual void load_batch(Batch* batch) = 0; - Batch prefetch_[PREFETCH_COUNT]; + vector > > prefetch_; BlockingQueue*> prefetch_free_; BlockingQueue*> prefetch_full_; + Batch* prefetch_current_; Blob transformed_data_; }; diff --git a/include/caffe/layers/batch_norm_layer.hpp b/include/caffe/layers/batch_norm_layer.hpp index 9b2d5126efb..43f7b28be95 100644 --- a/include/caffe/layers/batch_norm_layer.hpp +++ b/include/caffe/layers/batch_norm_layer.hpp @@ -13,25 +13,22 @@ namespace caffe { * @brief Normalizes the input to have 0-mean and/or unit (1) variance across * the batch. * - * This layer computes Batch Normalization described in [1]. For - * each channel in the data (i.e. axis 1), it subtracts the mean and divides - * by the variance, where both statistics are computed across both spatial - * dimensions and across the different examples in the batch. + * This layer computes Batch Normalization as described in [1]. For each channel + * in the data (i.e. axis 1), it subtracts the mean and divides by the variance, + * where both statistics are computed across both spatial dimensions and across + * the different examples in the batch. * - * By default, during training time, the network is computing global mean/ - * variance statistics via a running average, which is then used at test - * time to allow deterministic outputs for each input. You can manually - * toggle whether the network is accumulating or using the statistics via the - * use_global_stats option. IMPORTANT: for this feature to work, you MUST - * set the learning rate to zero for all three parameter blobs, i.e., - * param {lr_mult: 0} three times in the layer definition. + * By default, during training time, the network is computing global + * mean/variance statistics via a running average, which is then used at test + * time to allow deterministic outputs for each input. You can manually toggle + * whether the network is accumulating or using the statistics via the + * use_global_stats option. For reference, these statistics are kept in the + * layer's three blobs: (0) mean, (1) variance, and (2) moving average factor. * * Note that the original paper also included a per-channel learned bias and - * scaling factor. It is possible (though a bit cumbersome) to implement - * this in caffe using a single-channel DummyDataLayer filled with zeros, - * followed by a Convolution layer with output the same size as the current. - * This produces a channel-specific value that can be added or multiplied by - * the BatchNorm layer's output. + * scaling factor. To implement this in Caffe, define a `ScaleLayer` configured + * with `bias_term: true` after each `BatchNormLayer` to handle both the bias + * and scaling factor. * * [1] S. Ioffe and C. Szegedy, "Batch Normalization: Accelerating Deep Network * Training by Reducing Internal Covariate Shift." arXiv preprint diff --git a/include/caffe/layers/bias_layer.hpp b/include/caffe/layers/bias_layer.hpp index eedc3aaa351..9639c9cdc8a 100644 --- a/include/caffe/layers/bias_layer.hpp +++ b/include/caffe/layers/bias_layer.hpp @@ -10,13 +10,13 @@ namespace caffe { /** - * @brief Computes a sum of two input Blobs, with the shape of the - * latter Blob "broadcast" to match the shape of the former. 
- * Equivalent to tiling the latter Blob, then computing the elementwise - * sum. + * @brief Computes a sum of two input Blobs, with the shape of the latter Blob + * "broadcast" to match the shape of the former. Equivalent to tiling + * the latter Blob, then computing the elementwise sum. * * The second input may be omitted, in which case it's learned as a parameter - * of the layer. + * of the layer. Note: in case bias and scaling are desired, both operations can + * be handled by `ScaleLayer` configured with `bias_term: true`. */ template class BiasLayer : public Layer { diff --git a/include/caffe/layers/crop_layer.hpp b/include/caffe/layers/crop_layer.hpp index c4fda1220c3..5219fa5cb5f 100644 --- a/include/caffe/layers/crop_layer.hpp +++ b/include/caffe/layers/crop_layer.hpp @@ -41,13 +41,15 @@ class CropLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - vector offsets; + Blob offsets; + Blob src_strides_; + Blob dest_strides_; private: // Recursive copy function. void crop_copy(const vector*>& bottom, const vector*>& top, - const vector& offsets, + const int* offsets, vector indices, int cur_dim, const Dtype* src_data, diff --git a/include/caffe/layers/cudnn_deconv_layer.hpp b/include/caffe/layers/cudnn_deconv_layer.hpp new file mode 100644 index 00000000000..12799e5b8ef --- /dev/null +++ b/include/caffe/layers/cudnn_deconv_layer.hpp @@ -0,0 +1,68 @@ +#ifndef CAFFE_CUDNN_DECONV_LAYER_HPP_ +#define CAFFE_CUDNN_DECONV_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/deconv_layer.hpp" + +namespace caffe { + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of DeconvolutionLayer. + * Falls back to DeconvolutionLayer for CPU mode. + * + * cuDNN accelerates deconvolution through forward kernels for filtering and + * bias plus backward kernels for the gradient w.r.t. the filters, biases, and + * inputs. Caffe + cuDNN further speeds up the computation through forward + * parallelism across groups and backward parallelism across gradients.
+*/ +template +class CuDNNDeconvolutionLayer : public DeconvolutionLayer { + public: + explicit CuDNNDeconvolutionLayer(const LayerParameter& param) + : DeconvolutionLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNDeconvolutionLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t* handle_; + cudaStream_t* stream_; + + // algorithms for forward and backwards convolutions + cudnnConvolutionFwdAlgo_t *fwd_algo_; + cudnnConvolutionBwdFilterAlgo_t *bwd_filter_algo_; + cudnnConvolutionBwdDataAlgo_t *bwd_data_algo_; + + vector bottom_descs_, top_descs_; + cudnnTensorDescriptor_t bias_desc_; + cudnnFilterDescriptor_t filter_desc_; + vector conv_descs_; + int bottom_offset_, top_offset_, bias_offset_; + + size_t *workspace_fwd_sizes_; + size_t *workspace_bwd_data_sizes_; + size_t *workspace_bwd_filter_sizes_; + size_t workspaceSizeInBytes; // size of underlying storage + void *workspaceData; // underlying storage + void **workspace; // aliases into workspaceData +}; +#endif + +} // namespace caffe + +#endif // CAFFE_CUDNN_DECONV_LAYER_HPP_ diff --git a/include/caffe/layers/data_layer.hpp b/include/caffe/layers/data_layer.hpp index 6c361791a0c..667a4ae43a5 100644 --- a/include/caffe/layers/data_layer.hpp +++ b/include/caffe/layers/data_layer.hpp @@ -4,7 +4,6 @@ #include #include "caffe/blob.hpp" -#include "caffe/data_reader.hpp" #include "caffe/data_transformer.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" @@ -21,17 +20,19 @@ class DataLayer : public BasePrefetchingDataLayer { virtual ~DataLayer(); virtual void DataLayerSetUp(const vector*>& bottom, const vector*>& top); - // DataLayer uses DataReader instead for sharing for parallelism - virtual inline bool ShareInParallel() const { return false; } virtual inline const char* type() const { return "Data"; } virtual inline int ExactNumBottomBlobs() const { return 0; } virtual inline int MinTopBlobs() const { return 1; } virtual inline int MaxTopBlobs() const { return 2; } protected: + void Next(); + bool Skip(); virtual void load_batch(Batch* batch); - DataReader reader_; + shared_ptr db_; + shared_ptr cursor_; + uint64_t offset_; }; } // namespace caffe diff --git a/include/caffe/layers/dummy_data_layer.hpp b/include/caffe/layers/dummy_data_layer.hpp index 4180f1d01e4..13a63d47ec4 100644 --- a/include/caffe/layers/dummy_data_layer.hpp +++ b/include/caffe/layers/dummy_data_layer.hpp @@ -22,8 +22,6 @@ class DummyDataLayer : public Layer { : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} diff --git a/include/caffe/layers/euclidean_loss_layer.hpp b/include/caffe/layers/euclidean_loss_layer.hpp index f564569e27a..24568c5496f 100644 --- a/include/caffe/layers/euclidean_loss_layer.hpp +++ b/include/caffe/layers/euclidean_loss_layer.hpp @@ -30,7 +30,7 @@ namespace caffe { * This can be used for least-squares regression tasks. 
An InnerProductLayer * input to a EuclideanLossLayer exactly formulates a linear least squares * regression problem. With non-zero weight decay the problem becomes one of - * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete + * ridge regression -- see src/caffe/test/test_gradient_based_solver.cpp for a concrete * example wherein we check that the gradients computed for a Net with exactly * this structure match hand-computed gradient formulas for ridge regression. * diff --git a/include/caffe/layers/hdf5_data_layer.hpp b/include/caffe/layers/hdf5_data_layer.hpp index b04cf8e1940..601b36c6b89 100644 --- a/include/caffe/layers/hdf5_data_layer.hpp +++ b/include/caffe/layers/hdf5_data_layer.hpp @@ -23,12 +23,10 @@ template class HDF5DataLayer : public Layer { public: explicit HDF5DataLayer(const LayerParameter& param) - : Layer(param) {} + : Layer(param), offset_() {} virtual ~HDF5DataLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -38,6 +36,9 @@ class HDF5DataLayer : public Layer { virtual inline int MinTopBlobs() const { return 1; } protected: + void Next(); + bool Skip(); + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, @@ -55,6 +56,7 @@ class HDF5DataLayer : public Layer { std::vector > > hdf_blobs_; std::vector data_permutation_; std::vector file_permutation_; + uint64_t offset_; }; } // namespace caffe diff --git a/include/caffe/layers/hdf5_output_layer.hpp b/include/caffe/layers/hdf5_output_layer.hpp index 487d08fc06c..061e279d7a0 100644 --- a/include/caffe/layers/hdf5_output_layer.hpp +++ b/include/caffe/layers/hdf5_output_layer.hpp @@ -28,8 +28,6 @@ class HDF5OutputLayer : public Layer { virtual ~HDF5OutputLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} diff --git a/include/caffe/layers/infogain_loss_layer.hpp b/include/caffe/layers/infogain_loss_layer.hpp index 633f339a28e..3b3caa27c38 100644 --- a/include/caffe/layers/infogain_loss_layer.hpp +++ b/include/caffe/layers/infogain_loss_layer.hpp @@ -8,24 +8,26 @@ #include "caffe/proto/caffe.pb.h" #include "caffe/layers/loss_layer.hpp" +#include "caffe/layers/softmax_layer.hpp" namespace caffe { /** - * @brief A generalization of MultinomialLogisticLossLayer that takes an + * @brief A generalization of SoftmaxWithLossLayer that takes an * "information gain" (infogain) matrix specifying the "value" of all label * pairs. * - * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the + * Equivalent to the SoftmaxWithLossLayer if the infogain matrix is the * identity. * * @param bottom input Blob vector (length 2-3) * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$, a Blob with values in - * @f$ [0, 1] @f$ indicating the predicted probability of each of the - * @f$ K = CHW @f$ classes. 
diff --git a/include/caffe/layers/infogain_loss_layer.hpp b/include/caffe/layers/infogain_loss_layer.hpp
index 633f339a28e..3b3caa27c38 100644
--- a/include/caffe/layers/infogain_loss_layer.hpp
+++ b/include/caffe/layers/infogain_loss_layer.hpp
@@ -8,24 +8,26 @@
 #include "caffe/proto/caffe.pb.h"
 
 #include "caffe/layers/loss_layer.hpp"
+#include "caffe/layers/softmax_layer.hpp"
 
 namespace caffe {
 
 /**
- * @brief A generalization of MultinomialLogisticLossLayer that takes an
+ * @brief A generalization of SoftmaxWithLossLayer that takes an
  *        "information gain" (infogain) matrix specifying the "value" of all label
  *        pairs.
 *
- * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the
+ * Equivalent to the SoftmaxWithLossLayer if the infogain matrix is the
 * identity.
 *
 * @param bottom input Blob vector (length 2-3)
 *   -# @f$ (N \times C \times H \times W) @f$
-*      the predictions @f$ \hat{p} @f$, a Blob with values in
-*      @f$ [0, 1] @f$ indicating the predicted probability of each of the
-*      @f$ K = CHW @f$ classes.  Each prediction vector @f$ \hat{p}_n @f$
-*      should sum to 1 as in a probability distribution: @f$
-*      \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$.
+*      the predictions @f$ x @f$, a Blob with values in
+*      @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of
+*      the @f$ K = CHW @f$ classes. This layer maps these scores to a
+*      probability distribution over classes using the softmax function
+*      @f$ \hat{p}_{nk} = \exp(x_{nk}) /
+*      \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer).
 *   -# @f$ (N \times 1 \times 1 \times 1) @f$
 *      the labels @f$ l @f$, an integer-valued Blob with values
 *      @f$ l_n \in [0, 1, 2, ..., K - 1] @f$
@@ -34,7 +36,7 @@ namespace caffe {
 *      (\b optional) the infogain matrix @f$ H @f$.  This must be provided as
 *      the third bottom blob input if not provided as the infogain_mat in the
 *      InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the
- *     MultinomialLogisticLossLayer.
+ *     SoftmaxWithLossLayer.
 * @param top output Blob vector (length 1)
 *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
 *      the computed infogain multinomial logistic loss: @f$ E =
@@ -60,6 +62,12 @@ class InfogainLossLayer : public LossLayer<Dtype> {
   virtual inline int MinBottomBlobs() const { return 2; }
   virtual inline int MaxBottomBlobs() const { return 3; }
 
+  // InfogainLossLayer computes softmax prob internally.
+  // optional second "top" outputs the softmax prob
+  virtual inline int ExactNumTopBlobs() const { return -1; }
+  virtual inline int MinTopBlobs() const { return 1; }
+  virtual inline int MaxTopBlobs() const { return 2; }
+
   virtual inline const char* type() const { return "InfogainLoss"; }
 
  protected:
@@ -91,8 +99,8 @@ class InfogainLossLayer : public LossLayer<Dtype> {
   *        infogain matrix, if provided as bottom[2])
   * @param bottom input Blob vector (length 2-3)
   *   -# @f$ (N \times C \times H \times W) @f$
-  *      the predictions @f$ \hat{p} @f$; Backward computes diff
-  *      @f$ \frac{\partial E}{\partial \hat{p}} @f$
+  *      the predictions @f$ x @f$; Backward computes diff
+  *      @f$ \frac{\partial E}{\partial x} @f$
   *   -# @f$ (N \times 1 \times 1 \times 1) @f$
   *      the labels -- ignored as we can't compute their error gradients
   *   -# @f$ (1 \times 1 \times K \times K) @f$
@@ -102,7 +110,35 @@ class InfogainLossLayer : public LossLayer<Dtype> {
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
 
+  /// Read the normalization mode parameter and compute the normalizer based
+  /// on the blob size.  If normalization_mode is VALID, the count of valid
+  /// outputs will be read from valid_count, unless it is -1 in which case
+  /// all outputs are assumed to be valid.
+  virtual Dtype get_normalizer(
+      LossParameter_NormalizationMode normalization_mode, int valid_count);
+  /// fill sum_rows_H_ according to matrix H
+  virtual void sum_rows_of_H(const Blob<Dtype>* H);
+
+  /// The internal SoftmaxLayer used to map predictions to a distribution.
+  shared_ptr<Layer<Dtype> > softmax_layer_;
+  /// prob stores the output probability predictions from the SoftmaxLayer.
+  Blob<Dtype> prob_;
+  /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward
+  vector<Blob<Dtype>*> softmax_bottom_vec_;
+  /// top vector holder used in call to the underlying SoftmaxLayer::Forward
+  vector<Blob<Dtype>*> softmax_top_vec_;
+  Blob<Dtype> infogain_;
+  Blob<Dtype> sum_rows_H_;  // cache the row sums of H.
+
+  /// Whether to ignore instances with a certain label.
+  bool has_ignore_label_;
+  /// The label indicating that an instance should be ignored.
+  int ignore_label_;
+  /// How to normalize the output loss.
+ LossParameter_NormalizationMode normalization_; + + int infogain_axis_, outer_num_, inner_num_, num_labels_; }; } // namespace caffe diff --git a/include/caffe/layers/input_layer.hpp b/include/caffe/layers/input_layer.hpp index f4472678c69..0ffdc724894 100644 --- a/include/caffe/layers/input_layer.hpp +++ b/include/caffe/layers/input_layer.hpp @@ -22,8 +22,6 @@ class InputLayer : public Layer { : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} diff --git a/include/caffe/layers/python_layer.hpp b/include/caffe/layers/python_layer.hpp index 66dbbdf13b8..1407d9217aa 100644 --- a/include/caffe/layers/python_layer.hpp +++ b/include/caffe/layers/python_layer.hpp @@ -21,8 +21,8 @@ class PythonLayer : public Layer { // Disallow PythonLayer in MultiGPU training stage, due to GIL issues // Details: https://github.com/BVLC/caffe/issues/2936 if (this->phase_ == TRAIN && Caffe::solver_count() > 1 - && !ShareInParallel()) { - LOG(FATAL) << "PythonLayer is not implemented in Multi-GPU training"; + && !Caffe::multiprocess()) { + LOG(FATAL) << "PythonLayer does not support CLI Multi-GPU, use train.py"; } self_.attr("param_str") = bp::str( this->layer_param_.python_param().param_str()); @@ -34,10 +34,6 @@ class PythonLayer : public Layer { self_.attr("reshape")(bottom, top); } - virtual inline bool ShareInParallel() const { - return this->layer_param_.python_param().share_in_parallel(); - } - virtual inline const char* type() const { return "Python"; } protected: diff --git a/include/caffe/layers/scale_layer.hpp b/include/caffe/layers/scale_layer.hpp index 924df2e51ab..45b714d4027 100644 --- a/include/caffe/layers/scale_layer.hpp +++ b/include/caffe/layers/scale_layer.hpp @@ -12,13 +12,15 @@ namespace caffe { /** - * @brief Computes a product of two input Blobs, with the shape of the - * latter Blob "broadcast" to match the shape of the former. + * @brief Computes the elementwise product of two input Blobs, with the shape of + * the latter Blob "broadcast" to match the shape of the former. * Equivalent to tiling the latter Blob, then computing the elementwise - * product. + * product. Note: for efficiency and convenience, this layer can + * additionally perform a "broadcast" sum too when `bias_term: true` + * is set. * - * The second input may be omitted, in which case it's learned as a parameter - * of the layer. + * The latter, scale input may be omitted, in which case it's learned as + * parameter of the layer (as is the bias, if it is included). */ template class ScaleLayer: public Layer { diff --git a/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp b/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp index 598dca5ff2c..3d92524421c 100644 --- a/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp +++ b/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp @@ -59,6 +59,8 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the @@ -95,6 +97,13 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + /// Read the normalization mode parameter and compute the normalizer based + /// on the blob size. If normalization_mode is VALID, the count of valid + /// outputs will be read from valid_count, unless it is -1 in which case + /// all outputs are assumed to be valid. + virtual Dtype get_normalizer( + LossParameter_NormalizationMode normalization_mode, int valid_count); + /// The internal SigmoidLayer used to map predictions to probabilities. shared_ptr > sigmoid_layer_; /// sigmoid_output stores the output of the SigmoidLayer. @@ -103,6 +112,15 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { vector*> sigmoid_bottom_vec_; /// top vector holder to call the underlying SigmoidLayer::Forward vector*> sigmoid_top_vec_; + + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// How to normalize the loss. + LossParameter_NormalizationMode normalization_; + Dtype normalizer_; + int outer_num_, inner_num_; }; } // namespace caffe diff --git a/include/caffe/layers/window_data_layer.hpp b/include/caffe/layers/window_data_layer.hpp index 35f41b80e63..b9b66b7cf1d 100644 --- a/include/caffe/layers/window_data_layer.hpp +++ b/include/caffe/layers/window_data_layer.hpp @@ -16,7 +16,8 @@ namespace caffe { /** * @brief Provides data to the Net from windows of images files, specified - * by a window data file. + * by a window data file. This layer is *DEPRECATED* and only kept for + * archival purposes for use by the original R-CNN. * * TODO(dox): thorough documentation for Forward and proto params. */ diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 6e29f5d883b..efdbbba7f64 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -23,9 +23,9 @@ namespace caffe { template class Net { public: - explicit Net(const NetParameter& param, const Net* root_net = NULL); + explicit Net(const NetParameter& param); explicit Net(const string& param_file, Phase phase, - const Net* root_net = NULL); + const int level = 0, const vector* stages = NULL); virtual ~Net() {} /// @brief Initialize a network with a NetParameter. @@ -245,6 +245,31 @@ class Net { */ Dtype findMax(Blob* blob); + // Invoked at specific points during an iteration + class Callback { + protected: + virtual void run(int layer) = 0; + + template + friend class Net; + }; + const vector& before_forward() const { return before_forward_; } + void add_before_forward(Callback* value) { + before_forward_.push_back(value); + } + const vector& after_forward() const { return after_forward_; } + void add_after_forward(Callback* value) { + after_forward_.push_back(value); + } + const vector& before_backward() const { return before_backward_; } + void add_before_backward(Callback* value) { + before_backward_.push_back(value); + } + const vector& after_backward() const { return after_backward_; } + void add_after_backward(Callback* value) { + after_backward_.push_back(value); + } + protected: // Helpers for Init. /// @brief Append a new top blob to the net. @@ -323,9 +348,13 @@ class Net { size_t memory_used_; /// Whether to compute and display debug info for the net. 
   bool debug_info_;
 
-  /// The root net that actually holds the shared layers in data parallelism
-  const Net* const root_net_;
-  DISABLE_COPY_AND_ASSIGN(Net);
+  // Callbacks
+  vector<Callback*> before_forward_;
+  vector<Callback*> after_forward_;
+  vector<Callback*> before_backward_;
+  vector<Callback*> after_backward_;
+
+DISABLE_COPY_AND_ASSIGN(Net);
 };
diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp
index 6c496c884e3..64bb48e6b02 100644
--- a/include/caffe/parallel.hpp
+++ b/include/caffe/parallel.hpp
@@ -1,8 +1,11 @@
 #ifndef CAFFE_PARALLEL_HPP_
 #define CAFFE_PARALLEL_HPP_
 
-#include <boost/date_time/posix_time/posix_time.hpp>
+#ifdef USE_NCCL
+#include <boost/thread.hpp>
+
+#include <string>
 #include <vector>
 
 #include "caffe/blob.hpp"
@@ -13,6 +16,7 @@
 #include "caffe/solver.hpp"
 #include "caffe/syncedmem.hpp"
 #include "caffe/util/blocking_queue.hpp"
+#include "caffe/util/nccl.hpp"
 
 namespace caffe {
 
@@ -51,7 +55,7 @@ class GPUParams : public Params<Dtype> {
   GPUParams(shared_ptr<Solver<Dtype> > root_solver, int device);
   virtual ~GPUParams();
 
-  void configure(Solver<Dtype>* solver) const;
+  void Configure(Solver<Dtype>* solver) const;
 
  protected:
   using Params<Dtype>::size_;
@@ -59,58 +63,55 @@ class GPUParams : public Params<Dtype> {
   using Params<Dtype>::data_;
   using Params<Dtype>::diff_;
 };
 
-class DevicePair {
- public:
-  DevicePair(int parent, int device)
-      : parent_(parent),
-        device_(device) {
-  }
-  inline int parent() {
-    return parent_;
-  }
-  inline int device() {
-    return device_;
-  }
-
-  // Group GPUs in pairs, by proximity depending on machine's topology
-  static void compute(const vector<int> devices, vector<DevicePair>* pairs);
-
- protected:
-  int parent_;
-  int device_;
-};
-
-// Synchronous data parallelism using map-reduce between local GPUs.
 template<typename Dtype>
-class P2PSync : public GPUParams<Dtype>, public Solver<Dtype>::Callback,
-    public InternalThread {
+class NCCL : public GPUParams<Dtype>,
+             public Solver<Dtype>::Callback,
+             public Net<Dtype>::Callback {
  public:
-  explicit P2PSync(shared_ptr<Solver<Dtype> > root_solver,
-                   P2PSync<Dtype>* parent, const SolverParameter& param);
-  virtual ~P2PSync();
-
-  inline const shared_ptr<Solver<Dtype> >& solver() const {
-    return solver_;
-  }
-
-  void Run(const vector<int>& gpus);
-  void Prepare(const vector<int>& gpus,
-               vector<shared_ptr<P2PSync<Dtype> > >* syncs);
-  inline const int initial_iter() const { return initial_iter_; }
+  /**
+   * Single process version.
+   */
+  explicit NCCL(shared_ptr<Solver<Dtype> > solver);
+  /**
+   * In multi-process settings, first create a NCCL id (new_uid), then
+   * pass it to each process to create connected instances.
+   */
+  NCCL(shared_ptr<Solver<Dtype> > solver, const string& uid);
+  ~NCCL();
+
+  boost::barrier* barrier();
+  void set_barrier(boost::barrier* value);
+
+  /**
+   * In single process settings, create instances without uids and
+   * call this to connect them.
+   */
+  static void InitSingleProcess(vector<NCCL<Dtype>*>* nccls);
+
+  static string new_uid();
+
+  /**
+   * Broadcast weights from rank 0 to other solvers.
+   */
+  void Broadcast();
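On the Python side (bindings appear later in this diff, and the train.py that the PythonLayer message above points to implements the full recipe), the multi-process workflow this header describes can be sketched roughly as follows; the GPU list, solver file, and iteration count are placeholders:

```python
from multiprocessing import Process

import caffe

def worker(rank, uid, gpus, solver_file, niter):
    # one process per GPU; rank 0 broadcasts the initial weights
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    solver = caffe.SGDSolver(solver_file)
    nccl = caffe.NCCL(solver, uid)  # uid shared by all processes
    nccl.bcast()
    solver.add_callback(nccl)       # all-reduce gradients each iteration
    solver.step(niter)

if __name__ == '__main__' and caffe.has_nccl():
    uid = caffe.NCCL.new_uid()      # create once, pass to every process
    gpus = [0, 1]
    procs = [Process(target=worker,
                     args=(r, uid, gpus, 'solver.prototxt', 1000))
             for r in range(len(gpus))]
    for p in procs: p.start()
    for p in procs: p.join()
```

+
+  /**
+   * Single process multi-GPU.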
+ */ + void Run(const vector& gpus, const char* restore); protected: - void on_start(); + void Init(); + void on_start() {} + void run(int layer); // Net callback void on_gradients_ready(); - void InternalThreadEntry(); + ncclComm_t comm_; + cudaStream_t stream_; - P2PSync* parent_; - vector*> children_; - BlockingQueue*> queue_; - const int initial_iter_; - Dtype* parent_grads_; shared_ptr > solver_; - + // Should not be necessary, https://github.com/NVIDIA/nccl/issues/37 + boost::barrier* barrier_; using Params::size_; using Params::data_; using Params::diff_; @@ -118,4 +119,5 @@ class P2PSync : public GPUParams, public Solver::Callback, } // namespace caffe -#endif +#endif // USE_NCCL +#endif // header diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 38259edad9f..a28d8cb897e 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -6,13 +6,14 @@ #include "caffe/net.hpp" #include "caffe/solver_factory.hpp" +#include "caffe/util/benchmark.hpp" namespace caffe { /** * @brief Enumeration of actions that a client of the Solver may request by * implementing the Solver's action request function, which a - * a client may optionally provide in order to request early termination + * client may optionally provide in order to request early termination * or saving a snapshot without exiting. In the executable caffe, this * mechanism is used to allow the snapshot to be saved when stopping * execution with a SIGINT (Ctrl-C). @@ -40,9 +41,8 @@ typedef boost::function ActionCallback; template class Solver { public: - explicit Solver(const SolverParameter& param, - const Solver* root_solver = NULL); - explicit Solver(const string& param_file, const Solver* root_solver = NULL); + explicit Solver(const SolverParameter& param); + explicit Solver(const string& param_file); void Init(const SolverParameter& param); void InitTrainNet(); void InitTestNets(); @@ -72,7 +72,7 @@ class Solver { inline const vector > >& test_nets() { return test_nets_; } - int iter() { return iter_; } + int iter() const { return iter_; } // Invoked at specific points during an iteration class Callback { @@ -118,10 +118,6 @@ class Solver { vector losses_; Dtype smoothed_loss_; - // The root solver that holds root nets (actually containing shared layers) - // in data parallelism - const Solver* const root_solver_; - // A function that can be set by a client of the Solver to provide indication // that it wants a snapshot saved and/or to exit early. ActionCallback action_request_function_; @@ -129,31 +125,11 @@ class Solver { // True iff a request to stop early was received. bool requested_early_exit_; - DISABLE_COPY_AND_ASSIGN(Solver); -}; + // Timing information, handy to tune e.g. nbr of GPUs + Timer iteration_timer_; + float iterations_last_; -/** - * @brief Solver that only computes gradients, used as worker - * for multi-GPU training. 
- */ -template -class WorkerSolver : public Solver { - public: - explicit WorkerSolver(const SolverParameter& param, - const Solver* root_solver = NULL) - : Solver(param, root_solver) {} - - protected: - void ApplyUpdate() {} - void SnapshotSolverState(const string& model_filename) { - LOG(FATAL) << "Should not be called on worker solver."; - } - void RestoreSolverStateFromBinaryProto(const string& state_file) { - LOG(FATAL) << "Should not be called on worker solver."; - } - void RestoreSolverStateFromHDF5(const string& state_file) { - LOG(FATAL) << "Should not be called on worker solver."; - } + DISABLE_COPY_AND_ASSIGN(Solver); }; } // namespace caffe diff --git a/include/caffe/solver_factory.hpp b/include/caffe/solver_factory.hpp index cfff721af40..a5b160739b2 100644 --- a/include/caffe/solver_factory.hpp +++ b/include/caffe/solver_factory.hpp @@ -15,7 +15,7 @@ * and its type is its C++ class name, but without the "Solver" at the end * ("MyAwesomeSolver" -> "MyAwesome"). * - * If the solver is going to be created simply by its constructor, in your c++ + * If the solver is going to be created simply by its constructor, in your C++ * file, add the following line: * * REGISTER_SOLVER_CLASS(MyAwesome); diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 38ee4664028..317ce29a257 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -3,6 +3,10 @@ #include +#ifdef USE_MKL + #include "mkl.h" +#endif + #include "caffe/common.hpp" namespace caffe { @@ -20,7 +24,11 @@ inline void CaffeMallocHost(void** ptr, size_t size, bool* use_cuda) { return; } #endif +#ifdef USE_MKL + *ptr = mkl_malloc(size ? size:1, 64); +#else *ptr = malloc(size); +#endif *use_cuda = false; CHECK(*ptr) << "host allocation of size " << size << " failed"; } @@ -32,7 +40,11 @@ inline void CaffeFreeHost(void* ptr, bool use_cuda) { return; } #endif +#ifdef USE_MKL + mkl_free(ptr); +#else free(ptr); +#endif } @@ -44,14 +56,8 @@ inline void CaffeFreeHost(void* ptr, bool use_cuda) { */ class SyncedMemory { public: - SyncedMemory() - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false), - gpu_device_(-1) {} - explicit SyncedMemory(size_t size) - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), - own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false), - gpu_device_(-1) {} + SyncedMemory(); + explicit SyncedMemory(size_t size); ~SyncedMemory(); const void* cpu_data(); void set_cpu_data(void* data); @@ -68,6 +74,8 @@ class SyncedMemory { #endif private: + void check_device(); + void to_cpu(); void to_gpu(); void* cpu_ptr_; @@ -77,7 +85,7 @@ class SyncedMemory { bool own_cpu_data_; bool cpu_malloc_use_cuda_; bool own_gpu_data_; - int gpu_device_; + int device_; DISABLE_COPY_AND_ASSIGN(SyncedMemory); }; // class SyncedMemory diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index fc156091476..294f7e5011a 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -18,9 +18,8 @@ using std::endl; #include "caffe_config.h" #else #define CUDA_TEST_DEVICE -1 - #define CMAKE_SOURCE_DIR "src/" #define EXAMPLES_SOURCE_DIR "examples/" - #define CMAKE_EXT "" + #define ABS_TEST_DATA_DIR "src/caffe/test/test_data" #endif int main(int argc, char** argv); diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index a7d8dbbad4c..cd3f93f6e28 100644 --- a/include/caffe/util/cudnn.hpp +++ 
b/include/caffe/util/cudnn.hpp @@ -41,6 +41,16 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) { return "CUDNN_STATUS_NOT_SUPPORTED"; case CUDNN_STATUS_LICENSE_ERROR: return "CUDNN_STATUS_LICENSE_ERROR"; +#if CUDNN_VERSION_MIN(6, 0, 0) + case CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING: + return "CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING"; +#endif +#if CUDNN_VERSION_MIN(7, 0, 0) + case CUDNN_STATUS_RUNTIME_IN_PROGRESS: + return "CUDNN_STATUS_RUNTIME_IN_PROGRESS"; + case CUDNN_STATUS_RUNTIME_FP_OVERFLOW: + return "CUDNN_STATUS_RUNTIME_FP_OVERFLOW"; +#endif } return "Unknown cudnn status"; } @@ -109,8 +119,14 @@ template inline void setConvolutionDesc(cudnnConvolutionDescriptor_t* conv, cudnnTensorDescriptor_t bottom, cudnnFilterDescriptor_t filter, int pad_h, int pad_w, int stride_h, int stride_w) { +#if CUDNN_VERSION_MIN(6, 0, 0) CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, + pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION, + dataType::type)); +#else + CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*conv, pad_h, pad_w, stride_h, stride_w, 1, 1, CUDNN_CROSS_CORRELATION)); +#endif } template diff --git a/include/caffe/util/db_leveldb.hpp b/include/caffe/util/db_leveldb.hpp index e9fa0d32b66..4cdb6db9558 100644 --- a/include/caffe/util/db_leveldb.hpp +++ b/include/caffe/util/db_leveldb.hpp @@ -14,7 +14,10 @@ namespace caffe { namespace db { class LevelDBCursor : public Cursor { public: explicit LevelDBCursor(leveldb::Iterator* iter) - : iter_(iter) { SeekToFirst(); } + : iter_(iter) { + SeekToFirst(); + CHECK(iter_->status().ok()) << iter_->status().ToString(); + } ~LevelDBCursor() { delete iter_; } virtual void SeekToFirst() { iter_->SeekToFirst(); } virtual void Next() { iter_->Next(); } diff --git a/include/caffe/util/hdf5.hpp b/include/caffe/util/hdf5.hpp index ce568c5eb0d..71549c1cc02 100644 --- a/include/caffe/util/hdf5.hpp +++ b/include/caffe/util/hdf5.hpp @@ -13,12 +13,12 @@ namespace caffe { template void hdf5_load_nd_dataset_helper( hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, - Blob* blob); + Blob* blob, bool reshape); template void hdf5_load_nd_dataset( hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, - Blob* blob); + Blob* blob, bool reshape = false); template void hdf5_save_nd_dataset( diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 6f6d3feeae2..e549120a933 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -52,6 +52,9 @@ void caffe_scal(const int N, const Dtype alpha, Dtype *X); template void caffe_sqr(const int N, const Dtype* a, Dtype* y); +template +void caffe_sqrt(const int N, const Dtype* a, Dtype* y); + template void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); @@ -128,16 +131,16 @@ inline int8_t caffe_sign(Dtype val) { } // output is 1 for the positives, 0 for zero, and -1 for the negatives -DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign(x[i])); +DEFINE_CAFFE_CPU_UNARY_FUNC(sign, y[i] = caffe_sign(x[i])) // This returns a nonzero value if the input has its sign bit set. // The name sngbit is meant to avoid conflicts with std::signbit in the macro. // The extra parens are needed because CUDA < 6.5 defines signbit as a macro, // and we don't want that to expand here when CUDA headers are also included. 
DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, \ - y[i] = static_cast((std::signbit)(x[i]))); + y[i] = static_cast((std::signbit)(x[i]))) -DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])); +DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = std::fabs(x[i])) template void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); @@ -185,6 +188,11 @@ void caffe_gpu_add_scalar(const int N, const Dtype alpha, Dtype *X); template void caffe_gpu_scal(const int N, const Dtype alpha, Dtype *X); +#ifndef CPU_ONLY +template +void caffe_gpu_scal(const int N, const Dtype alpha, Dtype* X, cudaStream_t str); +#endif + template void caffe_gpu_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); @@ -209,6 +217,9 @@ void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); +template +void caffe_gpu_sqrt(const int n, const Dtype* a, Dtype* y); + // caffe_gpu_rng_uniform with two arguments generates integers in the range // [0, UINT_MAX]. void caffe_gpu_rng_uniform(const int n, unsigned int* r); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 3355b6658a3..8c2294c7c86 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -7,9 +7,14 @@ #else // If use MKL, simply include the MKL header +#ifdef USE_ACCELERATE +#include +#else extern "C" { #include } +#endif // USE_ACCELERATE + #include // Functions that caffe uses but are not present if MKL is not linked. @@ -31,10 +36,11 @@ extern "C" { v##name(n, a, y); \ } -DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); -DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); -DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); -DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); +DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]) +DEFINE_VSL_UNARY_FUNC(Sqrt, y[i] = sqrt(a[i])) +DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])) +DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])) +DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])) // A simple way to define the vsl unary functions with singular parameter b. // The operation should be in the form e.g. y[i] = pow(a[i], b) @@ -53,7 +59,7 @@ DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); v##name(n, a, b, y); \ } -DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)); +DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)) // A simple way to define the vsl binary functions. The operation should // be in the form e.g. y[i] = a[i] + b[i] @@ -72,10 +78,10 @@ DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)); v##name(n, a, b, y); \ } -DEFINE_VSL_BINARY_FUNC(Add, y[i] = a[i] + b[i]); -DEFINE_VSL_BINARY_FUNC(Sub, y[i] = a[i] - b[i]); -DEFINE_VSL_BINARY_FUNC(Mul, y[i] = a[i] * b[i]); -DEFINE_VSL_BINARY_FUNC(Div, y[i] = a[i] / b[i]); +DEFINE_VSL_BINARY_FUNC(Add, y[i] = a[i] + b[i]) +DEFINE_VSL_BINARY_FUNC(Sub, y[i] = a[i] - b[i]) +DEFINE_VSL_BINARY_FUNC(Mul, y[i] = a[i] * b[i]) +DEFINE_VSL_BINARY_FUNC(Div, y[i] = a[i] / b[i]) // In addition, MKL comes with an additional function axpby that is not present // in standard blas. 
We will simply use a two-step (inefficient, of course) way diff --git a/include/caffe/util/nccl.hpp b/include/caffe/util/nccl.hpp new file mode 100644 index 00000000000..e01fb7451e8 --- /dev/null +++ b/include/caffe/util/nccl.hpp @@ -0,0 +1,37 @@ +#ifndef CAFFE_UTIL_NCCL_H_ +#define CAFFE_UTIL_NCCL_H_ +#ifdef USE_NCCL + +#include + +#include "caffe/common.hpp" + +#define NCCL_CHECK(condition) \ +{ \ + ncclResult_t result = condition; \ + CHECK_EQ(result, ncclSuccess) << " " \ + << ncclGetErrorString(result); \ +} + +namespace caffe { + +namespace nccl { + +template class dataType; + +template<> class dataType { + public: + static const ncclDataType_t type = ncclFloat; +}; +template<> class dataType { + public: + static const ncclDataType_t type = ncclDouble; +}; + +} // namespace nccl + +} // namespace caffe + +#endif // end USE_NCCL + +#endif // CAFFE_UTIL_NCCL_H_ diff --git a/include/caffe/util/upgrade_proto.hpp b/include/caffe/util/upgrade_proto.hpp index 14e1936a8c2..b145822af32 100644 --- a/include/caffe/util/upgrade_proto.hpp +++ b/include/caffe/util/upgrade_proto.hpp @@ -65,6 +65,12 @@ bool NetNeedsInputUpgrade(const NetParameter& net_param); // Perform all necessary transformations to upgrade input fields into layers. void UpgradeNetInput(NetParameter* net_param); +// Return true iff the Net contains batch norm layers with manual local LRs. +bool NetNeedsBatchNormUpgrade(const NetParameter& net_param); + +// Perform all necessary transformations to upgrade batch norm layers. +void UpgradeNetBatchNorm(NetParameter* net_param); + // Return true iff the solver contains any old solver_type specified as enums bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param); diff --git a/matlab/+caffe/Net.m b/matlab/+caffe/Net.m index e6295bba1a4..bb99ec89049 100644 --- a/matlab/+caffe/Net.m +++ b/matlab/+caffe/Net.m @@ -68,6 +68,11 @@ self.layer_names = self.attributes.layer_names; self.blob_names = self.attributes.blob_names; end + function delete (self) + if ~isempty(self.hNet_self) + caffe_('delete_net', self.hNet_self); + end + end function layer = layers(self, layer_name) CHECK(ischar(layer_name), 'layer_name must be a string'); layer = self.layer_vec(self.name2layer_index(layer_name)); diff --git a/matlab/+caffe/Solver.m b/matlab/+caffe/Solver.m index f8bdc4e22b2..2d3c98b2a26 100644 --- a/matlab/+caffe/Solver.m +++ b/matlab/+caffe/Solver.m @@ -36,6 +36,9 @@ self.test_nets(n) = caffe.Net(self.attributes.hNet_test_nets(n)); end end + function delete (self) + caffe_('delete_solver', self.hSolver_self); + end function iter = iter(self) iter = caffe_('solver_get_iter', self.hSolver_self); end diff --git a/matlab/+caffe/private/caffe_.cpp b/matlab/+caffe/private/caffe_.cpp index 1b1b2bff861..a32bd5e536d 100644 --- a/matlab/+caffe/private/caffe_.cpp +++ b/matlab/+caffe/private/caffe_.cpp @@ -44,7 +44,7 @@ void mxCHECK_FILE_EXIST(const char* file) { // The pointers to caffe::Solver and caffe::Net instances static vector > > solvers_; static vector > > nets_; -// init_key is generated at the beginning and everytime you call reset +// init_key is generated at the beginning and every time you call reset static double init_key = static_cast(caffe_rng_rand()); /** ----------------------------------------------------------------- @@ -197,6 +197,17 @@ static void get_solver(MEX_ARGS) { mxFree(solver_file); } +// Usage: caffe_('delete_solver', hSolver) +static void delete_solver(MEX_ARGS) { + mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]), + "Usage: caffe_('delete_solver', hSolver)"); + Solver* solver 
= handle_to_ptr<Solver<float> >(prhs[0]);
+  solvers_.erase(std::remove_if(solvers_.begin(), solvers_.end(),
+      [solver] (const shared_ptr< Solver<float> > &solverPtr) {
+        return solverPtr.get() == solver;
+      }), solvers_.end());
+}
+
 // Usage: caffe_('solver_get_attr', hSolver)
 static void solver_get_attr(MEX_ARGS) {
   mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
@@ -271,6 +282,17 @@ static void get_net(MEX_ARGS) {
   mxFree(phase_name);
 }
 
+// Usage: caffe_('delete_net', hNet)
+static void delete_net(MEX_ARGS) {
+  mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
+      "Usage: caffe_('delete_net', hNet)");
+  Net<float>* net = handle_to_ptr<Net<float> >(prhs[0]);
+  nets_.erase(std::remove_if(nets_.begin(), nets_.end(),
+      [net] (const shared_ptr< Net<float> > &netPtr) {
+        return netPtr.get() == net;
+      }), nets_.end());
+}
+
 // Usage: caffe_('net_get_attr', hNet)
 static void net_get_attr(MEX_ARGS) {
   mxCHECK(nrhs == 1 && mxIsStruct(prhs[0]),
@@ -522,12 +544,14 @@ struct handler_registry {
 static handler_registry handlers[] = {
   // Public API functions
   { "get_solver",         get_solver      },
+  { "delete_solver",      delete_solver   },
   { "solver_get_attr",    solver_get_attr },
   { "solver_get_iter",    solver_get_iter },
   { "solver_restore",     solver_restore  },
   { "solver_solve",       solver_solve    },
   { "solver_step",        solver_step     },
   { "get_net",            get_net         },
+  { "delete_net",         delete_net      },
   { "net_get_attr",       net_get_attr    },
   { "net_forward",        net_forward     },
   { "net_backward",       net_backward    },
diff --git a/matlab/CMakeLists.txt b/matlab/CMakeLists.txt
index f420df8d412..987730d9b55 100644
--- a/matlab/CMakeLists.txt
+++ b/matlab/CMakeLists.txt
@@ -20,7 +20,7 @@ if(NOT BUILD_SHARED_LIBS AND build_using MATCHES Matlab)
   message(FATAL_ERROR "Matlab MEX interface (with default mex options file) can only be built if caffe is compiled as shared library. Please enable 'BUILD_SHARED_LIBS' in CMake. Aternativelly you can switch to Octave compiler.")
 endif()
 
-# helper function to set proper mex file extention
+# helper function to set proper mex file extension
 function(caffe_fetch_and_set_proper_mexext mexfile_variable)
   execute_process(COMMAND ${Matlab_mexext} OUTPUT_STRIP_TRAILING_WHITESPACE RESULT_VARIABLE res OUTPUT_VARIABLE ext)
   if(res MATCHES 0)
diff --git a/matlab/demo/classification_demo.m b/matlab/demo/classification_demo.m
index 2b60332970b..435c077845f 100644
--- a/matlab/demo/classification_demo.m
+++ b/matlab/demo/classification_demo.m
@@ -8,7 +8,7 @@
 %
 % ****************************************************************************
 % For detailed documentation and usage on Caffe's Matlab interface, please
-% refer to Caffe Interface Tutorial at
+% refer to the Caffe Interface Tutorial at
 %   http://caffe.berkeleyvision.org/tutorial/interfaces.html#matlab
 % ****************************************************************************
 %
@@ -24,6 +24,7 @@
 % $ export LD_LIBRARY_PATH=/opt/intel/mkl/lib/intel64:/usr/local/cuda-5.5/lib64
 % $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
 % Or the equivalent based on where things are installed on your system
+% and what versions are installed.
 %
 % Usage:
 %  im = imread('../../examples/images/cat.jpg');
@@ -39,7 +40,7 @@
 % Data coming in from matlab needs to be in the order
 %   [width, height, channels, images]
 % where width is the fastest dimension.
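For comparison with the Matlab recipe that follows, the same channel and dimension reordering looks like this in numpy terms (a sketch with made-up image dimensions, not code from the demo):

```python
import numpy as np

# Images typically load as H x W x C with RGB channels; pycaffe wants
# C x H x W BGR, and matcaffe additionally wants the W x H x C order
# described below.
im = np.random.rand(360, 480, 3)        # stand-in for a loaded RGB image
im_bgr = im[:, :, ::-1]                 # swap RGB -> BGR
im_caffe = im_bgr.transpose((2, 0, 1))  # H x W x C -> C x H x W
assert im_caffe.shape == (3, 360, 480)
```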
-% Here is the rough matlab for putting image data into the correct +% Here is the rough matlab code for putting image data into the correct % format in W x H x C with BGR channels: % % permute channels from RGB to BGR % im_data = im(:, :, [3, 2, 1]); @@ -54,7 +55,7 @@ % If you have multiple images, cat them with cat(4, ...) -% Add caffe/matlab to you Matlab search PATH to use matcaffe +% Add caffe/matlab to your Matlab search PATH in order to use matcaffe if exist('../+caffe', 'dir') addpath('..'); else diff --git a/models/bvlc_alexnet/readme.md b/models/bvlc_alexnet/readme.md index 008d690f7f4..a83e3d4e27c 100644 --- a/models/bvlc_alexnet/readme.md +++ b/models/bvlc_alexnet/readme.md @@ -1,5 +1,5 @@ --- -name: BVLC AlexNet Model +name: BAIR/BVLC AlexNet Model caffemodel: bvlc_alexnet.caffemodel caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel license: unrestricted diff --git a/models/bvlc_googlenet/readme.md b/models/bvlc_googlenet/readme.md index 061b6d74530..ef04db62ab2 100644 --- a/models/bvlc_googlenet/readme.md +++ b/models/bvlc_googlenet/readme.md @@ -1,5 +1,5 @@ --- -name: BVLC GoogleNet Model +name: BAIR/BVLC GoogleNet Model caffemodel: bvlc_googlenet.caffemodel caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel license: unrestricted diff --git a/models/bvlc_googlenet/train_val.prototxt b/models/bvlc_googlenet/train_val.prototxt old mode 100644 new mode 100755 index 5dee3abe28f..5fe367f2263 --- a/models/bvlc_googlenet/train_val.prototxt +++ b/models/bvlc_googlenet/train_val.prototxt @@ -1692,7 +1692,7 @@ layer { type: "SoftmaxWithLoss" bottom: "loss2/classifier" bottom: "label" - top: "loss2/loss1" + top: "loss2/loss2" loss_weight: 0.3 } layer { diff --git a/models/bvlc_reference_caffenet/readme.md b/models/bvlc_reference_caffenet/readme.md index 671e47a5056..5352e536a07 100644 --- a/models/bvlc_reference_caffenet/readme.md +++ b/models/bvlc_reference_caffenet/readme.md @@ -1,5 +1,5 @@ --- -name: BVLC CaffeNet Model +name: BAIR/BVLC CaffeNet Model caffemodel: bvlc_reference_caffenet.caffemodel caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel license: unrestricted diff --git a/models/bvlc_reference_rcnn_ilsvrc13/readme.md b/models/bvlc_reference_rcnn_ilsvrc13/readme.md index 9a11a24d8f8..12543b2bd2c 100644 --- a/models/bvlc_reference_rcnn_ilsvrc13/readme.md +++ b/models/bvlc_reference_rcnn_ilsvrc13/readme.md @@ -1,5 +1,5 @@ --- -name: BVLC Reference RCNN ILSVRC13 Model +name: BAIR/BVLC Reference RCNN ILSVRC13 Model caffemodel: bvlc_reference_rcnn_ilsvrc13.caffemodel caffemodel_url: http://dl.caffe.berkeleyvision.org/bvlc_reference_rcnn_ilsvrc13.caffemodel license: unrestricted diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index a22641401f0..c53299d265b 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -3,13 +3,13 @@ if(NOT HAVE_PYTHON) return() endif() -include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) add_library(pycaffe SHARED ${python_srcs}) -target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) -set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") caffe_default_properties(pycaffe) +set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") +target_include_directories(pycaffe PUBLIC ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR}) +target_link_libraries(pycaffe PUBLIC ${Caffe_LINK} ${PYTHON_LIBRARIES}) 
if(UNIX OR APPLE) set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so") @@ -22,13 +22,19 @@ if(UNIX OR APPLE) endif() # ---[ Install -file(GLOB files1 *.py requirements.txt) -install(FILES ${files1} DESTINATION python) - -file(GLOB files2 caffe/*.py) -install(FILES ${files2} DESTINATION python/caffe) +# scripts +file(GLOB python_files *.py requirements.txt) +install(FILES ${python_files} DESTINATION python) + +# module +install(DIRECTORY caffe + DESTINATION python + FILES_MATCHING + PATTERN "*.py" + PATTERN "ilsvrc_2012_mean.npy" + PATTERN "test" EXCLUDE + ) + +# _caffe.so install(TARGETS pycaffe DESTINATION python/caffe) -install(DIRECTORY caffe/imagenet caffe/proto caffe/test DESTINATION python/caffe) - - diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index e2881b89c1b..776945eec88 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,5 +1,5 @@ -from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver -from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list +from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer +from ._caffe import init_log, log, set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed, solver_count, set_solver_count, solver_rank, set_solver_rank, set_multiprocess, has_nccl from ._caffe import __version__ from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 334088e8a57..72659a4f44e 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -51,6 +51,25 @@ const int NPY_DTYPE = NPY_FLOAT32; void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); } void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); } +void InitLog() { + ::google::InitGoogleLogging(""); + ::google::InstallFailureSignalHandler(); +} +void InitLogLevel(int level) { + FLAGS_minloglevel = level; + InitLog(); +} +void InitLogLevelPipe(int level, bool stderr) { + FLAGS_minloglevel = level; + FLAGS_logtostderr = stderr; + InitLog(); +} +void Log(const string& s) { + LOG(INFO) << s; +} + +void set_random_seed(unsigned int seed) { Caffe::set_random_seed(seed); } + // For convenience, check that input files can be opened, and raise an // exception that boost will send to Python if not (caffe could still crash // later if the input files are disturbed before they are actually used, but @@ -86,19 +105,42 @@ void CheckContiguousArray(PyArrayObject* arr, string name, } } -// Net constructor for passing phase as int -shared_ptr > Net_Init( - string param_file, int phase) { - CheckFile(param_file); +// Net constructor +shared_ptr > Net_Init(string network_file, int phase, + const int level, const bp::object& stages, + const bp::object& weights) { + CheckFile(network_file); + + // Convert stages from list to vector + vector stages_vector; + if (!stages.is_none()) { + for (int i = 0; i < len(stages); i++) { + stages_vector.push_back(bp::extract(stages[i])); + } + } + + // Initialize net + shared_ptr > net(new Net(network_file, + static_cast(phase), level, &stages_vector)); + + // Load weights + if (!weights.is_none()) { + std::string weights_file_str = bp::extract(weights); + CheckFile(weights_file_str); + net->CopyTrainedLayersFrom(weights_file_str); + } - shared_ptr > net(new Net(param_file, - static_cast(phase))); return net; } -// Net construct-and-load convenience constructor +// 
Legacy Net construct-and-load convenience constructor shared_ptr > Net_Init_Load( string param_file, string pretrained_param_file, int phase) { + LOG(WARNING) << "DEPRECATION WARNING - deprecated use of Python interface"; + LOG(WARNING) << "Use this instead (with the named \"weights\"" + << " parameter):"; + LOG(WARNING) << "Net('" << param_file << "', " << phase + << ", weights='" << pretrained_param_file << "')"; CheckFile(param_file); CheckFile(pretrained_param_file); @@ -229,12 +271,12 @@ bp::object BlobVec_add_blob(bp::tuple args, bp::dict kwargs) { } template -class PythonCallback: public Solver::Callback { +class SolverCallback: public Solver::Callback { protected: bp::object on_start_, on_gradients_ready_; public: - PythonCallback(bp::object on_start, bp::object on_gradients_ready) + SolverCallback(bp::object on_start, bp::object on_gradients_ready) : on_start_(on_start), on_gradients_ready_(on_gradients_ready) { } virtual void on_gradients_ready() { on_gradients_ready_(); @@ -246,9 +288,94 @@ class PythonCallback: public Solver::Callback { template void Solver_add_callback(Solver * solver, bp::object on_start, bp::object on_gradients_ready) { - solver->add_callback(new PythonCallback(on_start, on_gradients_ready)); + solver->add_callback(new SolverCallback(on_start, on_gradients_ready)); +} + +// Seems boost cannot call the base method directly +void Solver_add_nccl(Solver* solver +#ifdef USE_NCCL + , NCCL* nccl +#endif +) { +#ifdef USE_NCCL + solver->add_callback(nccl); +#endif +} + +void share_weights(Solver* solver, Net* net) { + net->ShareTrainedLayersWith(solver->net().get()); } +template +class NetCallback: public Net::Callback { + public: + explicit NetCallback(bp::object run) : run_(run) {} + + protected: + virtual void run(int layer) { + run_(layer); + } + bp::object run_; +}; +void Net_before_forward(Net* net, bp::object run) { + net->add_before_forward(new NetCallback(run)); +} +void Net_after_forward(Net* net, bp::object run) { + net->add_after_forward(new NetCallback(run)); +} +void Net_before_backward(Net* net, bp::object run) { + net->add_before_backward(new NetCallback(run)); +} +void Net_after_backward(Net* net, bp::object run) { + net->add_after_backward(new NetCallback(run)); +} + +void Net_add_nccl(Net* net +#ifdef USE_NCCL + , NCCL* nccl +#endif +) { +#ifdef USE_NCCL + net->add_after_backward(nccl); +#endif +} +#ifndef USE_NCCL +template +class NCCL { + public: + NCCL(shared_ptr > solver, const string& uid) {} +}; +#endif + +bool HasNCCL() { +#ifdef USE_NCCL + return true; +#else + return false; +#endif +} + +#ifdef USE_NCCL +bp::object NCCL_New_Uid() { + std::string uid = NCCL::new_uid(); +#if PY_MAJOR_VERSION >= 3 + // Convert std::string to bytes so that Python does not + // try to decode the string using the current locale. + + // Since boost 1.53 boost.python will convert str and bytes + // to std::string but will convert std::string to str. Here we + // force a bytes object to be returned. When this object + // is passed back to the NCCL constructor boost.python will + // correctly convert the bytes to std::string automatically + PyObject* py_uid = PyBytes_FromString(uid.c_str()); + return bp::object(bp::handle<>(py_uid)); +#else + // automatic conversion is correct for python 2. 
+ return bp::object(uid); +#endif +} +#endif + BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(SolveOverloads, Solve, 0, 1); BOOST_PYTHON_MODULE(_caffe) { @@ -258,19 +385,36 @@ BOOST_PYTHON_MODULE(_caffe) { bp::scope().attr("__version__") = AS_STRING(CAFFE_VERSION); // Caffe utility functions + bp::def("init_log", &InitLog); + bp::def("init_log", &InitLogLevel); + bp::def("init_log", &InitLogLevelPipe); + bp::def("log", &Log); + bp::def("has_nccl", &HasNCCL); bp::def("set_mode_cpu", &set_mode_cpu); bp::def("set_mode_gpu", &set_mode_gpu); + bp::def("set_random_seed", &set_random_seed); bp::def("set_device", &Caffe::SetDevice); + bp::def("solver_count", &Caffe::solver_count); + bp::def("set_solver_count", &Caffe::set_solver_count); + bp::def("solver_rank", &Caffe::solver_rank); + bp::def("set_solver_rank", &Caffe::set_solver_rank); + bp::def("set_multiprocess", &Caffe::set_multiprocess); bp::def("layer_type_list", &LayerRegistry::LayerTypeList); bp::class_, shared_ptr >, boost::noncopyable >("Net", bp::no_init) - .def("__init__", bp::make_constructor(&Net_Init)) + // Constructor + .def("__init__", bp::make_constructor(&Net_Init, + bp::default_call_policies(), (bp::arg("network_file"), "phase", + bp::arg("level")=0, bp::arg("stages")=bp::object(), + bp::arg("weights")=bp::object()))) + // Legacy constructor .def("__init__", bp::make_constructor(&Net_Init_Load)) .def("_forward", &Net::ForwardFromTo) .def("_backward", &Net::BackwardFromTo) .def("reshape", &Net::Reshape) + .def("clear_param_diffs", &Net::ClearParamDiffs) // The cast is to select a particular overload. .def("copy_from", static_cast::*)(const string)>( &Net::CopyTrainedLayersFrom)) @@ -298,7 +442,12 @@ BOOST_PYTHON_MODULE(_caffe) { bp::with_custodian_and_ward<1, 2, bp::with_custodian_and_ward<1, 3> >()) .def("save", &Net_Save) .def("save_hdf5", &Net_SaveHDF5) - .def("load_hdf5", &Net_LoadHDF5); + .def("load_hdf5", &Net_LoadHDF5) + .def("before_forward", &Net_before_forward) + .def("after_forward", &Net_after_forward) + .def("before_backward", &Net_before_backward) + .def("after_backward", &Net_after_backward) + .def("after_backward", &Net_add_nccl); BP_REGISTER_SHARED_PTR_TO_PYTHON(Net); bp::class_, shared_ptr >, boost::noncopyable>( @@ -315,6 +464,14 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("count", static_cast::*)() const>( &Blob::count)) .def("reshape", bp::raw_function(&Blob_Reshape)) +#ifndef CPU_ONLY + .add_property("_gpu_data_ptr", + reinterpret_cast::*)()>( + &Blob::mutable_gpu_data)) + .add_property("_gpu_diff_ptr", + reinterpret_cast::*)()>( + &Blob::mutable_gpu_diff)) +#endif .add_property("data", bp::make_function(&Blob::mutable_cpu_data, NdarrayCallPolicies())) .add_property("diff", bp::make_function(&Blob::mutable_cpu_diff, @@ -330,6 +487,10 @@ BOOST_PYTHON_MODULE(_caffe) { .add_property("type", bp::make_function(&Layer::type)); BP_REGISTER_SHARED_PTR_TO_PYTHON(Layer); + bp::class_("SolverParameter", bp::no_init) + .add_property("max_iter", &SolverParameter::max_iter) + .add_property("display", &SolverParameter::display) + .add_property("layer_wise_reduce", &SolverParameter::layer_wise_reduce); bp::class_("LayerParameter", bp::no_init); bp::class_, shared_ptr >, boost::noncopyable>( @@ -339,11 +500,15 @@ BOOST_PYTHON_MODULE(_caffe) { bp::return_internal_reference<>())) .add_property("iter", &Solver::iter) .def("add_callback", &Solver_add_callback) + .def("add_callback", &Solver_add_nccl) .def("solve", static_cast::*)(const char*)>( &Solver::Solve), SolveOverloads()) .def("step", &Solver::Step) .def("restore", 
&Solver::Restore) - .def("snapshot", &Solver::Snapshot); + .def("snapshot", &Solver::Snapshot) + .def("share_weights", &share_weights) + .add_property("param", bp::make_function(&Solver::param, + bp::return_value_policy())); BP_REGISTER_SHARED_PTR_TO_PYTHON(Solver); bp::class_, bp::bases >, @@ -387,6 +552,24 @@ BOOST_PYTHON_MODULE(_caffe) { bp::class_ >("BoolVec") .def(bp::vector_indexing_suite >()); + bp::class_, shared_ptr >, + boost::noncopyable>("NCCL", + bp::init >, const string&>()) +#ifdef USE_NCCL + .def("new_uid", NCCL_New_Uid).staticmethod("new_uid") + .def("bcast", &NCCL::Broadcast) +#endif + /* NOLINT_NEXT_LINE(whitespace/semicolon) */ + ; + BP_REGISTER_SHARED_PTR_TO_PYTHON(NCCL); + + bp::class_, boost::noncopyable>( + "Timer", bp::init<>()) + .def("start", &Timer::Start) + .def("stop", &Timer::Stop) + .add_property("ms", &Timer::MilliSeconds); + BP_REGISTER_SHARED_PTR_TO_PYTHON(Timer); + // boost python expects a void (missing) return value, while import_array // returns NULL for python3. import_array1() forces a void return value. import_array1(); diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py index ea29fed86f9..64d804be554 100644 --- a/python/caffe/classifier.py +++ b/python/caffe/classifier.py @@ -23,7 +23,7 @@ class Classifier(caffe.Net): def __init__(self, model_file, pretrained_file, image_dims=None, mean=None, input_scale=None, raw_scale=None, channel_swap=None): - caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST) + caffe.Net.__init__(self, model_file, caffe.TEST, weights=pretrained_file) # configure pre-processing in_ = self.inputs[0] @@ -92,7 +92,7 @@ def predict(self, inputs, oversample=True): # For oversampling, average predictions across crops. if oversample: - predictions = predictions.reshape((len(predictions) / 10, 10, -1)) + predictions = predictions.reshape((len(predictions) // 10, 10, -1)) predictions = predictions.mean(1) return predictions diff --git a/python/caffe/detector.py b/python/caffe/detector.py index 75cd3b1202f..ceee5d36f4c 100644 --- a/python/caffe/detector.py +++ b/python/caffe/detector.py @@ -35,7 +35,7 @@ class Detector(caffe.Net): def __init__(self, model_file, pretrained_file, mean=None, input_scale=None, raw_scale=None, channel_swap=None, context_pad=None): - caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST) + caffe.Net.__init__(self, model_file, caffe.TEST, weights=pretrained_file) # configure pre-processing in_ = self.inputs[0] @@ -83,7 +83,7 @@ def detect_windows(self, images_windows): for ix, window_in in enumerate(window_inputs): caffe_in[ix] = self.transformer.preprocess(in_, window_in) out = self.forward_all(**{in_: caffe_in}) - predictions = out[self.outputs[0]].squeeze(axis=(2, 3)) + predictions = out[self.outputs[0]] # Package predictions with images and windows. 
detections = [] diff --git a/python/caffe/draw.py b/python/caffe/draw.py index 61205ca9f37..8411a41d1d4 100644 --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -91,11 +91,11 @@ def get_layer_label(layer, rankdir): separator, layer.type, separator, - layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1, + layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size) else 1, separator, - layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1, + layer.convolution_param.stride[0] if len(layer.convolution_param.stride) else 1, separator, - layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0) + layer.convolution_param.pad[0] if len(layer.convolution_param.pad) else 0) elif layer.type == 'Pooling': pooling_types_dict = get_pooling_types_dict() node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\ @@ -127,7 +127,7 @@ def choose_color_by_layertype(layertype): return color -def get_pydot_graph(caffe_net, rankdir, label_edges=True): +def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None): """Create a data structure which represents the `caffe_net`. Parameters @@ -137,6 +137,9 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True): Direction of graph layout. label_edges : boolean, optional Label the edges (default is True). + phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional + Include layers from this network phase. If None, include all layers. + (the default is None) Returns ------- @@ -148,6 +151,19 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True): pydot_nodes = {} pydot_edges = [] for layer in caffe_net.layer: + if phase is not None: + included = False + if len(layer.include) == 0: + included = True + if len(layer.include) > 0 and len(layer.exclude) > 0: + raise ValueError('layer ' + layer.name + ' has both include ' + 'and exclude specified.') + for layer_phase in layer.include: + included = included or layer_phase.phase == phase + for layer_phase in layer.exclude: + included = included and not layer_phase.phase == phase + if not included: + continue node_label = get_layer_label(layer, rankdir) node_name = "%s_%s" % (layer.name, layer.type) if (len(layer.bottom) == 1 and len(layer.top) == 1 and @@ -186,7 +202,7 @@ def get_pydot_graph(caffe_net, rankdir, label_edges=True): return pydot_graph -def draw_net(caffe_net, rankdir, ext='png'): +def draw_net(caffe_net, rankdir, ext='png', phase=None): """Draws a caffe net and returns the image string encoded using the given extension. @@ -195,16 +211,19 @@ def draw_net(caffe_net, rankdir, ext='png'): caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. ext : string, optional The image extension (the default is 'png'). + phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional + Include layers from this network phase. If None, include all layers. + (the default is None) Returns ------- string : Postscript representation of the graph. """ - return get_pydot_graph(caffe_net, rankdir).create(format=ext) + return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext) -def draw_net_to_file(caffe_net, filename, rankdir='LR'): +def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None): """Draws a caffe net, and saves it to file using the format given as the file extension. Use '.raw' to output raw text that you can manually feed to graphviz to draw graphs. 
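As a usage sketch for the phase filtering added in this file (the prototxt and output paths are placeholders):

```python
import caffe.draw
from caffe.proto import caffe_pb2
from google.protobuf import text_format

net = caffe_pb2.NetParameter()
with open('net.prototxt') as f:  # placeholder path
    text_format.Merge(f.read(), net)
# Draw only the layers included in the TEST phase; phase=None keeps all.
caffe.draw.draw_net_to_file(net, 'net.png', rankdir='LR',
                            phase=caffe_pb2.TEST)
```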
@@ -216,7 +235,10 @@ def draw_net_to_file(caffe_net, filename, rankdir='LR'): The path to a file where the networks visualization will be stored. rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. + phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional + Include layers from this network phase. If None, include all layers. + (the default is None) """ ext = filename[filename.rfind('.')+1:] with open(filename, 'wb') as fid: - fid.write(draw_net(caffe_net, rankdir, ext)) + fid.write(draw_net(caffe_net, rankdir, ext, phase)) diff --git a/python/caffe/io.py b/python/caffe/io.py index e1759beb587..d0103d60892 100644 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -75,7 +75,7 @@ def array_to_datum(arr, label=None): if arr.dtype == np.uint8: datum.data = arr.tostring() else: - datum.float_data.extend(arr.flat) + datum.float_data.extend(arr.astype(float).flat) if label is not None: datum.label = label return datum @@ -186,13 +186,14 @@ def deprocess(self, in_, data): def set_transpose(self, in_, order): """ - Set the input channel order for e.g. RGB to BGR conversion - as needed for the reference ImageNet model. + Set the order of dimensions, e.g. to convert OpenCV's HxWxC images + into CxHxW. Parameters ---------- - in_ : which input to assign this channel order + in_ : which input to assign this dimension order order : the order to transpose the dimensions + for example (2,0,1) changes HxWxC into CxHxW and (1,2,0) reverts """ self.__check_input(in_) if len(order) != len(self.inputs[in_]) - 1: @@ -256,7 +257,12 @@ def set_mean(self, in_, mean): if len(ms) != 3: raise ValueError('Mean shape invalid') if ms != self.inputs[in_][1:]: - raise ValueError('Mean shape incompatible with input shape.') + in_shape = self.inputs[in_][1:] + m_min, m_max = mean.min(), mean.max() + normal_mean = (mean - m_min) / (m_max - m_min) + mean = resize_image(normal_mean.transpose((1,2,0)), + in_shape[1:]).transpose((2,0,1)) * \ + (m_max - m_min) + m_min self.mean[in_] = mean def set_input_scale(self, in_, scale): @@ -323,7 +329,7 @@ def resize_image(im, new_dims, interp_order=1): # skimage is fast but only understands {1,3} channel images # in [0, 1]. 
im_std = (im - im_min) / (im_max - im_min) - resized_std = resize(im_std, new_dims, order=interp_order) + resized_std = resize(im_std, new_dims, order=interp_order, mode='constant') resized_im = resized_std * (im_max - im_min) + im_min else: # the image is a constant -- avoid divide by 0 diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py index 5fb1f0b3fb1..20918f9b6bc 100644 --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -103,6 +103,10 @@ class Function(object): def __init__(self, type_name, inputs, params): self.type_name = type_name + for index, input in enumerate(inputs): + if not isinstance(input, Top): + raise TypeError('%s input %d is not a Top (type is %s)' % + (type_name, index, type(input))) self.inputs = inputs self.params = params self.ntop = self.params.get('ntop', 1) diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index ca6d050e2bd..4a7b5a24c46 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -11,7 +11,7 @@ import numpy as np from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \ - RMSPropSolver, AdaDeltaSolver, AdamSolver + RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer import caffe.io import six @@ -43,6 +43,16 @@ def _Net_blob_loss_weights(self): self._blob_loss_weights)) return self._blob_loss_weights_dict +@property +def _Net_layer_dict(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + layers indexed by name + """ + if not hasattr(self, '_layer_dict'): + self._layer_dict = OrderedDict(zip(self._layer_names, self.layers)) + return self._layer_dict + @property def _Net_params(self): @@ -103,7 +113,7 @@ def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): if end is not None: end_ind = list(self._layer_names).index(end) - outputs = set([end] + blobs) + outputs = set(self.top_names[end] + blobs) else: end_ind = len(self.layers) - 1 outputs = set(self.outputs + blobs) @@ -151,7 +161,7 @@ def _Net_backward(self, diffs=None, start=None, end=None, **kwargs): if end is not None: end_ind = list(self._layer_names).index(end) - outputs = set([end] + diffs) + outputs = set(self.bottom_names[end] + diffs) else: end_ind = 0 outputs = set(self.inputs + diffs) @@ -292,25 +302,36 @@ def _Net_batch(self, blobs): padding]) yield padded_batch - -class _Net_IdNameWrapper: - """ - A simple wrapper that allows the ids propery to be accessed as a dict - indexed by names. Used for top and bottom names +def _Net_get_id_name(func, field): """ - def __init__(self, net, func): - self.net, self.func = net, func + Generic property that maps func to the layer names into an OrderedDict. - def __getitem__(self, name): - # Map the layer name to id - ids = self.func(self.net, list(self.net._layer_names).index(name)) - # Map the blob id to name - id_to_name = list(self.net.blobs) - return [id_to_name[i] for i in ids] + Used for top_names and bottom_names. + + Parameters + ---------- + func: function id -> [id] + field: implementation field name (cache) + + Returns + ------ + A one-parameter function that can be set as a property. + """ + @property + def get_id_name(self): + if not hasattr(self, field): + id_to_name = list(self.blobs) + res = OrderedDict([(self._layer_names[i], + [id_to_name[j] for j in func(self, i)]) + for i in range(len(self.layers))]) + setattr(self, field, res) + return getattr(self, field) + return get_id_name # Attach methods to Net. 
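Taken together with the Net constructor changes earlier in this diff, the reworked properties support sessions like the following sketch (file and layer names here are placeholders):

```python
import caffe

# Named constructor arguments (level/stages/weights) plus the new
# OrderedDict-valued lookups; paths and layer names are hypothetical.
net = caffe.Net('net.prototxt', caffe.TEST, weights='net.caffemodel')

print(net.layer_dict['conv1'].type)  # look a layer up by name
print(net.top_names['conv1'])        # blobs produced by a layer
print(net.bottom_names['loss'])      # blobs consumed by a layer

# forward over a sub-range of layers, by layer name
net.forward(start='conv1', end='conv1')
```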
Net.blobs = _Net_blobs Net.blob_loss_weights = _Net_blob_loss_weights +Net.layer_dict = _Net_layer_dict Net.params = _Net_params Net.forward = _Net_forward Net.backward = _Net_backward @@ -320,5 +341,5 @@ def __getitem__(self, name): Net._batch = _Net_batch Net.inputs = _Net_inputs Net.outputs = _Net_outputs -Net.top_names = property(lambda n: _Net_IdNameWrapper(n, Net._top_ids)) -Net.bottom_names = property(lambda n: _Net_IdNameWrapper(n, Net._bottom_ids)) +Net.top_names = _Net_get_id_name(Net._top_ids, "_top_names") +Net.bottom_names = _Net_get_id_name(Net._bottom_ids, "_bottom_names") diff --git a/python/caffe/test/test_draw.py b/python/caffe/test/test_draw.py new file mode 100644 index 00000000000..835bb5df010 --- /dev/null +++ b/python/caffe/test/test_draw.py @@ -0,0 +1,37 @@ +import os +import unittest + +from google.protobuf import text_format + +import caffe.draw +from caffe.proto import caffe_pb2 + +def getFilenames(): + """Yields files in the source tree which are Net prototxts.""" + result = [] + + root_dir = os.path.abspath(os.path.join( + os.path.dirname(__file__), '..', '..', '..')) + assert os.path.exists(root_dir) + + for dirname in ('models', 'examples'): + dirname = os.path.join(root_dir, dirname) + assert os.path.exists(dirname) + for cwd, _, filenames in os.walk(dirname): + for filename in filenames: + filename = os.path.join(cwd, filename) + if filename.endswith('.prototxt') and 'solver' not in filename: + yield os.path.join(dirname, filename) + + +class TestDraw(unittest.TestCase): + def test_draw_net(self): + for filename in getFilenames(): + net = caffe_pb2.NetParameter() + with open(filename) as infile: + text_format.Merge(infile.read(), net) + caffe.draw.draw_net(net, 'LR') + + +if __name__ == "__main__": + unittest.main() diff --git a/python/caffe/test/test_nccl.py b/python/caffe/test/test_nccl.py new file mode 100644 index 00000000000..127a9337040 --- /dev/null +++ b/python/caffe/test/test_nccl.py @@ -0,0 +1,19 @@ +import sys +import unittest + +import caffe + + +class TestNCCL(unittest.TestCase): + + def test_newuid(self): + """ + Test that NCCL uids are of the proper type + according to python version + """ + if caffe.has_nccl(): + uid = caffe.NCCL.new_uid() + if sys.version_info.major >= 3: + self.assertTrue(isinstance(uid, bytes)) + else: + self.assertTrue(isinstance(uid, str)) diff --git a/python/caffe/test/test_net.py b/python/caffe/test/test_net.py index 4cacfcd05bb..ee1d38c39db 100644 --- a/python/caffe/test/test_net.py +++ b/python/caffe/test/test_net.py @@ -3,6 +3,7 @@ import os import numpy as np import six +from collections import OrderedDict import caffe @@ -24,11 +25,11 @@ def simple_net_file(num_output): bias_filler { type: 'constant' value: 2 } } param { decay_mult: 1 } param { decay_mult: 0 } } - layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip' + layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip_blob' inner_product_param { num_output: """ + str(num_output) + """ weight_filler { type: 'gaussian' std: 2.5 } bias_filler { type: 'constant' value: -3 } } } - layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label' + layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip_blob' bottom: 'label' top: 'loss' }""") f.close() return f.name @@ -59,20 +60,91 @@ def test_memory(self): for bl in blobs: total += bl.data.sum() + bl.diff.sum() + def test_layer_dict(self): + layer_dict = self.net.layer_dict + self.assertEqual(list(layer_dict.keys()), list(self.net._layer_names)) + for i, name in 
enumerate(self.net._layer_names): + self.assertEqual(layer_dict[name].type, + self.net.layers[i].type) + def test_forward_backward(self): self.net.forward() self.net.backward() + def test_forward_start_end(self): + conv_blob=self.net.blobs['conv'] + ip_blob=self.net.blobs['ip_blob'] + sample_data=np.random.uniform(size=conv_blob.data.shape) + sample_data=sample_data.astype(np.float32) + conv_blob.data[:]=sample_data + forward_blob=self.net.forward(start='ip',end='ip') + self.assertIn('ip_blob',forward_blob) + + manual_forward=[] + for i in range(0,conv_blob.data.shape[0]): + dot=np.dot(self.net.params['ip'][0].data, + conv_blob.data[i].reshape(-1)) + manual_forward.append(dot+self.net.params['ip'][1].data) + manual_forward=np.array(manual_forward) + + np.testing.assert_allclose(ip_blob.data,manual_forward,rtol=1e-3,atol=1e-5) + + def test_backward_start_end(self): + conv_blob=self.net.blobs['conv'] + ip_blob=self.net.blobs['ip_blob'] + sample_data=np.random.uniform(size=ip_blob.data.shape) + sample_data=sample_data.astype(np.float32) + ip_blob.diff[:]=sample_data + backward_blob=self.net.backward(start='ip',end='ip') + self.assertIn('conv',backward_blob) + + manual_backward=[] + for i in range(0,conv_blob.data.shape[0]): + dot=np.dot(self.net.params['ip'][0].data.transpose(), + sample_data[i].reshape(-1)) + manual_backward.append(dot) + manual_backward=np.array(manual_backward) + manual_backward=manual_backward.reshape(conv_blob.data.shape) + + np.testing.assert_allclose(conv_blob.diff,manual_backward,rtol=1e-3,atol=1e-5) + + def test_clear_param_diffs(self): + # Run a forward/backward step to have non-zero diffs + self.net.forward() + self.net.backward() + diff = self.net.params["conv"][0].diff + # Check that we have non-zero diffs + self.assertTrue(diff.max() > 0) + self.net.clear_param_diffs() + # Check that the diffs are now 0 + self.assertTrue((diff == 0).all()) + def test_inputs_outputs(self): self.assertEqual(self.net.inputs, []) self.assertEqual(self.net.outputs, ['loss']) + def test_top_bottom_names(self): + self.assertEqual(self.net.top_names, + OrderedDict([('data', ['data', 'label']), + ('conv', ['conv']), + ('ip', ['ip_blob']), + ('loss', ['loss'])])) + self.assertEqual(self.net.bottom_names, + OrderedDict([('data', []), + ('conv', ['data']), + ('ip', ['conv']), + ('loss', ['ip_blob', 'label'])])) + def test_save_and_read(self): f = tempfile.NamedTemporaryFile(mode='w+', delete=False) f.close() self.net.save(f.name) net_file = simple_net_file(self.num_output) - net2 = caffe.Net(net_file, f.name, caffe.TRAIN) + # Test legacy constructor + # should print deprecation warning + caffe.Net(net_file, f.name, caffe.TRAIN) + # Test named constructor + net2 = caffe.Net(net_file, caffe.TRAIN, weights=f.name) os.remove(net_file) os.remove(f.name) for name in self.net.params: @@ -93,3 +165,225 @@ def test_save_hdf5(self): for i in range(len(self.net.params[name])): self.assertEqual(abs(self.net.params[name][i].data - net2.params[name][i].data).sum(), 0) + +class TestLevels(unittest.TestCase): + + TEST_NET = """ +layer { + name: "data" + type: "DummyData" + top: "data" + dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } } +} +layer { + name: "NoLevel" + type: "InnerProduct" + bottom: "data" + top: "NoLevel" + inner_product_param { num_output: 1 } +} +layer { + name: "Level0Only" + type: "InnerProduct" + bottom: "data" + top: "Level0Only" + include { min_level: 0 max_level: 0 } + inner_product_param { num_output: 1 } +} +layer { + name: "Level1Only" + type: "InnerProduct" + bottom: 
"data" + top: "Level1Only" + include { min_level: 1 max_level: 1 } + inner_product_param { num_output: 1 } +} +layer { + name: "Level>=0" + type: "InnerProduct" + bottom: "data" + top: "Level>=0" + include { min_level: 0 } + inner_product_param { num_output: 1 } +} +layer { + name: "Level>=1" + type: "InnerProduct" + bottom: "data" + top: "Level>=1" + include { min_level: 1 } + inner_product_param { num_output: 1 } +} +""" + + def setUp(self): + self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + self.f.write(self.TEST_NET) + self.f.close() + + def tearDown(self): + os.remove(self.f.name) + + def check_net(self, net, blobs): + net_blobs = [b for b in net.blobs.keys() if 'data' not in b] + self.assertEqual(net_blobs, blobs) + + def test_0(self): + net = caffe.Net(self.f.name, caffe.TEST) + self.check_net(net, ['NoLevel', 'Level0Only', 'Level>=0']) + + def test_1(self): + net = caffe.Net(self.f.name, caffe.TEST, level=1) + self.check_net(net, ['NoLevel', 'Level1Only', 'Level>=0', 'Level>=1']) + + +class TestStages(unittest.TestCase): + + TEST_NET = """ +layer { + name: "data" + type: "DummyData" + top: "data" + dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } } +} +layer { + name: "A" + type: "InnerProduct" + bottom: "data" + top: "A" + include { stage: "A" } + inner_product_param { num_output: 1 } +} +layer { + name: "B" + type: "InnerProduct" + bottom: "data" + top: "B" + include { stage: "B" } + inner_product_param { num_output: 1 } +} +layer { + name: "AorB" + type: "InnerProduct" + bottom: "data" + top: "AorB" + include { stage: "A" } + include { stage: "B" } + inner_product_param { num_output: 1 } +} +layer { + name: "AandB" + type: "InnerProduct" + bottom: "data" + top: "AandB" + include { stage: "A" stage: "B" } + inner_product_param { num_output: 1 } +} +""" + + def setUp(self): + self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + self.f.write(self.TEST_NET) + self.f.close() + + def tearDown(self): + os.remove(self.f.name) + + def check_net(self, net, blobs): + net_blobs = [b for b in net.blobs.keys() if 'data' not in b] + self.assertEqual(net_blobs, blobs) + + def test_A(self): + net = caffe.Net(self.f.name, caffe.TEST, stages=['A']) + self.check_net(net, ['A', 'AorB']) + + def test_B(self): + net = caffe.Net(self.f.name, caffe.TEST, stages=['B']) + self.check_net(net, ['B', 'AorB']) + + def test_AandB(self): + net = caffe.Net(self.f.name, caffe.TEST, stages=['A', 'B']) + self.check_net(net, ['A', 'B', 'AorB', 'AandB']) + + +class TestAllInOne(unittest.TestCase): + + TEST_NET = """ +layer { + name: "train_data" + type: "DummyData" + top: "data" + top: "label" + dummy_data_param { + shape { dim: 1 dim: 1 dim: 10 dim: 10 } + shape { dim: 1 dim: 1 dim: 1 dim: 1 } + } + include { phase: TRAIN stage: "train" } +} +layer { + name: "val_data" + type: "DummyData" + top: "data" + top: "label" + dummy_data_param { + shape { dim: 1 dim: 1 dim: 10 dim: 10 } + shape { dim: 1 dim: 1 dim: 1 dim: 1 } + } + include { phase: TEST stage: "val" } +} +layer { + name: "deploy_data" + type: "Input" + top: "data" + input_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } } + include { phase: TEST stage: "deploy" } +} +layer { + name: "ip" + type: "InnerProduct" + bottom: "data" + top: "ip" + inner_product_param { num_output: 2 } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip" + bottom: "label" + top: "loss" + include: { phase: TRAIN stage: "train" } + include: { phase: TEST stage: "val" } +} +layer { + name: "pred" + type: "Softmax" + bottom: "ip" + top: 
"pred" + include: { phase: TEST stage: "deploy" } +} +""" + + def setUp(self): + self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + self.f.write(self.TEST_NET) + self.f.close() + + def tearDown(self): + os.remove(self.f.name) + + def check_net(self, net, outputs): + self.assertEqual(list(net.blobs['data'].shape), [1,1,10,10]) + self.assertEqual(net.outputs, outputs) + + def test_train(self): + net = caffe.Net(self.f.name, caffe.TRAIN, stages=['train']) + self.check_net(net, ['loss']) + + def test_val(self): + net = caffe.Net(self.f.name, caffe.TEST, stages=['val']) + self.check_net(net, ['loss']) + + def test_deploy(self): + net = caffe.Net(self.f.name, caffe.TEST, stages=['deploy']) + self.check_net(net, ['pred']) + diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py index fee3c0aaebe..ffe71bacb08 100644 --- a/python/caffe/test/test_net_spec.py +++ b/python/caffe/test/test_net_spec.py @@ -79,3 +79,11 @@ def test_zero_tops(self): net_proto = silent_net() net = self.load_net(net_proto) self.assertEqual(len(net.forward()), 0) + + def test_type_error(self): + """Test that a TypeError is raised when a Function input isn't a Top.""" + data = L.DummyData(ntop=2) # data is a 2-tuple of Tops + r = r"^Silence input 0 is not a Top \(type is <(type|class) 'tuple'>\)$" + with self.assertRaisesRegexp(TypeError, r): + L.Silence(data, ntop=0) # should raise: data is a tuple, not a Top + L.Silence(*data, ntop=0) # shouldn't raise: each elt of data is a Top diff --git a/python/draw_net.py b/python/draw_net.py index ec76a744da3..dfe70d26a71 100755 --- a/python/draw_net.py +++ b/python/draw_net.py @@ -28,6 +28,11 @@ def parse_args(): 'http://www.graphviz.org/doc/info/' 'attrs.html#k:rankdir'), default='LR') + parser.add_argument('--phase', + help=('Which network phase to draw: can be TRAIN, ' + 'TEST, or ALL. If ALL, then all layers are drawn ' + 'regardless of phase.'), + default="ALL") args = parser.parse_args() return args @@ -38,7 +43,15 @@ def main(): net = caffe_pb2.NetParameter() text_format.Merge(open(args.input_net_proto_file).read(), net) print('Drawing net to %s' % args.output_image_file) - caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir) + phase=None; + if args.phase == "TRAIN": + phase = caffe.TRAIN + elif args.phase == "TEST": + phase = caffe.TEST + elif args.phase != "ALL": + raise ValueError("Unknown phase: " + args.phase) + caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir, + phase) if __name__ == '__main__': diff --git a/python/train.py b/python/train.py new file mode 100644 index 00000000000..5897f5dcb90 --- /dev/null +++ b/python/train.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +""" +Trains a model using one or more GPUs. 
+""" +from multiprocessing import Process + +import caffe + + +def train( + solver, # solver proto definition + snapshot, # solver snapshot to restore + gpus, # list of device ids + timing=False, # show timing info for compute and communications +): + # NCCL uses a uid to identify a session + uid = caffe.NCCL.new_uid() + + caffe.init_log() + caffe.log('Using devices %s' % str(gpus)) + + procs = [] + for rank in range(len(gpus)): + p = Process(target=solve, + args=(solver, snapshot, gpus, timing, uid, rank)) + p.daemon = True + p.start() + procs.append(p) + for p in procs: + p.join() + + +def time(solver, nccl): + fprop = [] + bprop = [] + total = caffe.Timer() + allrd = caffe.Timer() + for _ in range(len(solver.net.layers)): + fprop.append(caffe.Timer()) + bprop.append(caffe.Timer()) + display = solver.param.display + + def show_time(): + if solver.iter % display == 0: + s = '\n' + for i in range(len(solver.net.layers)): + s += 'forw %3d %8s ' % (i, solver.net._layer_names[i]) + s += ': %.2f\n' % fprop[i].ms + for i in range(len(solver.net.layers) - 1, -1, -1): + s += 'back %3d %8s ' % (i, solver.net._layer_names[i]) + s += ': %.2f\n' % bprop[i].ms + s += 'solver total: %.2f\n' % total.ms + s += 'allreduce: %.2f\n' % allrd.ms + caffe.log(s) + + solver.net.before_forward(lambda layer: fprop[layer].start()) + solver.net.after_forward(lambda layer: fprop[layer].stop()) + solver.net.before_backward(lambda layer: bprop[layer].start()) + solver.net.after_backward(lambda layer: bprop[layer].stop()) + solver.add_callback(lambda: total.start(), lambda: (total.stop(), allrd.start())) + solver.add_callback(nccl) + solver.add_callback(lambda: '', lambda: (allrd.stop(), show_time())) + + +def solve(proto, snapshot, gpus, timing, uid, rank): + caffe.set_mode_gpu() + caffe.set_device(gpus[rank]) + caffe.set_solver_count(len(gpus)) + caffe.set_solver_rank(rank) + caffe.set_multiprocess(True) + + solver = caffe.SGDSolver(proto) + if snapshot and len(snapshot) != 0: + solver.restore(snapshot) + + nccl = caffe.NCCL(solver, uid) + nccl.bcast() + + if timing and rank == 0: + time(solver, nccl) + else: + solver.add_callback(nccl) + + if solver.param.layer_wise_reduce: + solver.net.after_backward(nccl) + solver.step(solver.param.max_iter) + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + + parser.add_argument("--solver", required=True, help="Solver proto definition.") + parser.add_argument("--snapshot", help="Solver snapshot to restore.") + parser.add_argument("--gpus", type=int, nargs='+', default=[0], + help="List of device ids.") + parser.add_argument("--timing", action='store_true', help="Show timing info.") + args = parser.parse_args() + + train(args.solver, args.snapshot, args.gpus, args.timing) diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh index 0e28bd71631..4837587ad30 100755 --- a/scripts/build_docs.sh +++ b/scripts/build_docs.sh @@ -12,6 +12,9 @@ cd $ROOT_DIR # Gather docs. scripts/gather_examples.sh +# Split caffe.proto for inclusion by layer catalogue. +scripts/split_caffe_proto.py + # Generate developer docs. 
make docs diff --git a/scripts/caffe b/scripts/caffe new file mode 100644 index 00000000000..8a0b22af6ac --- /dev/null +++ b/scripts/caffe @@ -0,0 +1,73 @@ +# bash completion for Caffe's command line utility -*- shell-script -*- +# COPYRIGHT (C) 2015,2016 Zhou Mo +# License: BSD-2-Clause +# Originally appeard at https://github.com/BVLC/caffe/issues/3149 + +# Updated for caffe (1.0.0~rc3+20160715-g42cd785) +_caffe() +{ + local cur prev words cword + _init_completion -s || return + + local prototxts='@(prototxt)' + local caffemodels='@(caffemodel,binaryproto)' + local solverstates='@(solverstate)' + local caffefiles='@(prototxt|caffemodel|solverstate)' + + local flags='-gpu -iterations -model -snapshot -solver -weights -sighup_effect -sigint_effect -level -stage -phase' + + if [[ $cword -eq 1 ]]; then + COMPREPLY=( $( compgen -W 'train test time device_query' -- "$cur" ) ) + return 0 + fi + + if [[ $cword -eq 2 ]]; then + case ${words[1]} in + train|test|device_query|time) + COMPREPLY=( $( compgen -W "$flags" -- "$cur") ) + return 0 + ;; + *) + return 0 + ;; + esac + fi + + case $prev in + -gpu|-iterations|-version|-level|-stage) + return 0 + ;; + -solver|-model) + _filedir $prototxts + return 0 + ;; + -weights) + _filedir $caffemodels + return 0 + ;; + -snapshot) + _filedir $solverstates + return 0 + ;; + -sighup_effect|-sigint_effect) + COMPREPLY=( $( compgen -W 'snapshot stop none' -- "$cur") ) + return 0 + ;; + -phase) + COMPREPLY=( $( compgen -W 'TRAIN TEST' -- "$cur") ) + return 0 + ;; + *) + COMPREPLY=( $( compgen -W "$flags" -- "$cur") ) + return 0 + ;; + esac + + # file completion on relevant files + _filedir "$caffefiles" + + return 0 +} +complete -F _caffe caffe + +# vim diff --git a/scripts/cpp_lint.py b/scripts/cpp_lint.py index 14c76ecd6bf..b2016d4b6dd 100755 --- a/scripts/cpp_lint.py +++ b/scripts/cpp_lint.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (c) 2009 Google Inc. All rights reserved. # @@ -52,6 +52,10 @@ import sys import unicodedata +import six + +from six import iteritems, itervalues +from six.moves import xrange _USAGE = """ Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] @@ -756,7 +760,7 @@ def IncrementErrorCount(self, category): def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" - for category, count in self.errors_by_category.iteritems(): + for category, count in iteritems(self.errors_by_category): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count) @@ -3444,16 +3448,16 @@ def GetLineWidth(line): The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ - if isinstance(line, unicode): - width = 0 - for uc in unicodedata.normalize('NFC', line): - if unicodedata.east_asian_width(uc) in ('W', 'F'): - width += 2 - elif not unicodedata.combining(uc): - width += 1 - return width - else: - return len(line) + if six.PY2: + if isinstance(line, unicode): + width = 0 + for uc in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(uc) in ('W', 'F'): + width += 2 + elif not unicodedata.combining(uc): + width += 1 + return width + return len(line) def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, @@ -3774,7 +3778,7 @@ def _GetTextInside(text, start_pattern): # Give opening punctuations to get the matching close-punctuations. 
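The cpp_lint.py edits in this file follow the usual six-based pattern for code that must run under both Python 2 and 3. A generic illustration of the two idioms swapped in here (not code from this patch):

    from six import iteritems, itervalues

    matching = {'(': ')', '{': '}', '[': ']'}
    closers = set(itervalues(matching))         # replaces dict.itervalues()
    for opener, closer in iteritems(matching):  # replaces dict.iteritems()
        assert closer in closers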
matching_punctuation = {'(': ')', '{': '}', '[': ']'} - closing_punctuation = set(matching_punctuation.itervalues()) + closing_punctuation = set(itervalues(matching_punctuation)) # Find the position to start extracting text. match = re.search(start_pattern, text, re.M) @@ -4460,7 +4464,7 @@ def UpdateIncludeState(filename, include_state, io=codecs): io: The io factory to use to read the file. Provided for testability. Returns: - True if a header was succesfully added. False otherwise. + True if a header was successfully added. False otherwise. """ headerfile = None try: @@ -4532,7 +4536,7 @@ def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, # Let's copy the include_state so it is only messed up within this function. include_state = include_state.copy() - # Did we find the header for this file (if any) and succesfully load it? + # Did we find the header for this file (if any) and successfully load it? header_found = False # Use the absolute path so that matching works properly. @@ -4833,7 +4837,7 @@ def ParseArguments(args): try: _valid_extensions = set(val.split(',')) except ValueError: - PrintUsage('Extensions must be comma seperated list.') + PrintUsage('Extensions must be comma separated list.') if not filenames: PrintUsage('No files were specified.') @@ -4851,10 +4855,11 @@ def main(): # Change stderr to write with replacement characters so we don't die # if we try to print something containing non-ASCII characters. - sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') + if six.PY2: + sys.stderr = codecs.StreamReaderWriter(sys.stderr, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace') _cpplint_state.ResetErrorCounts() for filename in filenames: diff --git a/scripts/download_model_binary.py b/scripts/download_model_binary.py index fcdbb5a91a2..a72fd5d76ba 100755 --- a/scripts/download_model_binary.py +++ b/scripts/download_model_binary.py @@ -3,10 +3,11 @@ import sys import time import yaml -import urllib import hashlib import argparse +from six.moves import urllib + required_keys = ['caffemodel', 'caffemodel_url', 'sha1'] @@ -69,7 +70,7 @@ def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']): sys.exit(0) # Download and verify model. - urllib.urlretrieve( + urllib.request.urlretrieve( frontmatter['caffemodel_url'], model_filename, reporthook) if not model_checks_out(): print('ERROR: model did not download correctly! 
Run this again.') diff --git a/scripts/split_caffe_proto.py b/scripts/split_caffe_proto.py new file mode 100755 index 00000000000..7e9dc3e7b22 --- /dev/null +++ b/scripts/split_caffe_proto.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +import mmap +import re +import os +import errno + +script_path = os.path.dirname(os.path.realpath(__file__)) + +# a regex to match the parameter definitions in caffe.proto +r = re.compile(r'(?://.*\n)*message ([^ ]*) \{\n(?: .*\n|\n)*\}') + +# create directory to put caffe.proto fragments +try: + os.mkdir( + os.path.join(script_path, + '../docs/_includes/')) + os.mkdir( + os.path.join(script_path, + '../docs/_includes/proto/')) +except OSError as exception: + if exception.errno != errno.EEXIST: + raise + +caffe_proto_fn = os.path.join( + script_path, + '../src/caffe/proto/caffe.proto') + +with open(caffe_proto_fn, 'r') as fin: + + for m in r.finditer(fin.read(), re.MULTILINE): + fn = os.path.join( + script_path, + '../docs/_includes/proto/%s.txt' % m.group(1)) + with open(fn, 'w') as fout: + fout.write(m.group(0)) diff --git a/scripts/travis/install-deps.sh b/scripts/travis/install-deps.sh index ee16d36a7fc..abf9cf1ca70 100755 --- a/scripts/travis/install-deps.sh +++ b/scripts/travis/install-deps.sh @@ -8,6 +8,7 @@ source $BASEDIR/defaults.sh apt-get -y update apt-get install -y --no-install-recommends \ build-essential \ + graphviz \ libboost-filesystem-dev \ libboost-python-dev \ libboost-system-dev \ @@ -31,6 +32,7 @@ if ! $WITH_PYTHON3 ; then python-dev \ python-numpy \ python-protobuf \ + python-pydot \ python-skimage else # Python3 @@ -56,7 +58,7 @@ else dh-autoreconf \ unzip - wget https://github.com/google/protobuf/archive/v3.0.0-beta-3.tar.gz -O protobuf3.tar.gz + wget https://github.com/google/protobuf/archive/3.0.x.tar.gz -O protobuf3.tar.gz tar -xzf protobuf3.tar.gz -C $PROTOBUF3_DIR --strip 1 rm protobuf3.tar.gz cd $PROTOBUF3_DIR @@ -84,7 +86,7 @@ if $WITH_CUDA ; then rm $CUDA_REPO_PKG if $WITH_CUDNN ; then - ML_REPO_PKG=nvidia-machine-learning-repo_4.0-2_amd64.deb + ML_REPO_PKG=nvidia-machine-learning-repo-ubuntu1404_4.0-2_amd64.deb wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64/$ML_REPO_PKG dpkg -i $ML_REPO_PKG fi @@ -104,7 +106,7 @@ if $WITH_CUDA ; then ln -s /usr/local/cuda-$CUDA_VERSION /usr/local/cuda if $WITH_CUDNN ; then - apt-get install -y --no-install-recommends libcudnn5-dev + apt-get install -y --no-install-recommends libcudnn7-dev fi fi diff --git a/scripts/travis/install-python-deps.sh b/scripts/travis/install-python-deps.sh index eeec302791f..910d35a93be 100755 --- a/scripts/travis/install-python-deps.sh +++ b/scripts/travis/install-python-deps.sh @@ -11,4 +11,5 @@ if ! $WITH_PYTHON3 ; then else # Python3 pip install --pre protobuf==3.0.0b3 + pip install pydot fi diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt index 8a80c940488..4a805568566 100644 --- a/src/caffe/CMakeLists.txt +++ b/src/caffe/CMakeLists.txt @@ -3,9 +3,12 @@ file(GLOB proto_files proto/*.proto) caffe_protobuf_generate_cpp_py(${proto_gen_folder} proto_srcs proto_hdrs proto_python ${proto_files}) # include python files either to force generation -add_library(proto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) -set(Caffe_LINKER_LIBS proto ${Caffe_LINKER_LIBS}) # note, crucial to prepend! 
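Regarding scripts/split_caffe_proto.py above: the regex slices caffe.proto into one fragment per top-level message, leading comments included. A standalone check of the pattern on made-up proto text:

    import re

    r = re.compile(r'(?://.*\n)*message ([^ ]*) \{\n(?: .*\n|\n)*\}')
    sample = '// doc\nmessage FooParameter {\n  optional int32 bar = 1;\n}\n'
    m = r.search(sample)
    print(m.group(1))  # -> FooParameter

One caveat: a compiled pattern's finditer() takes a start position, not flags, so the re.MULTILINE the script passes to it is treated as an offset of 8 characters; that is harmless for caffe.proto but worth knowing.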
-caffe_default_properties(proto) +add_library(caffeproto STATIC ${proto_hdrs} ${proto_srcs} ${proto_python}) +caffe_default_properties(caffeproto) +target_link_libraries(caffeproto PUBLIC ${PROTOBUF_LIBRARIES}) +target_include_directories(caffeproto PUBLIC ${PROTOBUF_INCLUDE_DIR}) + +list(INSERT Caffe_LINKER_LIBS 0 PUBLIC caffeproto) # note, crucial to prepend! # --[ Caffe library @@ -18,8 +21,16 @@ if(HAVE_CUDA) endif() add_library(caffe ${srcs}) -target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) caffe_default_properties(caffe) +target_link_libraries(caffe ${Caffe_LINKER_LIBS}) +target_include_directories(caffe ${Caffe_INCLUDE_DIRS} + PUBLIC + $ + $) +target_compile_definitions(caffe ${Caffe_DEFINITIONS}) +if(Caffe_COMPILE_OPTIONS) + target_compile_options(caffe ${Caffe_COMPILE_OPTIONS}) +endif() set_target_properties(caffe PROPERTIES VERSION ${CAFFE_TARGET_VERSION} SOVERSION ${CAFFE_TARGET_SOVERSION} @@ -29,12 +40,11 @@ set_target_properties(caffe PROPERTIES add_subdirectory(test) # ---[ Install -install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION include) -install(FILES ${proto_hdrs} DESTINATION include/caffe/proto) -install(TARGETS caffe proto EXPORT CaffeTargets DESTINATION lib) +install(DIRECTORY ${Caffe_INCLUDE_DIR}/caffe DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) +install(FILES ${proto_hdrs} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/caffe/proto) +install(TARGETS caffe caffeproto EXPORT CaffeTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}) file(WRITE ${PROJECT_BINARY_DIR}/__init__.py) list(APPEND proto_python ${PROJECT_BINARY_DIR}/__init__.py) install(PROGRAMS ${proto_python} DESTINATION python/caffe/proto) - diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 4a34e4c5856..603e52f7025 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -89,6 +89,12 @@ const Dtype* Blob::cpu_data() const { template void Blob::set_cpu_data(Dtype* data) { CHECK(data); + // Make sure CPU and GPU sizes remain equal + size_t size = count_ * sizeof(Dtype); + if (data_->size() != size) { + data_.reset(new SyncedMemory(size)); + diff_.reset(new SyncedMemory(size)); + } data_->set_cpu_data(data); } @@ -98,6 +104,18 @@ const Dtype* Blob::gpu_data() const { return (const Dtype*)data_->gpu_data(); } +template +void Blob::set_gpu_data(Dtype* data) { + CHECK(data); + // Make sure CPU and GPU sizes remain equal + size_t size = count_ * sizeof(Dtype); + if (data_->size() != size) { + data_.reset(new SyncedMemory(size)); + diff_.reset(new SyncedMemory(size)); + } + data_->set_gpu_data(data); +} + template const Dtype* Blob::cpu_diff() const { CHECK(diff_); diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index dee681654aa..4f6f9bccc38 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -53,7 +53,7 @@ void GlobalInit(int* pargc, char*** pargv) { Caffe::Caffe() : random_generator_(), mode_(Caffe::CPU), - solver_count_(1), root_solver_(true) { } + solver_count_(1), solver_rank_(0), multiprocess_(false) { } Caffe::~Caffe() { } @@ -106,7 +106,8 @@ void* Caffe::RNG::generator() { Caffe::Caffe() : cublas_handle_(NULL), curand_generator_(NULL), random_generator_(), - mode_(Caffe::CPU), solver_count_(1), root_solver_(true) { + mode_(Caffe::CPU), + solver_count_(1), solver_rank_(0), multiprocess_(false) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). 
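The Caffe singleton changes above (solver_rank_ and multiprocess_ replacing the old root_solver_ flag) surface in pycaffe as the setters train.py uses. A per-worker setup sketch (the device id and counts are placeholders):

    import caffe

    caffe.set_mode_gpu()
    caffe.set_device(0)          # this worker's GPU
    caffe.set_solver_count(2)    # total number of cooperating solvers
    caffe.set_solver_rank(0)     # rank 0 typically owns snapshots and logging
    caffe.set_multiprocess(True)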
if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { diff --git a/src/caffe/data_reader.cpp b/src/caffe/data_reader.cpp deleted file mode 100644 index 9f019bbfcb7..00000000000 --- a/src/caffe/data_reader.cpp +++ /dev/null @@ -1,119 +0,0 @@ -#include -#include -#include -#include - -#include "caffe/common.hpp" -#include "caffe/data_reader.hpp" -#include "caffe/layers/data_layer.hpp" -#include "caffe/proto/caffe.pb.h" - -namespace caffe { - -using boost::weak_ptr; - -map > DataReader::bodies_; -static boost::mutex bodies_mutex_; - -DataReader::DataReader(const LayerParameter& param) - : queue_pair_(new QueuePair( // - param.data_param().prefetch() * param.data_param().batch_size())) { - // Get or create a body - boost::mutex::scoped_lock lock(bodies_mutex_); - string key = source_key(param); - weak_ptr& weak = bodies_[key]; - body_ = weak.lock(); - if (!body_) { - body_.reset(new Body(param)); - bodies_[key] = weak_ptr(body_); - } - body_->new_queue_pairs_.push(queue_pair_); -} - -DataReader::~DataReader() { - string key = source_key(body_->param_); - body_.reset(); - boost::mutex::scoped_lock lock(bodies_mutex_); - if (bodies_[key].expired()) { - bodies_.erase(key); - } -} - -// - -DataReader::QueuePair::QueuePair(int size) { - // Initialize the free queue with requested number of datums - for (int i = 0; i < size; ++i) { - free_.push(new Datum()); - } -} - -DataReader::QueuePair::~QueuePair() { - Datum* datum; - while (free_.try_pop(&datum)) { - delete datum; - } - while (full_.try_pop(&datum)) { - delete datum; - } -} - -// - -DataReader::Body::Body(const LayerParameter& param) - : param_(param), - new_queue_pairs_() { - StartInternalThread(); -} - -DataReader::Body::~Body() { - StopInternalThread(); -} - -void DataReader::Body::InternalThreadEntry() { - shared_ptr db(db::GetDB(param_.data_param().backend())); - db->Open(param_.data_param().source(), db::READ); - shared_ptr cursor(db->NewCursor()); - vector > qps; - try { - int solver_count = param_.phase() == TRAIN ? Caffe::solver_count() : 1; - - // To ensure deterministic runs, only start running once all solvers - // are ready. But solvers need to peek on one item during initialization, - // so read one item, then wait for the next solver. - for (int i = 0; i < solver_count; ++i) { - shared_ptr qp(new_queue_pairs_.pop()); - read_one(cursor.get(), qp.get()); - qps.push_back(qp); - } - // Main loop - while (!must_stop()) { - for (int i = 0; i < solver_count; ++i) { - read_one(cursor.get(), qps[i].get()); - } - // Check no additional readers have been created. This can happen if - // more than one net is trained at a time per process, whether single - // or multi solver. It might also happen if two data layers have same - // name and same source. - CHECK_EQ(new_queue_pairs_.size(), 0); - } - } catch (boost::thread_interrupted&) { - // Interrupted exception is expected on shutdown - } -} - -void DataReader::Body::read_one(db::Cursor* cursor, QueuePair* qp) { - Datum* datum = qp->free_.pop(); - // TODO deserialize in-place instead of copy? 
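The DataReader being deleted here multiplexed a single DB cursor across solvers; after this patch each data layer prefetches for itself, with the queue depth read from DataParameter. A hypothetical NetSpec fragment requesting a deeper queue (the source path and sizes are made up):

    from caffe import layers as L, params as P

    # ntop=2: the layer produces both a data and a label top
    data, label = L.Data(source='train_lmdb',  # placeholder DB path
                         backend=P.Data.LMDB,
                         batch_size=64,
                         prefetch=8,           # maps to DataParameter.prefetch
                         ntop=2)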
- datum->ParseFromString(cursor->value()); - qp->full_.push(datum); - - // go to the next iter - cursor->Next(); - if (!cursor->valid()) { - DLOG(INFO) << "Restarting data prefetching from start."; - cursor->SeekToFirst(); - } -} - -} // namespace caffe diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 7189d67e289..3012251e0a5 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -130,7 +130,7 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { - // If datum is encoded, decoded and transform the cv::image. + // If datum is encoded, decode and transform the cv::image. if (datum.encoded()) { #ifdef USE_OPENCV CHECK(!(param_.force_color() && param_.force_gray())) diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp index 104884e0295..11de4979935 100644 --- a/src/caffe/internal_thread.cpp +++ b/src/caffe/internal_thread.cpp @@ -28,25 +28,27 @@ void InternalThread::StartInternalThread() { Caffe::Brew mode = Caffe::mode(); int rand_seed = caffe_rng_rand(); int solver_count = Caffe::solver_count(); - bool root_solver = Caffe::root_solver(); + int solver_rank = Caffe::solver_rank(); + bool multiprocess = Caffe::multiprocess(); try { thread_.reset(new boost::thread(&InternalThread::entry, this, device, mode, - rand_seed, solver_count, root_solver)); + rand_seed, solver_count, solver_rank, multiprocess)); } catch (std::exception& e) { LOG(FATAL) << "Thread exception: " << e.what(); } } void InternalThread::entry(int device, Caffe::Brew mode, int rand_seed, - int solver_count, bool root_solver) { + int solver_count, int solver_rank, bool multiprocess) { #ifndef CPU_ONLY CUDA_CHECK(cudaSetDevice(device)); #endif Caffe::set_mode(mode); Caffe::set_random_seed(rand_seed); Caffe::set_solver_count(solver_count); - Caffe::set_root_solver(root_solver); + Caffe::set_solver_rank(solver_rank); + Caffe::set_multiprocess(multiprocess); InternalThreadEntry(); } diff --git a/src/caffe/layer.cpp b/src/caffe/layer.cpp index 3b9128986ae..684ae88bb49 100644 --- a/src/caffe/layer.cpp +++ b/src/caffe/layer.cpp @@ -1,27 +1,7 @@ -#include #include "caffe/layer.hpp" namespace caffe { -template -void Layer::InitMutex() { - forward_mutex_.reset(new boost::mutex()); -} - -template -void Layer::Lock() { - if (IsShared()) { - forward_mutex_->lock(); - } -} - -template -void Layer::Unlock() { - if (IsShared()) { - forward_mutex_->unlock(); - } -} - INSTANTIATE_CLASS(Layer); } // namespace caffe diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index e967bd6181c..9f9026b1dde 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -8,6 +8,7 @@ #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/layers/conv_layer.hpp" +#include "caffe/layers/deconv_layer.hpp" #include "caffe/layers/lrn_layer.hpp" #include "caffe/layers/pooling_layer.hpp" #include "caffe/layers/relu_layer.hpp" @@ -18,6 +19,7 @@ #ifdef USE_CUDNN #include "caffe/layers/cudnn_conv_layer.hpp" +#include "caffe/layers/cudnn_deconv_layer.hpp" #include "caffe/layers/cudnn_lcn_layer.hpp" #include "caffe/layers/cudnn_lrn_layer.hpp" #include "caffe/layers/cudnn_pooling_layer.hpp" @@ -67,11 +69,51 @@ shared_ptr > GetConvolutionLayer( #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer); +// Get deconvolution 
layer according to engine. +template +shared_ptr > GetDeconvolutionLayer(const LayerParameter& param) { + ConvolutionParameter conv_param = param.convolution_param(); + ConvolutionParameter_Engine engine = conv_param.engine(); +#ifdef USE_CUDNN + bool use_dilation = false; + for (int i = 0; i < conv_param.dilation_size(); ++i) { + if (conv_param.dilation(i) > 1) { + use_dilation = true; + } + } +#endif + if (engine == ConvolutionParameter_Engine_DEFAULT) { + engine = ConvolutionParameter_Engine_CAFFE; +#ifdef USE_CUDNN + if (!use_dilation) { + engine = ConvolutionParameter_Engine_CUDNN; + } +#endif + } + if (engine == ConvolutionParameter_Engine_CAFFE) { + return shared_ptr >(new DeconvolutionLayer(param)); +#ifdef USE_CUDNN + } else if (engine == ConvolutionParameter_Engine_CUDNN) { + if (use_dilation) { + LOG(FATAL) << "CuDNN doesn't support the dilated deconvolution at Layer " + << param.name(); + } + return shared_ptr >(new CuDNNDeconvolutionLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning + } +} + +REGISTER_LAYER_CREATOR(Deconvolution, GetDeconvolutionLayer); + // Get pooling layer according to engine. template shared_ptr > GetPoolingLayer(const LayerParameter& param) { @@ -104,6 +146,7 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } @@ -141,6 +184,7 @@ shared_ptr > GetLRNLayer(const LayerParameter& param) { #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } @@ -164,6 +208,7 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } @@ -187,6 +232,7 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } @@ -210,6 +256,7 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } @@ -233,6 +280,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { #endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + throw; // Avoids missing return warning } } diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index 4eddbb5c850..b6d95b54a5d 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -52,8 +52,6 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_label = bottom[1]->cpu_data(); const int dim = bottom[0]->count() / outer_num_; const int num_labels = bottom[0]->shape(label_axis_); - vector maxval(top_k_+1); - vector max_id(top_k_+1); if (top.size() > 1) { caffe_set(nums_buffer_.count(), Dtype(0), nums_buffer_.mutable_cpu_data()); caffe_set(top[1]->count(), Dtype(0), top[1]->mutable_cpu_data()); @@ -66,32 +64,29 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, if (has_ignore_label_ && label_value == ignore_label_) { continue; } - if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value]; DCHECK_GE(label_value, 0); DCHECK_LT(label_value, num_labels); + if (top.size() > 1) ++nums_buffer_.mutable_cpu_data()[label_value]; + const Dtype 
prob_of_true_class = bottom_data[i * dim + + label_value * inner_num_ + + j]; + int num_better_predictions = -1; // true_class also counts as "better" // Top-k accuracy - std::vector > bottom_data_vector; - for (int k = 0; k < num_labels; ++k) { - bottom_data_vector.push_back(std::make_pair( - bottom_data[i * dim + k * inner_num_ + j], k)); + for (int k = 0; k < num_labels && num_better_predictions < top_k_; ++k) { + num_better_predictions += + (bottom_data[i * dim + k * inner_num_ + j] >= prob_of_true_class); } - std::partial_sort( - bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, - bottom_data_vector.end(), std::greater >()); - // check if true label is in top k predictions - for (int k = 0; k < top_k_; k++) { - if (bottom_data_vector[k].second == label_value) { - ++accuracy; - if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value]; - break; - } + // check if there are less than top_k_ predictions + if (num_better_predictions < top_k_) { + ++accuracy; + if (top.size() > 1) ++top[1]->mutable_cpu_data()[label_value]; } ++count; } } // LOG(INFO) << "Accuracy: " << accuracy; - top[0]->mutable_cpu_data()[0] = accuracy / count; + top[0]->mutable_cpu_data()[0] = (count == 0) ? 0 : (accuracy / count); if (top.size() > 1) { for (int i = 0; i < top[1]->count(); ++i) { top[1]->mutable_cpu_data()[i] = @@ -102,6 +97,10 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, // Accuracy layer should not be used as a loss function. } +#ifdef CPU_ONLY +STUB_GPU(AccuracyLayer); +#endif + INSTANTIATE_CLASS(AccuracyLayer); REGISTER_LAYER_CLASS(Accuracy); diff --git a/src/caffe/layers/accuracy_layer.cu b/src/caffe/layers/accuracy_layer.cu new file mode 100644 index 00000000000..904aab422af --- /dev/null +++ b/src/caffe/layers/accuracy_layer.cu @@ -0,0 +1,148 @@ +#include + +#include "caffe/layers/accuracy_layer.hpp" +#include "caffe/util/math_functions.hpp" + + +namespace caffe { + +template +__global__ void AccuracyForwardGPU(const int nthreads, + const Dtype* bottom_data, const Dtype* label, Dtype* acc, + const int num, const int dim, const int spatial_dim, + const int num_labels, const int top_k, + const bool has_ignore_label_, const int ignore_label_, + Dtype* counts) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = static_cast(label[n * spatial_dim + s]); + const Dtype prob_of_true_class = bottom_data[n * dim + + label_value * spatial_dim + + s]; + int num_better_predictions = -1; // true_class also counts as "better" + if (has_ignore_label_ && label_value == ignore_label_) { + acc[index] = 0; + counts[index] = 0; + } else { + for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { + num_better_predictions += + (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); + } + acc[index] = (num_better_predictions < top_k); + counts[index] = 1; + } + } +} + +template +__global__ void AccuracyForwardWithPerClassGPU(const int nthreads, + const Dtype* bottom_data, const Dtype* label, + Dtype* acc, Dtype* counts, + const int num, const int dim, const int spatial_dim, + const int num_labels, const int top_k, + const bool has_ignore_label_, const int ignore_label_) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int n = index / spatial_dim; + const int s = index % spatial_dim; + const int label_value = static_cast(label[n * spatial_dim + s]); + const Dtype prob_of_true_class = bottom_data[n * dim + + label_value * spatial_dim + + s]; + if (has_ignore_label_ && 
label_value == ignore_label_) { + // nothing to be done. + } else { + int num_better_predictions = -1; // true_class also counts as "better" + for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { + num_better_predictions += + (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); + } + acc[label_value*nthreads + index] += (num_better_predictions < top_k); + counts[label_value*nthreads + index] = 1; + } + } +} + +template +void AccuracyLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* bottom_label = bottom[1]->gpu_data(); + const int dim = bottom[0]->count() / outer_num_; + const int num_labels = bottom[0]->shape(label_axis_); + const int nthreads = outer_num_ * inner_num_; + // Since this memory is not used for anything, we use it here to avoid having + // to allocate new GPU memory to accumulate intermediate results. + Dtype* acc_data = bottom[0]->mutable_gpu_diff(); + if (top.size() == 1) { + // simple case - report only global accuracy. + + // Similarly, this memory is never used elsewhere, and thus we can use it + // to avoid having to allocate additional GPU memory. + Dtype* counts = bottom[1]->mutable_gpu_diff(); + // NOLINT_NEXT_LINE(whitespace/operators) + AccuracyForwardGPU<<>>(nthreads, bottom_data, bottom_label, + acc_data, outer_num_, dim, inner_num_, num_labels, top_k_, + has_ignore_label_, ignore_label_, counts); + Dtype acc; + caffe_gpu_asum(nthreads, acc_data, &acc); + Dtype valid_count; + caffe_gpu_asum(nthreads, counts, &valid_count); + if (valid_count > 0) { + top[0]->mutable_cpu_data()[0] = acc / valid_count; + } else { + top[0]->mutable_cpu_data()[0] = 0; + } + } else { + // need to report per-class accuracy as well + + // allocate space for more detailed "counts" + nums_buffer_.ReshapeLike(*bottom[0]); + Dtype* counts = nums_buffer_.mutable_gpu_data(); + + caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data); + caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts); + + // NOLINT_NEXT_LINE(whitespace/operators) + AccuracyForwardWithPerClassGPU<<>>(nthreads, bottom_data, bottom_label, + acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_, + has_ignore_label_, ignore_label_); + + // get the overall accuracy + Dtype acc; + caffe_gpu_asum(bottom[0]->count(), acc_data, &acc); + Dtype valid_count; + caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count); + if (valid_count > 0) { + top[0]->mutable_cpu_data()[0] = acc / valid_count; + } else { + top[0]->mutable_cpu_data()[0] = 0; + } + + // get per-class accuracy + Dtype* per_class_acc = top[1]->mutable_cpu_data(); + for (int l = 0; l < num_labels; l++) { + caffe_gpu_asum(nthreads, acc_data + l*nthreads, per_class_acc+l); + caffe_gpu_asum(nthreads, counts + l*nthreads, &valid_count); + if (valid_count > 0) { + per_class_acc[l] /= valid_count; + } else { + per_class_acc[l] = 0; + } + } + } + // Clear scratch memory to prevent interfering with backward (see #6202). 
+ caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); +} + + +template +void AccuracyLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[1]) { NOT_IMPLEMENTED; } +} + +INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer); +} // namespace caffe diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 4a4c68e009a..aff0a7548c7 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -19,7 +19,6 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, const int num_axes = bottom[0]->num_axes(); num_spatial_axes_ = num_axes - first_spatial_axis; CHECK_GE(num_spatial_axes_, 0); - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1)); // Setup filter kernel dimensions (kernel_shape_). kernel_shape_.Reshape(spatial_dim_blob_shape); @@ -194,7 +193,9 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) - << "All inputs must have the same shape."; + << "shape mismatch - bottom[0]: " << bottom[0]->shape_string() + << " vs. bottom[" << bottom_id << "]: " + << bottom[bottom_id]->shape_string(); } // Shape the tops. bottom_shape_ = &bottom[0]->shape(); diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 989319f1a07..93a798f3571 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -36,9 +36,11 @@ template BasePrefetchingDataLayer::BasePrefetchingDataLayer( const LayerParameter& param) : BaseDataLayer(param), - prefetch_free_(), prefetch_full_() { - for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_free_.push(&prefetch_[i]); + prefetch_(param.data_param().prefetch()), + prefetch_free_(), prefetch_full_(), prefetch_current_() { + for (int i = 0; i < prefetch_.size(); ++i) { + prefetch_[i].reset(new Batch()); + prefetch_free_.push(prefetch_[i].get()); } } @@ -46,22 +48,23 @@ template void BasePrefetchingDataLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { BaseDataLayer::LayerSetUp(bottom, top); + // Before starting the prefetch thread, we make cpu_data and gpu_data // calls so that the prefetch thread does not accidentally make simultaneous // cudaMalloc calls when the main thread is running. In some GPUs this // seems to cause failures if we do not so. 
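Returning to the accuracy layer above: both the CPU rewrite and the new GPU kernels avoid the old per-sample partial sort by counting how many class scores are at least the true class's score. A small numpy sketch of the equivalence:

    import numpy as np

    def in_top_k(scores, label, k):
        # mirrors the kernels: start from -1 so the true class's own score,
        # compared with >=, does not count against it
        num_better = np.sum(scores >= scores[label]) - 1
        return num_better < k

    print(in_top_k(np.array([0.1, 0.5, 0.4]), label=1, k=1))  # True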
- for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_[i].data_.mutable_cpu_data(); + for (int i = 0; i < prefetch_.size(); ++i) { + prefetch_[i]->data_.mutable_cpu_data(); if (this->output_labels_) { - prefetch_[i].label_.mutable_cpu_data(); + prefetch_[i]->label_.mutable_cpu_data(); } } #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { - for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_[i].data_.mutable_gpu_data(); + for (int i = 0; i < prefetch_.size(); ++i) { + prefetch_[i]->data_.mutable_gpu_data(); if (this->output_labels_) { - prefetch_[i].label_.mutable_gpu_data(); + prefetch_[i]->label_.mutable_gpu_data(); } } } @@ -88,6 +91,9 @@ void BasePrefetchingDataLayer::InternalThreadEntry() { #ifndef CPU_ONLY if (Caffe::mode() == Caffe::GPU) { batch->data_.data().get()->async_gpu_push(stream); + if (this->output_labels_) { + batch->label_.data().get()->async_gpu_push(stream); + } CUDA_CHECK(cudaStreamSynchronize(stream)); } #endif @@ -106,22 +112,18 @@ void BasePrefetchingDataLayer::InternalThreadEntry() { template void BasePrefetchingDataLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + if (prefetch_current_) { + prefetch_free_.push(prefetch_current_); + } + prefetch_current_ = prefetch_full_.pop("Waiting for data"); // Reshape to loaded data. - top[0]->ReshapeLike(batch->data_); - // Copy the data - caffe_copy(batch->data_.count(), batch->data_.cpu_data(), - top[0]->mutable_cpu_data()); - DLOG(INFO) << "Prefetch copied"; + top[0]->ReshapeLike(prefetch_current_->data_); + top[0]->set_cpu_data(prefetch_current_->data_.mutable_cpu_data()); if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(batch->label_); - // Copy the labels. - caffe_copy(batch->label_.count(), batch->label_.cpu_data(), - top[1]->mutable_cpu_data()); + top[1]->ReshapeLike(prefetch_current_->label_); + top[1]->set_cpu_data(prefetch_current_->label_.mutable_cpu_data()); } - - prefetch_free_.push(batch); } #ifdef CPU_ONLY diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 4056d36a7b4..64c621a74f1 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -7,23 +7,18 @@ namespace caffe { template void BasePrefetchingDataLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { - Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + if (prefetch_current_) { + prefetch_free_.push(prefetch_current_); + } + prefetch_current_ = prefetch_full_.pop("Waiting for data"); // Reshape to loaded data. - top[0]->ReshapeLike(batch->data_); - // Copy the data - caffe_copy(batch->data_.count(), batch->data_.gpu_data(), - top[0]->mutable_gpu_data()); + top[0]->ReshapeLike(prefetch_current_->data_); + top[0]->set_gpu_data(prefetch_current_->data_.mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(batch->label_); - // Copy the labels. - caffe_copy(batch->label_.count(), batch->label_.gpu_data(), - top[1]->mutable_gpu_data()); + top[1]->ReshapeLike(prefetch_current_->label_); + top[1]->set_gpu_data(prefetch_current_->label_.mutable_gpu_data()); } - // Ensure the copy is synchronous wrt the host, so that the next batch isn't - // copied in meanwhile. 
- CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); - prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); diff --git a/src/caffe/layers/batch_norm_layer.cpp b/src/caffe/layers/batch_norm_layer.cpp index a69d8f99316..c6a1d5b1b2c 100644 --- a/src/caffe/layers/batch_norm_layer.cpp +++ b/src/caffe/layers/batch_norm_layer.cpp @@ -27,13 +27,25 @@ void BatchNormLayer::LayerSetUp(const vector*>& bottom, sz.push_back(channels_); this->blobs_[0].reset(new Blob(sz)); this->blobs_[1].reset(new Blob(sz)); - sz[0]=1; + sz[0] = 1; this->blobs_[2].reset(new Blob(sz)); for (int i = 0; i < 3; ++i) { caffe_set(this->blobs_[i]->count(), Dtype(0), this->blobs_[i]->mutable_cpu_data()); } } + // Mask statistics from optimization by setting local learning rates + // for mean, variance, and the bias correction to zero. + for (int i = 0; i < this->blobs_.size(); ++i) { + if (this->layer_param_.param_size() == i) { + ParamSpec* fixed_param_spec = this->layer_param_.add_param(); + fixed_param_spec->set_lr_mult(0.f); + } else { + CHECK_EQ(this->layer_param_.param(i).lr_mult(), 0.f) + << "Cannot configure batch normalization statistics as layer " + << "parameters."; + } + } } template @@ -49,7 +61,7 @@ void BatchNormLayer::Reshape(const vector*>& bottom, variance_.Reshape(sz); temp_.ReshapeLike(*bottom[0]); x_norm_.ReshapeLike(*bottom[0]); - sz[0]=bottom[0]->shape(0); + sz[0] = bottom[0]->shape(0); batch_sum_multiplier_.Reshape(sz); int spatial_dim = bottom[0]->count()/(channels_*bottom[0]->shape(0)); @@ -112,8 +124,8 @@ void BatchNormLayer::Forward_cpu(const vector*>& bottom, if (!use_global_stats_) { // compute variance using var(X) = E((X-EX)^2) - caffe_powx(top[0]->count(), top_data, Dtype(2), - temp_.mutable_cpu_data()); // (X-EX)^2 + caffe_sqr(top[0]->count(), top_data, + temp_.mutable_cpu_data()); // (X-EX)^2 caffe_cpu_gemv(CblasNoTrans, channels_ * num, spatial_dim, 1. / (num * spatial_dim), temp_.cpu_data(), spatial_sum_multiplier_.cpu_data(), 0., @@ -136,7 +148,7 @@ void BatchNormLayer::Forward_cpu(const vector*>& bottom, // normalize variance caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); - caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), + caffe_sqrt(variance_.count(), variance_.cpu_data(), variance_.mutable_cpu_data()); // replicate variance to input size diff --git a/src/caffe/layers/batch_norm_layer.cu b/src/caffe/layers/batch_norm_layer.cu index c21713c81d9..a35e778e2f1 100644 --- a/src/caffe/layers/batch_norm_layer.cu +++ b/src/caffe/layers/batch_norm_layer.cu @@ -48,14 +48,14 @@ void BatchNormLayer::Forward_gpu(const vector*>& bottom, if (!use_global_stats_) { // compute variance using var(X) = E((X-EX)^2) - caffe_gpu_powx(top[0]->count(), top_data, Dtype(2), + caffe_gpu_mul(top[0]->count(), top[0]->gpu_data(), top[0]->gpu_data(), temp_.mutable_gpu_data()); // (X-EX)^2 caffe_gpu_gemv(CblasNoTrans, channels_ * num, spatial_dim, 1. 
/ (num * spatial_dim), temp_.gpu_data(), spatial_sum_multiplier_.gpu_data(), 0., num_by_chans_.mutable_gpu_data()); - caffe_gpu_gemv(CblasTrans, num, channels_, 1., - num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0., + caffe_gpu_gemv(CblasTrans, num, channels_, Dtype(1.), + num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(0.), variance_.mutable_gpu_data()); // E((X_EX)^2) // compute and save moving average @@ -72,7 +72,7 @@ void BatchNormLayer::Forward_gpu(const vector*>& bottom, // normalize variance caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); - caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + caffe_gpu_sqrt(variance_.count(), variance_.gpu_data(), variance_.mutable_gpu_data()); // replicate variance to input size diff --git a/src/caffe/layers/crop_layer.cpp b/src/caffe/layers/crop_layer.cpp index aecdcd63194..65ea8f8b7d0 100644 --- a/src/caffe/layers/crop_layer.cpp +++ b/src/caffe/layers/crop_layer.cpp @@ -40,8 +40,10 @@ void CropLayer::Reshape(const vector*>& bottom, const int start_axis = bottom[0]->CanonicalAxisIndex(param.axis()); // Initialize offsets to 0 and the new shape to the current shape of the data. - offsets = vector(input_dim, 0); vector new_shape(bottom[0]->shape()); + vector offsets_shape(1, input_dim); + offsets.Reshape(offsets_shape); + int* offset_data = offsets.mutable_cpu_data(); // Determine crop offsets and the new shape post-crop. for (int i = 0; i < input_dim; ++i) { @@ -63,15 +65,22 @@ void CropLayer::Reshape(const vector*>& bottom, << "size " << bottom[1]->shape(i) << " and offset " << crop_offset; } new_shape[i] = new_size; - offsets[i] = crop_offset; + offset_data[i] = crop_offset; } top[0]->Reshape(new_shape); + // Compute strides + src_strides_.Reshape(offsets_shape); + dest_strides_.Reshape(offsets_shape); + for (int i = 0; i < input_dim; ++i) { + src_strides_.mutable_cpu_data()[i] = bottom[0]->count(i + 1, input_dim); + dest_strides_.mutable_cpu_data()[i] = top[0]->count(i + 1, input_dim); + } } template void CropLayer::crop_copy(const vector*>& bottom, const vector*>& top, - const vector& offsets, + const int* offsets, vector indices, int cur_dim, const Dtype* src_data, @@ -85,28 +94,26 @@ void CropLayer::crop_copy(const vector*>& bottom, src_data, dest_data, is_forward); } } else { - // We are at the last dimensions, which is stored continously in memory - for (int i = 0; i < top[0]->shape(cur_dim); ++i) { - // prepare index vector reduced(red) and with offsets(off) - std::vector ind_red(cur_dim, 0); - std::vector ind_off(cur_dim+1, 0); - for (int j = 0; j < cur_dim; ++j) { - ind_red[j] = indices[j]; - ind_off[j] = indices[j] + offsets[j]; - } - ind_off[cur_dim] = offsets[cur_dim]; - // do the copy - if (is_forward) { - caffe_copy(top[0]->shape(cur_dim), - src_data + bottom[0]->offset(ind_off), - dest_data + top[0]->offset(ind_red)); - } else { - // in the backwards pass the src_data is top_diff - // and the dest_data is bottom_diff - caffe_copy(top[0]->shape(cur_dim), - src_data + top[0]->offset(ind_red), - dest_data + bottom[0]->offset(ind_off)); - } + // We are at the last dimensions, which is stored continuously in memory + // prepare index vector reduced(red) and with offsets(off) + std::vector ind_red(cur_dim, 0); + std::vector ind_off(cur_dim+1, 0); + for (int j = 0; j < cur_dim; ++j) { + ind_red[j] = indices[j]; + ind_off[j] = indices[j] + offsets[j]; + } + ind_off[cur_dim] = offsets[cur_dim]; + // do the copy + if (is_forward) { + 
caffe_copy(top[0]->shape(cur_dim), + src_data + bottom[0]->offset(ind_off), + dest_data + top[0]->offset(ind_red)); + } else { + // in the backwards pass the src_data is top_diff + // and the dest_data is bottom_diff + caffe_copy(top[0]->shape(cur_dim), + src_data + top[0]->offset(ind_red), + dest_data + bottom[0]->offset(ind_off)); } } } @@ -117,7 +124,8 @@ void CropLayer::Forward_cpu(const vector*>& bottom, std::vector indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - crop_copy(bottom, top, offsets, indices, 0, bottom_data, top_data, true); + crop_copy(bottom, top, offsets.cpu_data(), indices, 0, bottom_data, top_data, + true); } template @@ -129,7 +137,8 @@ void CropLayer::Backward_cpu(const vector*>& top, if (propagate_down[0]) { caffe_set(bottom[0]->count(), static_cast(0), bottom_diff); std::vector indices(top[0]->num_axes(), 0); - crop_copy(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); + crop_copy(bottom, top, offsets.cpu_data(), indices, 0, top_diff, + bottom_diff, false); } } diff --git a/src/caffe/layers/crop_layer.cu b/src/caffe/layers/crop_layer.cu index f78cecbbeee..4ece9cd1761 100644 --- a/src/caffe/layers/crop_layer.cu +++ b/src/caffe/layers/crop_layer.cu @@ -4,96 +4,63 @@ namespace caffe { -// Copy (one line per thread) from one array to another, with arbitrary -// strides in the last two dimensions. +__device__ int compute_uncropped_index( + int index, + const int ndims, + const int* src_strides, + const int* dest_strides, + const int* offsets) { + int dest_index = index; + int src_index = 0; + for (int i = 0; i < ndims; ++i) { + int coord = dest_index / dest_strides[i]; + dest_index -= coord * dest_strides[i]; + src_index += src_strides[i] * (coord + offsets[i]); + } + return src_index; +} + template -__global__ void copy_kernel(const int n, const int height, const int width, - const int src_outer_stride, const int src_inner_stride, - const int dest_outer_stride, const int dest_inner_stride, +__global__ void crop_kernel_forward(const int nthreads, + const int ndims, + const int* src_strides, + const int* dest_strides, + const int* offsets, const Dtype* src, Dtype* dest) { - CUDA_KERNEL_LOOP(index, n) { - int src_start = index / height * src_outer_stride - + index % height * src_inner_stride; - int dest_start = index / height * dest_outer_stride - + index % height * dest_inner_stride; - for (int i = 0; i < width; ++i) { - dest[dest_start + i] = src[src_start + i]; - } + CUDA_KERNEL_LOOP(index, nthreads) { + int src_index = compute_uncropped_index( + index, ndims, src_strides, dest_strides, offsets); + dest[index] = src[src_index]; } } template -void CropLayer::crop_copy_gpu(const vector*>& bottom, - const vector*>& top, - const vector& offsets, - vector indices, - int cur_dim, - const Dtype* src_data, - Dtype* dest_data, - bool is_forward) { - if (cur_dim + 2 < top[0]->num_axes()) { - // We are not yet at the final dimension, call copy recursivley - for (int i = 0; i < top[0]->shape(cur_dim); ++i) { - indices[cur_dim] = i; - crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1, - src_data, dest_data, is_forward); - } - } else { - // We are at the last two dimensions, which are stored continously in memory - // With (N,C,H,W) - // (0,1,2,3) cur_dim -> H - // cur_dim+1 -> W - const int lines = top[0]->shape(cur_dim); - const int height = top[0]->shape(cur_dim); - const int width = top[0]->shape(cur_dim+1); - std::vector ind_off(cur_dim+2, 0); - for (int j = 0; j < cur_dim; 
++j) { - ind_off[j] = indices[j] + offsets[j]; - } - ind_off[cur_dim] = offsets[cur_dim]; - ind_off[cur_dim+1] = offsets[cur_dim+1]; - // Compute copy strides - const int src_outer_stride = - bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1); - const int src_inner_stride = bottom[0]->shape(cur_dim+1); - const int dest_outer_stride = - top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1); - const int dest_inner_stride = top[0]->shape(cur_dim+1); - - if (is_forward) { - const Dtype* bottom_data = bottom[0]->gpu_data() + - bottom[0]->offset(ind_off); - Dtype* top_data = top[0]->mutable_gpu_data() + - top[0]->offset(indices); - // NOLINT_NEXT_LINE(whitespace/operators) - copy_kernel<<>>( - lines, height, width, - src_outer_stride, src_inner_stride, - dest_outer_stride, dest_inner_stride, - bottom_data, top_data); - - } else { - const Dtype* top_diff = top[0]->gpu_diff() + - top[0]->offset(indices); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + - bottom[0]->offset(ind_off); - // NOLINT_NEXT_LINE(whitespace/operators) - copy_kernel<<>>( - lines, height, width, - dest_outer_stride, dest_inner_stride, - src_outer_stride, src_inner_stride, - top_diff, bottom_diff); - } +__global__ void crop_kernel_backward(const int nthreads, + const int ndims, + const int* src_strides, + const int* dest_strides, + const int* offsets, + Dtype* src, const Dtype* dest) { + CUDA_KERNEL_LOOP(index, nthreads) { + int src_index = compute_uncropped_index( + index, ndims, src_strides, dest_strides, offsets); + src[src_index] = dest[index]; } } template void CropLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - std::vector indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); + int n = top[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + crop_kernel_forward<<>>(n, + bottom[0]->num_axes(), + src_strides_.gpu_data(), + dest_strides_.gpu_data(), + offsets.gpu_data(), + bottom_data, top_data); } template @@ -101,12 +68,17 @@ void CropLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + int n = top[0]->count(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast(0), bottom_diff); - std::vector indices(top[0]->num_axes(), 0); - crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, - false); + // NOLINT_NEXT_LINE(whitespace/operators) + crop_kernel_backward<<>>(n, + bottom[0]->num_axes(), + src_strides_.gpu_data(), + dest_strides_.gpu_data(), + offsets.gpu_data(), + bottom_diff, top_diff); } } diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 1987fb096b0..efc9e04e8c0 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -252,6 +252,7 @@ CuDNNConvolutionLayer::~CuDNNConvolutionLayer() { } cudaFree(workspaceData); + delete [] workspace; delete [] stream_; delete [] handle_; delete [] fwd_algo_; diff --git a/src/caffe/layers/cudnn_deconv_layer.cpp b/src/caffe/layers/cudnn_deconv_layer.cpp new file mode 100644 index 00000000000..260da5c1ee0 --- /dev/null +++ b/src/caffe/layers/cudnn_deconv_layer.cpp @@ -0,0 +1,327 @@ +#ifdef USE_CUDNN +#include +#include + +#include "caffe/layers/cudnn_deconv_layer.hpp" + +namespace caffe { + +// Set to three for the benefit of the backward pass, 
which +// can use separate streams for calculating the gradient w.r.t. +// bias, filter weights, and bottom data for each group independently +#define CUDNN_STREAMS_PER_GROUP 3 + +/** + * TODO(dox) explain cuDNN interface + */ +template +void CuDNNDeconvolutionLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + DeconvolutionLayer::LayerSetUp(bottom, top); + // Initialize CUDA streams and cuDNN. + stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; + handle_ = new cudnnHandle_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; + + // Initialize algorithm arrays + fwd_algo_ = new cudnnConvolutionFwdAlgo_t[bottom.size()]; + bwd_filter_algo_= new cudnnConvolutionBwdFilterAlgo_t[bottom.size()]; + bwd_data_algo_ = new cudnnConvolutionBwdDataAlgo_t[bottom.size()]; + + // initialize size arrays + workspace_fwd_sizes_ = new size_t[bottom.size()]; + workspace_bwd_filter_sizes_ = new size_t[bottom.size()]; + workspace_bwd_data_sizes_ = new size_t[bottom.size()]; + + // workspace data + workspaceSizeInBytes = 0; + workspaceData = NULL; + workspace = new void*[this->group_ * CUDNN_STREAMS_PER_GROUP]; + + for (size_t i = 0; i < bottom.size(); ++i) { + // initialize all to default algorithms + fwd_algo_[i] = (cudnnConvolutionFwdAlgo_t)0; + bwd_filter_algo_[i] = (cudnnConvolutionBwdFilterAlgo_t)0; + bwd_data_algo_[i] = (cudnnConvolutionBwdDataAlgo_t)0; + // default algorithms don't require workspace + workspace_fwd_sizes_[i] = 0; + workspace_bwd_data_sizes_[i] = 0; + workspace_bwd_filter_sizes_[i] = 0; + } + + for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { + CUDA_CHECK(cudaStreamCreate(&stream_[g])); + CUDNN_CHECK(cudnnCreate(&handle_[g])); + CUDNN_CHECK(cudnnSetStream(handle_[g], stream_[g])); + workspace[g] = NULL; + } + + // Set the indexing parameters. + bias_offset_ = (this->num_output_ / this->group_); + + // Create filter descriptor. + const int* kernel_shape_data = this->kernel_shape_.cpu_data(); + const int kernel_h = kernel_shape_data[0]; + const int kernel_w = kernel_shape_data[1]; + cudnn::createFilterDesc(&filter_desc_, + this->channels_ / this->group_, + this->num_output_ / this->group_, + kernel_h, + kernel_w); + + // Create tensor descriptor(s) for data and corresponding convolution(s). + for (int i = 0; i < bottom.size(); i++) { + cudnnTensorDescriptor_t bottom_desc; + cudnn::createTensor4dDesc(&bottom_desc); + bottom_descs_.push_back(bottom_desc); + cudnnTensorDescriptor_t top_desc; + cudnn::createTensor4dDesc(&top_desc); + top_descs_.push_back(top_desc); + cudnnConvolutionDescriptor_t conv_desc; + cudnn::createConvolutionDesc(&conv_desc); + conv_descs_.push_back(conv_desc); + } + + // Tensor descriptor for bias. + if (this->bias_term_) { + cudnn::createTensor4dDesc(&bias_desc_); + } + + handles_setup_ = true; +} + +template +void CuDNNDeconvolutionLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + DeconvolutionLayer::Reshape(bottom, top); + CHECK_EQ(2, this->num_spatial_axes_) + << "CuDNNDeconvolutionLayer input must have 2 spatial axes " + << "(e.g., height and width). " + << "Use 'engine: CAFFE' for general ND convolution."; + bottom_offset_ = this->bottom_dim_ / this->group_; + top_offset_ = this->top_dim_ / this->group_; + const int height = bottom[0]->shape(this->channel_axis_ + 1); + const int width = bottom[0]->shape(this->channel_axis_ + 2); + const int height_out = top[0]->shape(this->channel_axis_ + 1); + const int width_out = top[0]->shape(this->channel_axis_ + 2); + const int* pad_data = this->pad_.cpu_data(); + const int pad_h = pad_data[0]; + const int pad_w = pad_data[1]; + const int* stride_data = this->stride_.cpu_data(); + const int stride_h = stride_data[0]; + const int stride_w = stride_data[1]; + + // Specify workspace limit for kernels directly until we have a + // planning strategy and a rewrite of Caffe's GPU memory management + size_t workspace_limit_bytes = 8*1024*1024; + + for (int i = 0; i < bottom.size(); i++) { + cudnn::setTensor4dDesc(&bottom_descs_[i], + this->num_, + this->channels_ / this->group_, + height, + width, + this->channels_ * height * width, + height * width, + width, + 1); + cudnn::setTensor4dDesc(&top_descs_[i], + this->num_, + this->num_output_ / this->group_, + height_out, + width_out, + this->num_output_ * height_out * width_out, + height_out * width_out, + width_out, + 1); + cudnn::setConvolutionDesc(&conv_descs_[i], + top_descs_[i], + filter_desc_, + pad_h, + pad_w, + stride_h, + stride_w); + + // choose forward and backward algorithms + workspace(s) + CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm( + handle_[0], + top_descs_[i], + filter_desc_, + conv_descs_[i], + bottom_descs_[i], + CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, + workspace_limit_bytes, + &fwd_algo_[i])); + + // We have found that CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM is + // buggy. Thus, if this algo was chosen, choose winograd instead. If + // winograd is not supported or workspace is larger than threshold, choose + // implicit_gemm instead.
+ if (fwd_algo_[i] == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) { + size_t winograd_workspace_size; + cudnnStatus_t status = cudnnGetConvolutionForwardWorkspaceSize( + handle_[0], + top_descs_[i], + filter_desc_, + conv_descs_[i], + bottom_descs_[i], + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD, + &winograd_workspace_size); + if (status != CUDNN_STATUS_SUCCESS || + winograd_workspace_size >= workspace_limit_bytes) { + fwd_algo_[i] = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; + } else { + fwd_algo_[i] = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD; + } + } + + CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize( + handle_[0], + top_descs_[i], + filter_desc_, + conv_descs_[i], + bottom_descs_[i], + fwd_algo_[i], + &(workspace_fwd_sizes_[i]))); + + // choose backward algorithm for filter + CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm( + handle_[0], + top_descs_[i], + bottom_descs_[i], + conv_descs_[i], + filter_desc_, + CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, + workspace_limit_bytes, + &bwd_filter_algo_[i])); + + // get workspace for backwards filter algorithm + CUDNN_CHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize( + handle_[0], + top_descs_[i], + bottom_descs_[i], + conv_descs_[i], + filter_desc_, + bwd_filter_algo_[i], + &workspace_bwd_filter_sizes_[i])); + + // choose backward algo for data + CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm( + handle_[0], + filter_desc_, + bottom_descs_[i], + conv_descs_[i], + top_descs_[i], + CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, + workspace_limit_bytes, + &bwd_data_algo_[i])); + + // get workspace size + CUDNN_CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize( + handle_[0], + filter_desc_, + bottom_descs_[i], + conv_descs_[i], + top_descs_[i], + bwd_data_algo_[i], + &workspace_bwd_data_sizes_[i])); + } + + // reduce over all workspace sizes to get a maximum to allocate / reallocate + size_t total_workspace_fwd = 0; + size_t total_workspace_bwd_data = 0; + size_t total_workspace_bwd_filter = 0; + + for (size_t i = 0; i < bottom.size(); i++) { + total_workspace_fwd = std::max(total_workspace_fwd, + workspace_fwd_sizes_[i]); + total_workspace_bwd_data = std::max(total_workspace_bwd_data, + workspace_bwd_data_sizes_[i]); + total_workspace_bwd_filter = std::max(total_workspace_bwd_filter, + workspace_bwd_filter_sizes_[i]); + } + // get max over all operations + size_t max_workspace = std::max(total_workspace_fwd, + total_workspace_bwd_data); + max_workspace = std::max(max_workspace, total_workspace_bwd_filter); + // ensure all groups have enough workspace + size_t total_max_workspace = max_workspace * + (this->group_ * CUDNN_STREAMS_PER_GROUP); + + // this is the total amount of storage needed over all groups + streams + if (total_max_workspace > workspaceSizeInBytes) { + DLOG(INFO) << "Reallocating workspace storage: " << total_max_workspace; + workspaceSizeInBytes = total_max_workspace; + + // free the existing workspace and allocate a new (larger) one + cudaFree(this->workspaceData); + + cudaError_t err = cudaMalloc(&(this->workspaceData), workspaceSizeInBytes); + if (err != cudaSuccess) { + // force zero memory path + for (int i = 0; i < bottom.size(); i++) { + workspace_fwd_sizes_[i] = 0; + workspace_bwd_filter_sizes_[i] = 0; + workspace_bwd_data_sizes_[i] = 0; + fwd_algo_[i] = CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING; + bwd_filter_algo_[i] = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0; + bwd_data_algo_[i] = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0; + } + + // NULL out all workspace pointers + for (int g = 0; g < (this->group_ * 
CUDNN_STREAMS_PER_GROUP); g++) { + workspace[g] = NULL; + } + // NULL out underlying data + workspaceData = NULL; + workspaceSizeInBytes = 0; + } + + // if we succeed in the allocation, set pointer aliases for workspaces + for (int g = 0; g < (this->group_ * CUDNN_STREAMS_PER_GROUP); g++) { + workspace[g] = reinterpret_cast(workspaceData) + g*max_workspace; + } + } + + // Tensor descriptor for bias. + if (this->bias_term_) { + cudnn::setTensor4dDesc( + &bias_desc_, 1, this->num_output_ / this->group_, 1, 1); + } +} + +template +CuDNNDeconvolutionLayer::~CuDNNDeconvolutionLayer() { + // Check that handles have been setup before destroying. + if (!handles_setup_) { return; } + + for (int i = 0; i < bottom_descs_.size(); i++) { + cudnnDestroyTensorDescriptor(bottom_descs_[i]); + cudnnDestroyTensorDescriptor(top_descs_[i]); + cudnnDestroyConvolutionDescriptor(conv_descs_[i]); + } + if (this->bias_term_) { + cudnnDestroyTensorDescriptor(bias_desc_); + } + cudnnDestroyFilterDescriptor(filter_desc_); + + for (int g = 0; g < this->group_ * CUDNN_STREAMS_PER_GROUP; g++) { + cudaStreamDestroy(stream_[g]); + cudnnDestroy(handle_[g]); + } + + cudaFree(workspaceData); + delete [] workspace; + delete [] stream_; + delete [] handle_; + delete [] fwd_algo_; + delete [] bwd_filter_algo_; + delete [] bwd_data_algo_; + delete [] workspace_fwd_sizes_; + delete [] workspace_bwd_data_sizes_; + delete [] workspace_bwd_filter_sizes_; +} + +INSTANTIATE_CLASS(CuDNNDeconvolutionLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_deconv_layer.cu b/src/caffe/layers/cudnn_deconv_layer.cu new file mode 100644 index 00000000000..eb1df32918f --- /dev/null +++ b/src/caffe/layers/cudnn_deconv_layer.cu @@ -0,0 +1,138 @@ +#ifdef USE_CUDNN +#include + +#include "caffe/layers/cudnn_deconv_layer.hpp" + +namespace caffe { + +__global__ void sync_deconv_groups() {} + +template +void CuDNNDeconvolutionLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + + // Forward through cuDNN in parallel over groups. + for (int g = 0; g < this->group_; g++) { + // Filters. + CUDNN_CHECK(cudnnConvolutionBackwardData( + handle_[g], + cudnn::dataType::one, + filter_desc_, + weight + this->weight_offset_ * g, + bottom_descs_[i], + bottom_data + bottom_offset_ * g, + conv_descs_[i], + bwd_data_algo_[i], + workspace[g], + workspace_bwd_data_sizes_[i], + cudnn::dataType::zero, + top_descs_[i], + top_data + top_offset_ * g)); + + // Bias. + if (this->bias_term_) { + const Dtype* bias_data = this->blobs_[1]->gpu_data(); + CUDNN_CHECK(cudnnAddTensor(handle_[g], + cudnn::dataType::one, + bias_desc_, + bias_data + bias_offset_ * g, + cudnn::dataType::one, + top_descs_[i], + top_data + top_offset_ * g)); + } + } + + // Synchronize the work across groups, each of which went into its own + // stream, by launching an empty kernel into the default (null) stream. 
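+ // (Why an empty kernel is enough here: the per-group streams were
+ // created with cudaStreamCreate, i.e. as blocking streams, and under
+ // CUDA's legacy default-stream semantics a launch into the null stream
+ // cannot start until prior work in all blocking streams has finished,
+ // while later work in those streams waits for it in turn. This would
+ // not hold for streams created with cudaStreamNonBlocking.)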
+ // NOLINT_NEXT_LINE(whitespace/operators) + sync_deconv_groups<<<1, 1>>>(); + } +} + +template +void CuDNNDeconvolutionLayer::Backward_gpu( + const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* weight = NULL; + Dtype* weight_diff = NULL; + if (this->param_propagate_down_[0]) { + weight = this->blobs_[0]->gpu_data(); + weight_diff = this->blobs_[0]->mutable_gpu_diff(); + } + Dtype* bias_diff = NULL; + if (this->bias_term_ && this->param_propagate_down_[1]) { + bias_diff = this->blobs_[1]->mutable_gpu_diff(); + } + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Backward through cuDNN in parallel over groups and gradients. + for (int g = 0; g < this->group_; g++) { + // Gradient w.r.t. bias. + if (this->bias_term_ && this->param_propagate_down_[1]) { + CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0 * this->group_ + g], + cudnn::dataType::one, + top_descs_[i], + top_diff + top_offset_ * g, + cudnn::dataType::one, + bias_desc_, + bias_diff + bias_offset_ * g)); + } + + // Gradient w.r.t. weights. + if (this->param_propagate_down_[0]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + CUDNN_CHECK(cudnnConvolutionBackwardFilter( + handle_[1 * this->group_ + g], + cudnn::dataType::one, + top_descs_[i], + top_diff + top_offset_ * g, + bottom_descs_[i], + bottom_data + bottom_offset_ * g, + conv_descs_[i], + bwd_filter_algo_[i], + workspace[1 * this->group_ + g], + workspace_bwd_filter_sizes_[i], + cudnn::dataType::one, + filter_desc_, + weight_diff + this->weight_offset_ * g)); + } + + // Gradient w.r.t. bottom data. + if (propagate_down[i]) { + if (weight == NULL) { + weight = this->blobs_[0]->gpu_data(); + } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + CUDNN_CHECK( + cudnnConvolutionForward(handle_[2 * this->group_ + g], + cudnn::dataType::one, + top_descs_[i], + top_diff + top_offset_ * g, + filter_desc_, + weight + this->weight_offset_ * g, + conv_descs_[i], + fwd_algo_[i], + workspace[2 * this->group_ + g], + workspace_fwd_sizes_[i], + cudnn::dataType::zero, + bottom_descs_[i], + bottom_diff + bottom_offset_ * g)); + } + } + + // Synchronize the work across groups, each of which went into its own + // stream, by launching an empty kernel into the default (null) stream. 
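+ // (Note the role reversal in this file: deconvolution's forward pass
+ // calls cudnnConvolutionBackwardData, its gradient w.r.t. the bottom
+ // calls cudnnConvolutionForward, and the filter gradient swaps the top
+ // and bottom descriptors relative to CuDNNConvolutionLayer. This is why
+ // the descriptor arguments throughout look "backwards" compared to the
+ // convolution layer.)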
+ // NOLINT_NEXT_LINE(whitespace/operators) + sync_deconv_groups<<<1, 1>>>(); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNDeconvolutionLayer); + +} // namespace caffe +#endif diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 795e0a9efb0..687c905763e 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -36,6 +36,7 @@ CuDNNReLULayer::~CuDNNReLULayer() { cudnnDestroyTensorDescriptor(this->bottom_desc_); cudnnDestroyTensorDescriptor(this->top_desc_); + cudnnDestroyActivationDescriptor(this->activ_desc_); cudnnDestroy(this->handle_); } diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 66e6301fd45..0f1296bbc77 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -14,7 +14,10 @@ namespace caffe { template DataLayer::DataLayer(const LayerParameter& param) : BasePrefetchingDataLayer(param), - reader_(param) { + offset_() { + db_.reset(db::GetDB(param.data_param().backend())); + db_->Open(param.data_param().source(), db::READ); + cursor_.reset(db_->NewCursor()); } template @@ -27,7 +30,8 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.data_param().batch_size(); // Read a data point, and use it to initialize the top blob. - Datum& datum = *(reader_.full().peek()); + Datum datum; + datum.ParseFromString(cursor_->value()); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); @@ -35,22 +39,44 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, // Reshape top[0] and prefetch_data according to the batch_size. top_shape[0] = batch_size; top[0]->Reshape(top_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].data_.Reshape(top_shape); + for (int i = 0; i < this->prefetch_.size(); ++i) { + this->prefetch_[i]->data_.Reshape(top_shape); } - LOG(INFO) << "output data size: " << top[0]->num() << "," + LOG_IF(INFO, Caffe::root_solver()) + << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label if (this->output_labels_) { vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); + for (int i = 0; i < this->prefetch_.size(); ++i) { + this->prefetch_[i]->label_.Reshape(label_shape); } } } +template +bool DataLayer::Skip() { + int size = Caffe::solver_count(); + int rank = Caffe::solver_rank(); + bool keep = (offset_ % size) == rank || + // In test mode, only rank 0 runs, so avoid skipping + this->layer_param_.phase() == TEST; + return !keep; +} + +template +void DataLayer::Next() { + cursor_->Next(); + if (!cursor_->valid()) { + LOG_IF(INFO, Caffe::root_solver()) + << "Restarting data prefetching from start."; + cursor_->SeekToFirst(); + } + offset_++; +} + // This function is called on prefetch thread template void DataLayer::load_batch(Batch* batch) { @@ -61,41 +87,41 @@ void DataLayer::load_batch(Batch* batch) { CPUTimer timer; CHECK(batch->data_.count()); CHECK(this->transformed_data_.count()); - - // Reshape according to the first datum of each batch - // on single input batches allows for inputs of varying dimension. 
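The Skip()/Next() pair added above replaces the old DataReader pipeline with deterministic round-robin sharding for multi-GPU training: each solver keeps only the records whose running offset matches its rank modulo the solver count, and nothing is skipped in TEST phase because only rank 0 runs tests. A minimal stand-alone model of the policy (a hypothetical helper; the names mirror the layer's members):

    // With solver_count == 2, rank 0 keeps records 0, 2, 4, ... and
    // rank 1 keeps records 1, 3, 5, ...; TEST phase keeps everything.
    bool skip_record(unsigned long long offset, int solver_count,
                     int solver_rank, bool test_phase) {
      const bool keep =
          static_cast<int>(offset % solver_count) == solver_rank ||
          test_phase;
      return !keep;
    }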
const int batch_size = this->layer_param_.data_param().batch_size(); - Datum& datum = *(reader_.full().peek()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape batch according to the batch_size. - top_shape[0] = batch_size; - batch->data_.Reshape(top_shape); - - Dtype* top_data = batch->data_.mutable_cpu_data(); - Dtype* top_label = NULL; // suppress warnings about uninitialized variables - if (this->output_labels_) { - top_label = batch->label_.mutable_cpu_data(); - } + Datum datum; for (int item_id = 0; item_id < batch_size; ++item_id) { timer.Start(); - // get a datum - Datum& datum = *(reader_.full().pop("Waiting for data")); + while (Skip()) { + Next(); + } + datum.ParseFromString(cursor_->value()); read_time += timer.MicroSeconds(); - timer.Start(); + + if (item_id == 0) { + // Reshape according to the first datum of each batch + // on single input batches allows for inputs of varying dimension. + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); + } + // Apply data transformations (mirror, scale, crop...) + timer.Start(); int offset = batch->data_.offset(item_id); + Dtype* top_data = batch->data_.mutable_cpu_data(); this->transformed_data_.set_cpu_data(top_data + offset); this->data_transformer_->Transform(datum, &(this->transformed_data_)); // Copy label. if (this->output_labels_) { + Dtype* top_label = batch->label_.mutable_cpu_data(); top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - - reader_.free().push(const_cast(&datum)); + Next(); } timer.Stop(); batch_timer.Stop(); diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 20a460fbdea..b86472b34be 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -79,6 +79,5 @@ STUB_GPU(DeconvolutionLayer); #endif INSTANTIATE_CLASS(DeconvolutionLayer); -REGISTER_LAYER_CLASS(Deconvolution); } // namespace caffe diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index 21256166bfa..3d82b0e1cbf 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -31,7 +31,9 @@ template void EltwiseLayer::Reshape(const vector*>& bottom, const vector*>& top) { for (int i = 1; i < bottom.size(); ++i) { - CHECK(bottom[i]->shape() == bottom[0]->shape()); + CHECK(bottom[0]->shape() == bottom[i]->shape()) + << "bottom[0]: " << bottom[0]->shape_string() + << ", bottom[" << i << "]: " << bottom[i]->shape_string(); } top[0]->ReshapeLike(*bottom[0]); // If max operation, we will initialize the vector index part. 
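Dropping REGISTER_LAYER_CLASS(Deconvolution) implies that, like Convolution, the layer is now registered through a factory creator that can dispatch on the engine setting, which is what makes the new CuDNNDeconvolutionLayer above reachable. A sketch of what such a creator looks like, modeled on GetConvolutionLayer in layer_factory.cpp (the actual registration lives outside this hunk):

    template <typename Dtype>
    shared_ptr<Layer<Dtype> > GetDeconvolutionLayer(const LayerParameter& param) {
      ConvolutionParameter_Engine engine = param.convolution_param().engine();
      if (engine == ConvolutionParameter_Engine_DEFAULT) {
        engine = ConvolutionParameter_Engine_CAFFE;
    #ifdef USE_CUDNN
        engine = ConvolutionParameter_Engine_CUDNN;
    #endif
      }
      if (engine == ConvolutionParameter_Engine_CAFFE) {
        return shared_ptr<Layer<Dtype> >(new DeconvolutionLayer<Dtype>(param));
    #ifdef USE_CUDNN
      } else if (engine == ConvolutionParameter_Engine_CUDNN) {
        return shared_ptr<Layer<Dtype> >(new CuDNNDeconvolutionLayer<Dtype>(param));
    #endif
      }
      LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
      return shared_ptr<Layer<Dtype> >();
    }
    REGISTER_LAYER_CREATOR(Deconvolution, GetDeconvolutionLayer);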
diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 2f13dc641df..00716a92b15 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -39,8 +39,9 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { for (int i = 0; i < top_size; ++i) { hdf_blobs_[i] = shared_ptr >(new Blob()); + // Allow reshape here, as we are loading data not params hdf5_load_nd_dataset(file_id, this->layer_param_.top(i).c_str(), - MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get()); + MIN_DATA_DIM, MAX_DATA_DIM, hdf_blobs_[i].get(), true); } herr_t status = H5Fclose(file_id); @@ -61,10 +62,10 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { // Shuffle if needed. if (this->layer_param_.hdf5_data_param().shuffle()) { std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) + DLOG(INFO) << "Successfully loaded " << hdf_blobs_[0]->shape(0) << " rows (shuffled)"; } else { - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) << " rows"; + DLOG(INFO) << "Successfully loaded " << hdf_blobs_[0]->shape(0) << " rows"; } } @@ -124,28 +125,46 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, } } +template +bool HDF5DataLayer::Skip() { + int size = Caffe::solver_count(); + int rank = Caffe::solver_rank(); + bool keep = (offset_ % size) == rank || + // In test mode, only rank 0 runs, so avoid skipping + this->layer_param_.phase() == TEST; + return !keep; +} + +template +void HDF5DataLayer::Next() { + if (++current_row_ == hdf_blobs_[0]->shape(0)) { + if (num_files_ > 1) { + ++current_file_; + if (current_file_ == num_files_) { + current_file_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) { + std::random_shuffle(file_permutation_.begin(), + file_permutation_.end()); + } + DLOG(INFO) << "Looping around to first file."; + } + LoadHDF5FileData( + hdf_filenames_[file_permutation_[current_file_]].c_str()); + } + current_row_ = 0; + if (this->layer_param_.hdf5_data_param().shuffle()) + std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + } + offset_++; +} + template void HDF5DataLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->shape(0)) { - if (num_files_ > 1) { - ++current_file_; - if (current_file_ == num_files_) { - current_file_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(file_permutation_.begin(), - file_permutation_.end()); - } - DLOG(INFO) << "Looping around to first file."; - } - LoadHDF5FileData( - hdf_filenames_[file_permutation_[current_file_]].c_str()); - } - current_row_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) - std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + for (int i = 0; i < batch_size; ++i) { + while (Skip()) { + Next(); } for (int j = 0; j < this->layer_param_.top_size(); ++j) { int data_dim = top[j]->count() / top[j]->shape(0); @@ -153,6 +172,7 @@ void HDF5DataLayer::Forward_cpu(const vector*>& bottom, &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] * data_dim], &top[j]->mutable_cpu_data()[i * data_dim]); } + Next(); } } diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 595d2230220..33eebd41dfc 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ 
b/src/caffe/layers/hdf5_data_layer.cu @@ -17,24 +17,9 @@ template void HDF5DataLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - for (int i = 0; i < batch_size; ++i, ++current_row_) { - if (current_row_ == hdf_blobs_[0]->shape(0)) { - if (num_files_ > 1) { - current_file_ += 1; - if (current_file_ == num_files_) { - current_file_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) { - std::random_shuffle(file_permutation_.begin(), - file_permutation_.end()); - } - DLOG(INFO) << "Looping around to first file."; - } - LoadHDF5FileData( - hdf_filenames_[file_permutation_[current_file_]].c_str()); - } - current_row_ = 0; - if (this->layer_param_.hdf5_data_param().shuffle()) - std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); + for (int i = 0; i < batch_size; ++i) { + while (Skip()) { + Next(); } for (int j = 0; j < this->layer_param_.top_size(); ++j) { int data_dim = top[j]->count() / top[j]->shape(0); @@ -42,6 +27,7 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, &hdf_blobs_[j]->cpu_data()[data_permutation_[current_row_] * data_dim], &top[j]->mutable_gpu_data()[i * data_dim]); } + Next(); } } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 7ee7dc40714..ec0fc5b0383 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -54,6 +54,11 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const unsigned int prefetch_rng_seed = caffe_rng_rand(); prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); ShuffleImages(); + } else { + if (this->phase_ == TRAIN && Caffe::solver_rank() > 0 && + this->layer_param_.image_data_param().rand_skip() == 0) { + LOG(WARNING) << "Shuffling or skipping recommended for multi-GPU"; + } } LOG(INFO) << "A total of " << lines_.size() << " images."; @@ -77,8 +82,8 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const int batch_size = this->layer_param_.image_data_param().batch_size(); CHECK_GT(batch_size, 0) << "Positive batch size required"; top_shape[0] = batch_size; - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].data_.Reshape(top_shape); + for (int i = 0; i < this->prefetch_.size(); ++i) { + this->prefetch_[i]->data_.Reshape(top_shape); } top[0]->Reshape(top_shape); @@ -88,8 +93,8 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // label vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); + for (int i = 0; i < this->prefetch_.size(); ++i) { + this->prefetch_[i]->label_.Reshape(label_shape); } } diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index 624d3118124..3c3f460ec34 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -3,7 +3,8 @@ #include #include "caffe/layers/infogain_loss_layer.hpp" -#include "caffe/util/io.hpp" +#include "caffe/util/io.hpp" // for blob reading of matrix H +#include "caffe/util/math_functions.hpp" namespace caffe { @@ -11,6 +12,31 @@ template void InfogainLossLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); + // internal softmax layer + LayerParameter softmax_layer_param(this->layer_param_); + SoftmaxParameter* softmax_param = softmax_layer_param.mutable_softmax_param(); +
softmax_param->set_axis(this->layer_param_.infogain_loss_param().axis()); + softmax_layer_param.set_type("Softmax"); + softmax_layer_param.clear_loss_weight(); + softmax_layer_param.add_loss_weight(1); + softmax_layer_ = LayerRegistry::CreateLayer(softmax_layer_param); + softmax_bottom_vec_.clear(); + softmax_bottom_vec_.push_back(bottom[0]); + softmax_top_vec_.clear(); + softmax_top_vec_.push_back(&prob_); + softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); + + // ignore label + has_ignore_label_ = + this->layer_param_.loss_param().has_ignore_label(); + if (has_ignore_label_) { + ignore_label_ = this->layer_param_.loss_param().ignore_label(); + } + // normalization + CHECK(!this->layer_param_.loss_param().has_normalize()) + << "normalize is deprecated. use \"normalization\""; + normalization_ = this->layer_param_.loss_param().normalization(); + // matrix H if (bottom.size() < 3) { CHECK(this->layer_param_.infogain_loss_param().has_source()) << "Infogain matrix source must be specified."; @@ -25,28 +51,86 @@ template void InfogainLossLayer::Reshape( const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); + softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); + infogain_axis_ = + bottom[0]->CanonicalAxisIndex( + this->layer_param_.infogain_loss_param().axis()); + outer_num_ = bottom[0]->count(0, infogain_axis_); + inner_num_ = bottom[0]->count(infogain_axis_ + 1); + CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count()) + << "Number of labels must match number of predictions; " + << "e.g., if infogain axis == 1 and prediction shape is (N, C, H, W), " + << "label count (number of labels) must be N*H*W, " + << "with integer values in {0, 1, ..., C-1}."; + num_labels_ = bottom[0]->shape(infogain_axis_); Blob* infogain = NULL; if (bottom.size() < 3) { infogain = &infogain_; } else { infogain = bottom[2]; } - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - const int num = bottom[0]->num(); - const int dim = bottom[0]->count() / num; - CHECK_EQ(infogain->num(), 1); - CHECK_EQ(infogain->channels(), 1); - CHECK_EQ(infogain->height(), dim); - CHECK_EQ(infogain->width(), dim); + CHECK_EQ(infogain->count(), num_labels_*num_labels_); + sum_rows_H_.Reshape(vector(1, num_labels_)); + if (bottom.size() == 2) { + // H is provided as a parameter and will not change. sum rows once + sum_rows_of_H(infogain); + } + if (top.size() >= 2) { + // softmax output + top[1]->ReshapeLike(*bottom[0]); + } +} + +template +Dtype InfogainLossLayer::get_normalizer( + LossParameter_NormalizationMode normalization_mode, int valid_count) { + Dtype normalizer; + switch (normalization_mode) { + case LossParameter_NormalizationMode_FULL: + normalizer = Dtype(outer_num_ * inner_num_); + break; + case LossParameter_NormalizationMode_VALID: + if (valid_count == -1) { + normalizer = Dtype(outer_num_ * inner_num_); + } else { + normalizer = Dtype(valid_count); + } + break; + case LossParameter_NormalizationMode_BATCH_SIZE: + normalizer = Dtype(outer_num_); + break; + case LossParameter_NormalizationMode_NONE: + normalizer = Dtype(1); + break; + default: + LOG(FATAL) << "Unknown normalization mode: " + << LossParameter_NormalizationMode_Name(normalization_mode); + } + // Some users will have no labels for some examples in order to 'turn off' a + // particular loss in a multi-task setup. The max prevents NaNs in that case. 
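+ // (Recap of the modes handled above: FULL divides by outer_num_ *
+ // inner_num_, VALID divides by the number of non-ignored labels and
+ // falls back to FULL when no count is available, BATCH_SIZE divides by
+ // outer_num_, and NONE leaves the loss unnormalized.)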
+ return std::max(Dtype(1.0), normalizer); } +template +void InfogainLossLayer::sum_rows_of_H(const Blob* H) { + CHECK_EQ(H->count(), num_labels_*num_labels_) + << "H must be " << num_labels_ << "x" << num_labels_; + const Dtype* infogain_mat = H->cpu_data(); + Dtype* sum = sum_rows_H_.mutable_cpu_data(); + for ( int row = 0; row < num_labels_ ; row++ ) { + sum[row] = 0; + for ( int col = 0; col < num_labels_ ; col++ ) { + sum[row] += infogain_mat[row*num_labels_+col]; + } + } +} template void InfogainLossLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - const Dtype* bottom_data = bottom[0]->cpu_data(); + // The forward pass computes the softmax prob values. + softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); + const Dtype* prob_data = prob_.cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); const Dtype* infogain_mat = NULL; if (bottom.size() < 3) { @@ -54,17 +138,30 @@ void InfogainLossLayer::Forward_cpu(const vector*>& bottom, } else { infogain_mat = bottom[2]->cpu_data(); } - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); + int count = 0; Dtype loss = 0; - for (int i = 0; i < num; ++i) { - int label = static_cast(bottom_label[i]); - for (int j = 0; j < dim; ++j) { - Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); - loss -= infogain_mat[label * dim + j] * log(prob); + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; j++) { + const int label_value = + static_cast(bottom_label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + continue; + } + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, num_labels_); + for (int l = 0; l < num_labels_; l++) { + loss -= infogain_mat[label_value * num_labels_ + l] * + log(std::max( + prob_data[i * inner_num_*num_labels_ + l * inner_num_ + j], + Dtype(kLOG_THRESHOLD))); + } + ++count; } } - top[0]->mutable_cpu_data()[0] = loss / num; + top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count); + if (top.size() == 2) { + top[1]->ShareData(prob_); + } } template @@ -80,25 +177,44 @@ void InfogainLossLayer::Backward_cpu(const vector*>& top, << " Layer cannot backpropagate to infogain inputs."; } if (propagate_down[0]) { - const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* prob_data = prob_.cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); const Dtype* infogain_mat = NULL; if (bottom.size() < 3) { infogain_mat = infogain_.cpu_data(); } else { infogain_mat = bottom[2]->cpu_data(); + // H is provided as a "bottom" and might change. sum rows every time. 
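For reference, the gradient implemented in the backward pass below follows from differentiating the loss through the internal softmax. Per sample with label y and probabilities p = softmax(x):

$$\ell = -\sum_l H_{y,l}\,\log p_l, \qquad \frac{\partial \ell}{\partial x_k} = -\sum_l H_{y,l}\,(\delta_{lk} - p_k) = p_k \sum_l H_{y,l} - H_{y,k},$$

which is exactly the probability times row-sum minus infogain_mat entry computed in the loop that follows; the row sums are what sum_rows_of_H() caches.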
+ sum_rows_of_H(bottom[2]); } + const Dtype* sum_rows_H = sum_rows_H_.cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - int num = bottom[0]->num(); - int dim = bottom[0]->count() / bottom[0]->num(); - const Dtype scale = - top[0]->cpu_diff()[0] / num; - for (int i = 0; i < num; ++i) { - const int label = static_cast(bottom_label[i]); - for (int j = 0; j < dim; ++j) { - Dtype prob = std::max(bottom_data[i * dim + j], Dtype(kLOG_THRESHOLD)); - bottom_diff[i * dim + j] = scale * infogain_mat[label * dim + j] / prob; + const int dim = bottom[0]->count() / outer_num_; + int count = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = + static_cast(bottom_label[i * inner_num_ + j]); + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, num_labels_); + if (has_ignore_label_ && label_value == ignore_label_) { + for (int l = 0; l < num_labels_; ++l) { + bottom_diff[i * dim + l * inner_num_ + j] = 0; + } + } else { + for (int l = 0; l < num_labels_; ++l) { + bottom_diff[i * dim + l * inner_num_ + j] = + prob_data[i*dim + l*inner_num_ + j]*sum_rows_H[label_value] + - infogain_mat[label_value * num_labels_ + l]; + } + ++count; + } } } + // Scale gradient + Dtype loss_weight = top[0]->cpu_diff()[0] / + get_normalizer(normalization_, count); + caffe_scal(bottom[0]->count(), loss_weight, bottom_diff); } } diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index c0b7a862181..afb1ce94893 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -16,8 +16,8 @@ void LossLayer::LayerSetUp( template void LossLayer::Reshape( const vector*>& bottom, const vector*>& top) { - CHECK_EQ(bottom[0]->num(), bottom[1]->num()) - << "The data and label should have the same number."; + CHECK_EQ(bottom[0]->shape(0), bottom[1]->shape(0)) + << "The data and label should have the same first dimension."; vector loss_shape(0); // Loss layers output a scalar; 0 axes. top[0]->Reshape(loss_shape); } diff --git a/src/caffe/layers/lstm_unit_layer.cpp b/src/caffe/layers/lstm_unit_layer.cpp index 277c031ad15..d1ab59c4bd1 100644 --- a/src/caffe/layers/lstm_unit_layer.cpp +++ b/src/caffe/layers/lstm_unit_layer.cpp @@ -31,7 +31,6 @@ void LSTMUnitLayer::Reshape(const vector*>& bottom, CHECK_EQ(num_instances, bottom[i]->shape(1)); } hidden_dim_ = bottom[0]->shape(2); - CHECK_EQ(num_instances, bottom[1]->shape(1)); CHECK_EQ(4 * hidden_dim_, bottom[1]->shape(2)); top[0]->ReshapeLike(*bottom[0]); top[1]->ReshapeLike(*bottom[0]); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 1ea46cc81b1..46eddb94924 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -138,7 +138,7 @@ __global__ void StoPoolForwardTest(const int nthreads, const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems - Dtype cumsum = FLT_MIN; + Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; @@ -149,7 +149,7 @@ __global__ void StoPoolForwardTest(const int nthreads, cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } - top_data[index] = cumvalues / cumsum; + top_data[index] = (cumsum > 0.) ? 
cumvalues / cumsum : 0.; } } diff --git a/src/caffe/layers/recurrent_layer.cpp b/src/caffe/layers/recurrent_layer.cpp index e0c82773392..9cd3206f66a 100644 --- a/src/caffe/layers/recurrent_layer.cpp +++ b/src/caffe/layers/recurrent_layer.cpp @@ -214,8 +214,9 @@ void RecurrentLayer::Reshape(const vector*>& bottom, const int bottom_offset = 2 + static_input_; for (int i = bottom_offset, j = 0; i < bottom.size(); ++i, ++j) { CHECK(recur_input_blobs_[j]->shape() == bottom[i]->shape()) - << "bottom[" << i << "] shape must match hidden state input shape: " - << recur_input_blobs_[j]->shape_string(); + << "shape mismatch - recur_input_blobs_[" << j << "]: " + << recur_input_blobs_[j]->shape_string() + << " vs. bottom[" << i << "]: " << bottom[i]->shape_string(); recur_input_blobs_[j]->ShareData(*bottom[i]); } } diff --git a/src/caffe/layers/rnn_layer.cpp b/src/caffe/layers/rnn_layer.cpp index f62ae8c77de..8c2fa22e598 100644 --- a/src/caffe/layers/rnn_layer.cpp +++ b/src/caffe/layers/rnn_layer.cpp @@ -215,7 +215,7 @@ void RNNLayer::FillUnrolledNet(NetParameter* net_param) const { } // Add layers to compute - // o_t := \tanh( W_ho h_t + b_o) + // o_t := \tanh( W_ho * h_t + b_o) // = \tanh( W_ho_h_t ) { LayerParameter* o_neuron_param = net_param->add_layer(); diff --git a/src/caffe/layers/scale_layer.cpp b/src/caffe/layers/scale_layer.cpp index ecdbb123e31..e652dad6e10 100644 --- a/src/caffe/layers/scale_layer.cpp +++ b/src/caffe/layers/scale_layer.cpp @@ -56,9 +56,17 @@ void ScaleLayer::LayerSetUp(const vector*>& bottom, bias_bottom_vec_.resize(1); bias_bottom_vec_[0] = bottom[0]; bias_layer_->SetUp(bias_bottom_vec_, top); - bias_param_id_ = this->blobs_.size(); - this->blobs_.resize(bias_param_id_ + 1); - this->blobs_[bias_param_id_] = bias_layer_->blobs()[0]; + if (this->blobs_.size() + bottom.size() < 3) { + // case: blobs.size == 1 && bottom.size == 1 + // or blobs.size == 0 && bottom.size == 2 + bias_param_id_ = this->blobs_.size(); + this->blobs_.resize(bias_param_id_ + 1); + this->blobs_[bias_param_id_] = bias_layer_->blobs()[0]; + } else { + // bias param already initialized + bias_param_id_ = this->blobs_.size() - 1; + bias_layer_->blobs()[0] = this->blobs_[bias_param_id_]; + } bias_propagate_down_.resize(1, false); } this->param_propagate_down_.resize(this->blobs_.size(), true); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 10ac9470832..99fa3eb645a 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -1,3 +1,4 @@ +#include #include #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" @@ -14,17 +15,66 @@ void SigmoidCrossEntropyLossLayer::LayerSetUp( sigmoid_top_vec_.clear(); sigmoid_top_vec_.push_back(sigmoid_output_.get()); sigmoid_layer_->SetUp(sigmoid_bottom_vec_, sigmoid_top_vec_); + + has_ignore_label_ = + this->layer_param_.loss_param().has_ignore_label(); + if (has_ignore_label_) { + ignore_label_ = this->layer_param_.loss_param().ignore_label(); + } + if (this->layer_param_.loss_param().has_normalization()) { + normalization_ = this->layer_param_.loss_param().normalization(); + } else if (this->layer_param_.loss_param().has_normalize()) { + normalization_ = this->layer_param_.loss_param().normalize() ? 
+ LossParameter_NormalizationMode_VALID : + LossParameter_NormalizationMode_BATCH_SIZE; + } else { + normalization_ = LossParameter_NormalizationMode_BATCH_SIZE; + } } template void SigmoidCrossEntropyLossLayer::Reshape( const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); + outer_num_ = bottom[0]->shape(0); // batch size + inner_num_ = bottom[0]->count(1); // instance size: |output| == |target| CHECK_EQ(bottom[0]->count(), bottom[1]->count()) << "SIGMOID_CROSS_ENTROPY_LOSS layer inputs must have the same count."; sigmoid_layer_->Reshape(sigmoid_bottom_vec_, sigmoid_top_vec_); } +// TODO(shelhamer) loss normalization should be pulled up into LossLayer, +// instead of duplicated here and in SoftMaxWithLossLayer +template +Dtype SigmoidCrossEntropyLossLayer::get_normalizer( + LossParameter_NormalizationMode normalization_mode, int valid_count) { + Dtype normalizer; + switch (normalization_mode) { + case LossParameter_NormalizationMode_FULL: + normalizer = Dtype(outer_num_ * inner_num_); + break; + case LossParameter_NormalizationMode_VALID: + if (valid_count == -1) { + normalizer = Dtype(outer_num_ * inner_num_); + } else { + normalizer = Dtype(valid_count); + } + break; + case LossParameter_NormalizationMode_BATCH_SIZE: + normalizer = Dtype(outer_num_); + break; + case LossParameter_NormalizationMode_NONE: + normalizer = Dtype(1); + break; + default: + LOG(FATAL) << "Unknown normalization mode: " + << LossParameter_NormalizationMode_Name(normalization_mode); + } + // Some users will have no labels for some examples in order to 'turn off' a + // particular loss in a multi-task setup. The max prevents NaNs in that case. + return std::max(Dtype(1.0), normalizer); +} + template void SigmoidCrossEntropyLossLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { @@ -32,17 +82,22 @@ void SigmoidCrossEntropyLossLayer::Forward_cpu( sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); // Compute the loss (negative log likelihood) - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); // Stable version of loss computation from input data const Dtype* input_data = bottom[0]->cpu_data(); const Dtype* target = bottom[1]->cpu_data(); + int valid_count = 0; Dtype loss = 0; - for (int i = 0; i < count; ++i) { + for (int i = 0; i < bottom[0]->count(); ++i) { + const int target_value = static_cast(target[i]); + if (has_ignore_label_ && target_value == ignore_label_) { + continue; + } loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + ++valid_count; } - top[0]->mutable_cpu_data()[0] = loss / num; + normalizer_ = get_normalizer(normalization_, valid_count); + top[0]->mutable_cpu_data()[0] = loss / normalizer_; } template @@ -56,19 +111,27 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); - const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); const Dtype* target = bottom[1]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); caffe_sub(count, sigmoid_output_data, target, bottom_diff); + // Zero out gradient of ignored targets. 
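The forward loop above evaluates the cross-entropy in an overflow-safe form. With logit x and target t, the identity being used is

$$-t\log\sigma(x) - (1-t)\log(1-\sigma(x)) = -x\,(t - [x \ge 0]) + \log\bigl(1 + e^{x - 2x[x \ge 0]}\bigr),$$

where [x >= 0] is 1 when x >= 0 and 0 otherwise; the exponent x - 2x[x >= 0] equals -|x|, so the exponential never overflows. The derivative is simply sigma(x) - t, which is what the caffe_sub call above assembles before the ignore-label masking and scaling below.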
+ if (has_ignore_label_) { + for (int i = 0; i < count; ++i) { + const int target_value = static_cast(target[i]); + if (target_value == ignore_label_) { + bottom_diff[i] = 0; + } + } + } // Scale down gradient - const Dtype loss_weight = top[0]->cpu_diff()[0]; - caffe_scal(count, loss_weight / num, bottom_diff); + Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_; + caffe_scal(count, loss_weight, bottom_diff); } } #ifdef CPU_ONLY -STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); +STUB_GPU(SigmoidCrossEntropyLossLayer); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 046cb9d3a31..7497e4aa47d 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -5,6 +5,75 @@ namespace caffe { + +template +__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads, + const Dtype* input_data, const Dtype* target, Dtype* loss, + const bool has_ignore_label_, const int ignore_label_, + Dtype* counts) { + CUDA_KERNEL_LOOP(i, nthreads) { + const int target_value = static_cast(target[i]); + if (has_ignore_label_ && target_value == ignore_label_) { + loss[i] = 0; + counts[i] = 0; + } else { + loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * + (input_data[i] >= 0))); + counts[i] = 1; + } + } +} + +template +__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count, + const int ignore_label, const Dtype* target, Dtype* diff) { + CUDA_KERNEL_LOOP(i, count) { + const int target_value = static_cast(target[i]); + if (target_value == ignore_label) { + diff[i] = 0; + } + } +} + + +template +void SigmoidCrossEntropyLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the sigmoid outputs. + sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->gpu_data(); + const Dtype* target = bottom[1]->gpu_data(); + // Since this memory is not used for anything, we use it here to avoid having + // to allocate new GPU memory to accumulate intermediate results. + Dtype* loss_data = bottom[0]->mutable_gpu_diff(); + Dtype* count_data = bottom[1]->mutable_gpu_diff(); + Dtype valid_count; + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidCrossEntropyLossForwardGPU<<>>(count, input_data, target, loss_data, + has_ignore_label_, ignore_label_, count_data); + // Only launch another CUDA kernel if we actually need the valid count. + if (normalization_ == LossParameter_NormalizationMode_VALID && + has_ignore_label_) { + caffe_gpu_asum(count, count_data, &valid_count); + } else { + valid_count = count; + } + Dtype loss; + caffe_gpu_asum(count, loss_data, &loss); + normalizer_ = get_normalizer(normalization_, valid_count); + top[0]->mutable_cpu_data()[0] = loss / normalizer_; + + // Clear scratch memory to prevent interfering with backward (see #6202). 
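+ // (Both diff buffers were used as scratch above: bottom[0]'s diff held
+ // per-element losses and bottom[1]'s diff held the valid-count flags.
+ // If this layer's Backward were then skipped, e.g. with propagate_down
+ // false, the leftovers could later be read as if they were real
+ // gradients, hence the explicit zeroing below.)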
+ caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); + caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); +} + template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -16,19 +85,23 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); - const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(count, sigmoid_output_data, bottom_diff); caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); + // Zero out gradient of ignored targets. + if (has_ignore_label_) { + // NOLINT_NEXT_LINE(whitespace/operators) + SigmoidCrossEntropyLossIgnoreDiffGPU<<>>(count, ignore_label_, target, bottom_diff); + } // Scale down gradient - const Dtype loss_weight = top[0]->cpu_diff()[0]; - caffe_gpu_scal(count, loss_weight / num, bottom_diff); + Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_; + caffe_gpu_scal(count, loss_weight, bottom_diff); } } -INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); - +INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp index 85fd9676812..f8aa769a174 100644 --- a/src/caffe/layers/sigmoid_layer.cpp +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -7,7 +7,7 @@ namespace caffe { template inline Dtype sigmoid(Dtype x) { - return 1. / (1. + exp(-x)); + return 0.5 * tanh(0.5 * x) + 0.5; } template diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index 184c61ede83..8a4ea6616e0 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -8,7 +8,7 @@ namespace caffe { template __global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { - out[index] = 1. / (1. + exp(-in[index])); + out[index] = 0.5 * tanh(0.5 * in[index]) + 0.5; } } diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 759beafe0d9..64de0964483 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -41,7 +41,9 @@ void SliceLayer::Reshape(const vector*>& bottom, int count = 0; if (slice_point_.size() != 0) { CHECK_EQ(slice_point_.size(), top.size() - 1); - CHECK_LE(top.size(), bottom_slice_axis); + CHECK_LE(top.size(), bottom_slice_axis) + << "slice axis: " << slice_axis_ + << ", bottom[0] shape: " << bottom[0]->shape_string(); int prev = 0; vector slices; for (int i = 0; i < slice_point_.size(); ++i) { diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 660e1b39fe0..b3c8ffa6b6c 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -36,9 +36,8 @@ void SoftmaxWithLossLayer::Forward_gpu( const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; - // Since this memory is not used for anything until it is overwritten - // on the backward pass, we use it here to avoid having to allocate new GPU - // memory to accumulate intermediate results in the kernel. + // Since this memory is not used for anything, we use it here to avoid having + // to allocate new GPU memory to accumulate intermediate results. 
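+ // (Aside on the sigmoid change above: it relies on the identity
+ // sigma(x) = 0.5 * tanh(0.5 * x) + 0.5, algebraically equal to
+ // 1 / (1 + exp(-x)) but immune to overflow, since tanh is bounded,
+ // whereas exp(-x) overflows to inf in single precision once x drops
+ // below roughly -88.)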
Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. @@ -61,6 +60,9 @@ void SoftmaxWithLossLayer::Forward_gpu( if (top.size() == 2) { top[1]->ShareData(prob_); } + + // Clear scratch memory to prevent interfering with backward (see #6202). + caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index 103dd4b6af8..1bf3760e9fd 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -173,8 +173,8 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, CHECK_GT(crop_size, 0); const int batch_size = this->layer_param_.window_data_param().batch_size(); top[0]->Reshape(batch_size, channels, crop_size, crop_size); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) - this->prefetch_[i].data_.Reshape( + for (int i = 0; i < this->prefetch_.size(); ++i) + this->prefetch_[i]->data_.Reshape( batch_size, channels, crop_size, crop_size); LOG(INFO) << "output data size: " << top[0]->num() << "," @@ -183,8 +183,8 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, // label vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); + for (int i = 0; i < this->prefetch_.size(); ++i) { + this->prefetch_[i]->label_.Reshape(label_shape); } // data mean diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index c753f190bdc..98bbba81eac 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -17,29 +17,31 @@ #include "caffe/util/math_functions.hpp" #include "caffe/util/upgrade_proto.hpp" -#include "caffe/test/test_caffe_main.hpp" - namespace caffe { template -Net::Net(const NetParameter& param, const Net* root_net) - : root_net_(root_net) { +Net::Net(const NetParameter& param) { Init(param); } template -Net::Net(const string& param_file, Phase phase, const Net* root_net) - : root_net_(root_net) { +Net::Net(const string& param_file, Phase phase, + const int level, const vector* stages) { NetParameter param; ReadNetParamsFromTextFileOrDie(param_file, ¶m); + // Set phase, stages and level param.mutable_state()->set_phase(phase); + if (stages != NULL) { + for (int i = 0; i < stages->size(); i++) { + param.mutable_state()->add_stage((*stages)[i]); + } + } + param.mutable_state()->set_level(level); Init(param); } template void Net::Init(const NetParameter& in_param) { - CHECK(Caffe::root_solver() || root_net_) - << "root_net_ needs to be set for all non-root solvers"; // Set phase from the state. phase_ = in_param.state().phase(); // Filter layers based on their include/exclude rules and @@ -65,9 +67,6 @@ void Net::Init(const NetParameter& in_param) { top_id_vecs_.resize(param.layer_size()); bottom_need_backward_.resize(param.layer_size()); for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { - // For non-root solvers, whether this layer is shared from root_net_. - bool share_from_root = !Caffe::root_solver() - && root_net_->layers_[layer_id]->ShareInParallel(); // Inherit phase from net if unset. 
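The new Net constructor in the hunk above takes an explicit level and a list of stages, so a single all-in-one prototxt can be filtered into train, test, or deploy variants without editing the file. A hypothetical call (the path and stage name are ours, not from the patch):

    #include <string>
    #include <vector>
    #include "caffe/net.hpp"

    // Build the TEST variant of an all-in-one net at level 0 with one
    // active stage; Init() then applies each layer's include/exclude
    // rules against this NetState.
    std::vector<std::string> stages(1, "deploy");
    caffe::Net<float> net("all_in_one.prototxt", caffe::TEST, 0, &stages);

Per-layer phases are still filled in afterwards: the has_phase() check immediately below copies the net-level phase onto any layer that did not set its own.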
if (!param.layer(layer_id).has_phase()) { param.mutable_layer(layer_id)->set_phase(phase_); @@ -80,13 +79,7 @@ void Net::Init(const NetParameter& in_param) { << "propagate_down param must be specified " << "either 0 or bottom_size times "; } - if (share_from_root) { - LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net"; - layers_.push_back(root_net_->layers_[layer_id]); - layers_[layer_id]->SetShared(true); - } else { - layers_.push_back(LayerRegistry::CreateLayer(layer_param)); - } + layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); LOG_IF(INFO, Caffe::root_solver()) << "Creating Layer " << layer_param.name(); @@ -125,19 +118,7 @@ void Net::Init(const NetParameter& in_param) { } } // After this layer is connected, set it up. - if (share_from_root) { - // Set up size of top blobs using root_net_ - const vector*>& base_top = root_net_->top_vecs_[layer_id]; - const vector*>& this_top = this->top_vecs_[layer_id]; - for (int top_id = 0; top_id < base_top.size(); ++top_id) { - this_top[top_id]->ReshapeLike(*base_top[top_id]); - LOG(INFO) << "Created top blob " << top_id << " (shape: " - << this_top[top_id]->shape_string() << ") for shared layer " - << layer_param.name(); - } - } else { - layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); - } + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); LOG_IF(INFO, Caffe::root_solver()) << "Setting up " << layer_names_[layer_id]; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { @@ -581,10 +562,15 @@ Dtype Net::ForwardFromTo(int start, int end) { CHECK_LT(end, layers_.size()); Dtype loss = 0; for (int i = start; i <= end; ++i) { - // LOG(ERROR) << "Forwarding " << layer_names_[i]; + for (int c = 0; c < before_forward_.size(); ++c) { + before_forward_[c]->run(i); + } Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } + for (int c = 0; c < after_forward_.size(); ++c) { + after_forward_[c]->run(i); + } } return loss; } @@ -626,11 +612,17 @@ void Net::BackwardFromTo(int start, int end) { CHECK_GE(end, 0); CHECK_LT(start, layers_.size()); for (int i = start; i >= end; --i) { + for (int c = 0; c < before_backward_.size(); ++c) { + before_backward_[c]->run(i); + } if (layer_need_backward_[i]) { layers_[i]->Backward( top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); if (debug_info_) { BackwardDebugInfo(i); } } + for (int c = 0; c < after_backward_.size(); ++c) { + after_backward_[c]->run(i); + } } } @@ -821,8 +813,7 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { template void Net::CopyTrainedLayersFrom(const string trained_filename) { - if (trained_filename.size() >= 3 && - trained_filename.compare(trained_filename.size() - 3, 3, ".h5") == 0) { + if (H5Fis_hdf5(trained_filename.c_str())) { CopyTrainedLayersFromHDF5(trained_filename); } else { CopyTrainedLayersFromBinaryProto(trained_filename); diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 5bc41c6a6e5..d9433917d25 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -1,16 +1,15 @@ -#ifndef CPU_ONLY +#ifdef USE_NCCL + #include -#endif #include #include - #include #include #include -#include "boost/thread.hpp" #include "caffe/caffe.hpp" #include "caffe/parallel.hpp" +#include "caffe/sgd_solvers.hpp" namespace caffe { @@ -68,15 +67,14 @@ static size_t total_size(const vector*>& params) { template Params::Params(shared_ptr > root_solver) - : 
size_(total_size(root_solver->net()->learnable_params())), - data_(), - diff_() { + : size_(total_size(root_solver->net()->learnable_params())), + data_(), + diff_() { } template GPUParams::GPUParams(shared_ptr > root_solver, int device) - : Params(root_solver) { -#ifndef CPU_ONLY + : Params(root_solver) { int initial_device; CUDA_CHECK(cudaGetDevice(&initial_device)); @@ -86,358 +84,288 @@ GPUParams::GPUParams(shared_ptr > root_solver, int device) // Copy blob values const vector*>& net = - root_solver->net()->learnable_params(); + root_solver->net()->learnable_params(); apply_buffers(net, data_, size_, copy); CUDA_CHECK(cudaMalloc(&diff_, size_ * sizeof(Dtype))); caffe_gpu_set(size_, Dtype(0), diff_); CUDA_CHECK(cudaSetDevice(initial_device)); -#else - NO_GPU; -#endif } template GPUParams::~GPUParams() { -#ifndef CPU_ONLY CUDA_CHECK(cudaFree(data_)); CUDA_CHECK(cudaFree(diff_)); -#endif } template -void GPUParams::configure(Solver* solver) const { +void GPUParams::Configure(Solver* solver) const { const vector*>& net = - solver->net()->learnable_params(); + solver->net()->learnable_params(); apply_buffers(net, data_, size_, replace_gpu); apply_buffers(net, diff_, size_, replace_gpu_diff); } -void DevicePair::compute(const vector devices, vector* pairs) { -#ifndef CPU_ONLY - vector remaining(devices); - - // Depth for reduction tree - int remaining_depth = static_cast(ceil(log2(remaining.size()))); - - // Group GPUs by board - for (int d = 0; d < remaining_depth; ++d) { - for (int i = 0; i < remaining.size(); ++i) { - for (int j = i + 1; j < remaining.size(); ++j) { - cudaDeviceProp a, b; - CUDA_CHECK(cudaGetDeviceProperties(&a, remaining[i])); - CUDA_CHECK(cudaGetDeviceProperties(&b, remaining[j])); - if (a.isMultiGpuBoard && b.isMultiGpuBoard) { - if (a.multiGpuBoardGroupID == b.multiGpuBoardGroupID) { - pairs->push_back(DevicePair(remaining[i], remaining[j])); - DLOG(INFO) << "GPU board: " << remaining[i] << ":" << remaining[j]; - remaining.erase(remaining.begin() + j); - break; - } - } - } - } - } - ostringstream s; - for (int i = 0; i < remaining.size(); ++i) { - s << (i ? ", " : "") << remaining[i]; - } - DLOG(INFO) << "GPUs paired by boards, remaining: " << s.str(); - - // Group by P2P accessibility - remaining_depth = ceil(log2(remaining.size())); - for (int d = 0; d < remaining_depth; ++d) { - for (int i = 0; i < remaining.size(); ++i) { - for (int j = i + 1; j < remaining.size(); ++j) { - int access; - CUDA_CHECK( - cudaDeviceCanAccessPeer(&access, remaining[i], remaining[j])); - if (access) { - pairs->push_back(DevicePair(remaining[i], remaining[j])); - DLOG(INFO) << "P2P pair: " << remaining[i] << ":" << remaining[j]; - remaining.erase(remaining.begin() + j); - break; - } - } - } - } - s.str(""); - for (int i = 0; i < remaining.size(); ++i) { - s << (i ? 
", " : "") << remaining[i]; - } - DLOG(INFO) << "GPUs paired by P2P access, remaining: " << s.str(); - - // Group remaining - remaining_depth = ceil(log2(remaining.size())); - for (int d = 0; d < remaining_depth; ++d) { - for (int i = 0; i < remaining.size(); ++i) { - pairs->push_back(DevicePair(remaining[i], remaining[i + 1])); - DLOG(INFO) << "Remaining pair: " << remaining[i] << ":" - << remaining[i + 1]; - remaining.erase(remaining.begin() + i + 1); - } - } +static int getDevice() { + int device = 0; + CUDA_CHECK(cudaGetDevice(&device)); + return device; +} - // Should only be the parent node remaining - CHECK_EQ(remaining.size(), 1); +template +NCCL::NCCL(shared_ptr > solver) + : GPUParams(solver, getDevice()), + comm_(), solver_(solver), barrier_() { + this->Configure(solver.get()); + Init(); +} - pairs->insert(pairs->begin(), DevicePair(-1, remaining[0])); +template +NCCL::NCCL(shared_ptr > solver, const string& uid) + : GPUParams(solver, getDevice()), + solver_(solver), barrier_() { + this->Configure(solver.get()); + Caffe::set_multiprocess(true); + ncclUniqueId nccl_uid; + memcpy(&nccl_uid, &uid[0], NCCL_UNIQUE_ID_BYTES); // NOLINT(caffe/alt_fn) + NCCL_CHECK(ncclCommInitRank(&comm_, + Caffe::solver_count(), + nccl_uid, + Caffe::solver_rank())); + Init(); +} - CHECK(pairs->size() == devices.size()); - for (int i = 0; i < pairs->size(); ++i) { - CHECK((*pairs)[i].parent() != (*pairs)[i].device()); - for (int j = i + 1; j < pairs->size(); ++j) { - CHECK((*pairs)[i].device() != (*pairs)[j].device()); - } +template +void NCCL::Init() { + if (solver_->param().layer_wise_reduce()) { + CUDA_CHECK(cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking)); } -#else - NO_GPU; -#endif } -// - template -P2PSync::P2PSync(shared_ptr > root_solver, - P2PSync* parent, const SolverParameter& param) - : GPUParams(root_solver, param.device_id()), - parent_(parent), - children_(), - queue_(), - initial_iter_(root_solver->iter()), - solver_() { -#ifndef CPU_ONLY - int initial_device; - CUDA_CHECK(cudaGetDevice(&initial_device)); - const int self = param.device_id(); - CUDA_CHECK(cudaSetDevice(self)); - - if (parent == NULL) { - solver_ = root_solver; - } else { - Caffe::set_root_solver(false); - solver_.reset(new WorkerSolver(param, root_solver.get())); - Caffe::set_root_solver(true); +NCCL::~NCCL() { + if (solver_->param().layer_wise_reduce()) { + CUDA_CHECK(cudaStreamDestroy(stream_)); } - this->configure(solver_.get()); - solver_->add_callback(this); - - if (parent) { - // Enable p2p access between devices - const int peer = parent->solver_->param().device_id(); - int access; - CUDA_CHECK(cudaDeviceCanAccessPeer(&access, self, peer)); - if (access) { - CUDA_CHECK(cudaDeviceEnablePeerAccess(peer, 0)); - } else { - LOG(INFO)<< "GPU " << self << " does not have p2p access to GPU " << peer; - } - // Allocate receiving buffer on parent - CUDA_CHECK(cudaSetDevice(peer)); - CUDA_CHECK(cudaMalloc(&parent_grads_, size_ * sizeof(Dtype))); - CUDA_CHECK(cudaSetDevice(self)); + if (comm_) { + ncclCommDestroy(comm_); } - - CUDA_CHECK(cudaSetDevice(initial_device)); -#else - NO_GPU; -#endif } template -P2PSync::~P2PSync() { -#ifndef CPU_ONLY - int initial_device; - CUDA_CHECK(cudaGetDevice(&initial_device)); - const int self = solver_->param().device_id(); - CUDA_CHECK(cudaSetDevice(self)); - - if (parent_) { - CUDA_CHECK(cudaFree(parent_grads_)); - const int peer = parent_->solver_->param().device_id(); - int access; - CUDA_CHECK(cudaDeviceCanAccessPeer(&access, self, peer)); - if (access) { - 
CUDA_CHECK(cudaDeviceDisablePeerAccess(peer)); - } - } - - CUDA_CHECK(cudaSetDevice(initial_device)); -#endif +boost::barrier* NCCL::barrier() { + return barrier_; +} +template +void NCCL::set_barrier(boost::barrier* value) { + barrier_ = value; } template -void P2PSync::InternalThreadEntry() { - Caffe::SetDevice(solver_->param().device_id()); - CHECK(Caffe::root_solver()); - Caffe::set_root_solver(false); - // See if there is a defined seed and reset random state if so - if (solver_->param().random_seed() >= 0) { - // Fetch random seed and modulate by device ID to make sure - // everyone doesn't have the same seed. We seem to have some - // solver instability if we have everyone with the same seed - Caffe::set_random_seed( - solver_->param().random_seed() + solver_->param().device_id()); +void NCCL::InitSingleProcess(vector*>* nccls) { + ncclComm_t* comms = new ncclComm_t[nccls->size()]; + int* gpu_list = new int[nccls->size()]; + for (int i = 0; i < nccls->size(); ++i) { + gpu_list[i] = (*nccls)[i]->solver_->param().device_id(); + } + NCCL_CHECK(ncclCommInitAll(comms, static_cast(nccls->size()), gpu_list)); + for (int i = 0; i < nccls->size(); ++i) { + (*nccls)[i]->comm_ = comms[i]; } - solver_->Step(solver_->param().max_iter() - initial_iter_); } template -void P2PSync::on_start() { -#ifndef CPU_ONLY -#ifdef DEBUG - int device; - CUDA_CHECK(cudaGetDevice(&device)); - CHECK(device == solver_->param().device_id()); -#else -// CHECK(false); -#endif +string NCCL::new_uid() { + string uid; + uid.resize(NCCL_UNIQUE_ID_BYTES); + ncclUniqueId nccl_uid; + NCCL_CHECK(ncclGetUniqueId(&nccl_uid)); + memcpy(&uid[0], &nccl_uid, NCCL_UNIQUE_ID_BYTES); // NOLINT(caffe/alt_fn) + return uid; +} - // Wait for update from parent - if (parent_) { - P2PSync *parent = queue_.pop(); - CHECK(parent == parent_); +template +void NCCL::Broadcast() { + if (barrier_) { // NULL in multi process case + barrier_->wait(); } - - // Update children - for (int i = children_.size() - 1; i >= 0; i--) { - Dtype* src = data_; - Dtype* dst = children_[i]->data_; - -#ifdef DEBUG - cudaPointerAttributes attributes; - CUDA_CHECK(cudaPointerGetAttributes(&attributes, src)); - CHECK(attributes.device == device); - CUDA_CHECK(cudaPointerGetAttributes(&attributes, dst)); - CHECK(attributes.device == children_[i]->solver_->param().device_id()); -#endif - - CUDA_CHECK(cudaMemcpyAsync(dst, src, size_ * sizeof(Dtype), - cudaMemcpyDeviceToDevice, cudaStreamDefault)); - CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); - children_[i]->queue_.push(this); + NCCL_CHECK(ncclBcast(data_, static_cast(size_), + nccl::dataType::type, 0, + comm_, cudaStreamDefault)); + if (barrier_) { + barrier_->wait(); } -#endif } template -void P2PSync::on_gradients_ready() { -#ifndef CPU_ONLY +void NCCL::run(int layer) { + CHECK(solver_->param().layer_wise_reduce()); + vector > >& blobs = + solver_->net()->layers()[layer]->blobs(); #ifdef DEBUG - int device; - CUDA_CHECK(cudaGetDevice(&device)); - CHECK(device == solver_->param().device_id()); + // Assert blobs are contiguous to reduce in one step (e.g. bias often small) + for (int i = 1; i < blobs.size(); ++i) { + CHECK_EQ(blobs[i - 1]->gpu_diff() + blobs[i - 1]->count(), + blobs[i + 0]->gpu_diff()); + } #endif + if (blobs.size() > 0) { + // Make sure default stream is done computing gradients. Could be + // replaced by cudaEventRecord+cudaStreamWaitEvent to avoid + // blocking the default stream, but it's actually slower. 
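The comment above names the rejected alternative; for concreteness, the event-based overlap it refers to would look roughly like this (a sketch under that assumption, not code from the patch):

    // Record completion of gradient computation on the default stream,
    // then make the NCCL stream wait on it without blocking the host.
    cudaEvent_t grads_ready;
    CUDA_CHECK(cudaEventCreateWithFlags(&grads_ready, cudaEventDisableTiming));
    CUDA_CHECK(cudaEventRecord(grads_ready, cudaStreamDefault));
    CUDA_CHECK(cudaStreamWaitEvent(stream_, grads_ready, 0));

The patch keeps the plain cudaStreamSynchronize that follows because, as the comment notes, the event variant actually measured slower here.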
+ CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); - // Sum children gradients as they appear in the queue - for (int i = 0; i < children_.size(); ++i) { - P2PSync *child = queue_.pop(); - Dtype* src = child->parent_grads_; - Dtype* dst = diff_; - -#ifdef DEBUG - bool ok = false; - for (int j = 0; j < children_.size(); ++j) { - if (child == children_[j]) { - ok = true; - } + // Reduce asynchronously + int size = 0; + for (int i = 0; i < blobs.size(); ++i) { + size += blobs[i]->count(); } - CHECK(ok); - cudaPointerAttributes attributes; - CUDA_CHECK(cudaPointerGetAttributes(&attributes, src)); - CHECK(attributes.device == device); - CUDA_CHECK(cudaPointerGetAttributes(&attributes, dst)); - CHECK(attributes.device == device); -#endif - - caffe_gpu_add(size_, src, dst, dst); + if (barrier_) { // NULL in multi process case + barrier_->wait(); + } + NCCL_CHECK(ncclAllReduce(blobs[0]->mutable_gpu_diff(), + blobs[0]->mutable_gpu_diff(), + size, + nccl::dataType::type, + ncclSum, comm_, stream_)); + caffe_gpu_scal(size, (Dtype) 1.0 / Caffe::solver_count(), + blobs[0]->mutable_gpu_diff(), stream_); } +} - // Send gradients to parent - if (parent_) { - Dtype* src = diff_; - Dtype* dst = parent_grads_; - -#ifdef DEBUG - cudaPointerAttributes attributes; - CUDA_CHECK(cudaPointerGetAttributes(&attributes, src)); - CHECK(attributes.device == device); - CUDA_CHECK(cudaPointerGetAttributes(&attributes, dst)); - CHECK(attributes.device == parent_->solver_->param().device_id()); -#endif - - CUDA_CHECK(cudaMemcpyAsync(dst, src, size_ * sizeof(Dtype), // - cudaMemcpyDeviceToDevice, cudaStreamDefault)); - CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); - parent_->queue_.push(this); +template +void NCCL::on_gradients_ready() { + if (solver_->param().layer_wise_reduce()) { + CHECK_EQ(solver_->net()->params().size(), + solver_->net()->learnable_params().size()) + << "Layer-wise reduce is not supported for nets with shared weights."; + + // Make sure reduction is done before applying gradients + CUDA_CHECK(cudaStreamSynchronize(stream_)); } else { - // Loss functions divide gradients by the batch size, so to compensate - // for split batch, the root solver divides by number of solvers. - caffe_gpu_scal(size_, Dtype(1.0 / Caffe::solver_count()), diff_); + if (barrier_) { // NULL in multi process case + barrier_->wait(); + } + NCCL_CHECK(ncclAllReduce(diff_, diff_, static_cast(size_), + nccl::dataType::type, ncclSum, comm_, + cudaStreamDefault)); + caffe_gpu_scal(static_cast(size_), + (Dtype) 1.0 / Caffe::solver_count(), diff_); } -#endif } template -void P2PSync::Prepare(const vector& gpus, - vector > >* syncs) { - // Pair devices for map-reduce synchronization - vector pairs; - DevicePair::compute(gpus, &pairs); - ostringstream s; - for (int i = 1; i < pairs.size(); ++i) { - s << (i == 1 ? "" : ", ") << pairs[i].parent() << ":" << pairs[i].device(); +class Worker : public InternalThread { + public: + explicit Worker(shared_ptr > rank0, int device, + boost::barrier* barrier, vector*>* nccls, + const char* restore) + : rank0_(rank0), device_(device), barrier_(barrier), + nccls_(nccls), restore_(restore) { } - LOG(INFO)<< "GPUs pairs " << s.str(); - - SolverParameter param(solver_->param()); - - // Build the GPU tree by finding the parent for each solver - for (int attempts = 0; attempts < pairs.size(); ++attempts) { - for (int i = 1; i < pairs.size(); ++i) { - if (!syncs->at(i).get()) { - P2PSync* parent = NULL; - for (int j = 0; j < syncs->size(); ++j) { - P2PSync* sync = j == 0 ? 
this : syncs->at(j).get(); - if (sync) { - const SolverParameter& p = sync->solver()->param(); - if (p.device_id() == pairs[i].parent()) { - parent = sync; - } - } - } - if (parent) { - param.set_device_id(pairs[i].device()); - syncs->at(i).reset(new P2PSync(solver_, parent, param)); - parent->children_.push_back((P2PSync*) syncs->at(i).get()); - } + virtual ~Worker() {} + + protected: + void InternalThreadEntry() { + // Create solver and install callbacks + SolverParameter param(rank0_->param()); + param.set_device_id(device_); +#ifdef DEBUG + int device; + CUDA_CHECK(cudaGetDevice(&device)); + CHECK_EQ(device, device_); +#endif + param.set_type(rank0_->type()); + shared_ptr > s(SolverRegistry::CreateSolver(param)); + CHECK_EQ(s->type(), rank0_->type()); + if (restore_) { + // Could not make NCCL broadcast solver state, it seems to crash + // if called in a tight loop, regardless of barriers etc. so + // restore all solvers from file. + s->Restore(restore_); + } + NCCL nccl(s); + nccl.set_barrier(barrier_); + s->add_callback(&nccl); + if (s->param().layer_wise_reduce()) { + s->net()->add_after_backward(&nccl); + } + (*nccls_)[Caffe::solver_rank()] = &nccl; + // Wait for other threads + barrier_->wait(); + // Wait for NCCL init + barrier_->wait(); + // Broadcast rank 0 state + nccl.Broadcast(); + // Solve + s->Step(param.max_iter() - s->iter()); + barrier_->wait(); +#ifdef DEBUG + // Check all solvers have same state + SGDSolver* sa = static_cast*>(rank0_.get()); + SGDSolver* sb = static_cast*>(s.get()); + for (int h = 0; h < sa->history().size(); ++h) { + CUDA_CHECK(cudaSetDevice(sa->param().device_id())); + const Dtype* a = sa->history()[h]->cpu_data(); + CUDA_CHECK(cudaSetDevice(sb->param().device_id())); + const Dtype* b = sb->history()[h]->cpu_data(); + for (int v = 0; v < sa->history()[h]->count(); ++v) { + CHECK_DOUBLE_EQ(a[v], b[v]); } } +#endif } -} - -template -void P2PSync::Run(const vector& gpus) { - vector > > syncs(gpus.size()); - Prepare(gpus, &syncs); - LOG(INFO)<< "Starting Optimization"; + shared_ptr > rank0_; + int device_; + boost::barrier* barrier_; + vector*>* nccls_; + const char* restore_; +}; - for (int i = 1; i < syncs.size(); ++i) { - syncs[i]->StartInternalThread(); +template +void NCCL::Run(const vector& gpus, const char* restore) { + boost::barrier barrier(static_cast(gpus.size())); + vector*> nccls(gpus.size()); + // Create workers + vector > > workers(gpus.size()); + for (int i = 1; i < gpus.size(); ++i) { + CUDA_CHECK(cudaSetDevice(gpus[i])); + Caffe::set_solver_rank(i); + Worker* w = new Worker(solver_, gpus[i], &barrier, + &nccls, restore); + w->StartInternalThread(); + workers[i].reset(w); } - - // Run root solver on current thread + CUDA_CHECK(cudaSetDevice(gpus[0])); + Caffe::set_solver_rank(0); + barrier_ = &barrier; + solver_->add_callback(this); + if (solver_->param().layer_wise_reduce()) { + solver_->net()->add_after_backward(this); + } + nccls[0] = this; + // Wait for workers + barrier.wait(); + // Init NCCL + InitSingleProcess(&nccls); + barrier.wait(); + // Run first solver on current thread + Broadcast(); solver_->Solve(); - - for (int i = 1; i < syncs.size(); ++i) { - syncs[i]->StopInternalThread(); + barrier.wait(); // Hangs without it when running tests + // Wait for shutdown + for (int i = 1; i < gpus.size(); ++i) { + workers[i]->StopInternalThread(); } } INSTANTIATE_CLASS(Params); INSTANTIATE_CLASS(GPUParams); -INSTANTIATE_CLASS(P2PSync); +INSTANTIATE_CLASS(Worker); +INSTANTIATE_CLASS(NCCL); } // namespace caffe + +#endif // USE_NCCL diff 
--git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto
index 4d0f33e7108..1ff7b1af1c6 100644
--- a/src/caffe/proto/caffe.proto
+++ b/src/caffe/proto/caffe.proto
@@ -98,7 +98,7 @@ message NetParameter {
 // NOTE
 // Update the next available ID when you add a new SolverParameter field.
 //
-// SolverParameter next available ID: 41 (last added: type)
+// SolverParameter next available ID: 43 (last added: weights)
 message SolverParameter {
   //////////////////////////////////////////////////////////////////////////////
   // Specifying the train and test networks
@@ -128,8 +128,7 @@ message SolverParameter {
   // The states for the train/test nets. Must be unspecified or
   // specified once per net.
   //
-  // By default, all states will have solver = true;
-  // train_state will have phase = TRAIN,
+  // By default, train_state will have phase = TRAIN,
   // and all test_state's will have phase = TEST.
   // Other defaults are set according to the NetState defaults.
   optional NetState train_state = 26;
@@ -187,7 +186,11 @@ message SolverParameter {
   optional float clip_gradients = 35 [default = -1];
 
   optional int32 snapshot = 14 [default = 0]; // The snapshot interval
-  optional string snapshot_prefix = 15; // The prefix for the snapshot.
+  // The prefix for the snapshot.
+  // If not set, it is replaced by the prototxt file path without extension.
+  // If set to a directory, it is augmented by the prototxt file name
+  // without extension.
+  optional string snapshot_prefix = 15;
   // whether to snapshot diff in the results or not. Snapshotting diff will help
   // debugging but the final protocol buffer size will be much larger.
   optional bool snapshot_diff = 16 [default = false];
@@ -219,7 +222,7 @@ message SolverParameter {
 
   // RMSProp decay value
   // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t)
-  optional float rms_decay = 38;
+  optional float rms_decay = 38 [default = 0.99];
 
   // If true, print information about the state of the net that may help with
   // debugging learning problems.
@@ -239,6 +242,19 @@ message SolverParameter {
   }
   // DEPRECATED: use type instead of solver_type
   optional SolverType solver_type = 30 [default = SGD];
+
+  // Overlap compute and communication for data parallel training
+  optional bool layer_wise_reduce = 41 [default = true];
+
+  // Path to caffemodel file(s) with pretrained weights to initialize finetuning.
+  // The same as the command line --weights parameter for the caffe train command.
+  // If the command line --weights parameter is specified, it has higher priority
+  // and overwrites this one(s).
+  // If the --snapshot command line parameter is specified, these are ignored.
+  // If several model files are expected, they can be listed in one
+  // weights parameter separated by ',' (like in a command string) or
+  // in repeated weights parameters separately.
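For the weights field declared just below, a solver prototxt could then carry the finetuning sources directly (the model file names here are ours, for illustration):

    # Comma-separated in a single entry...
    weights: "backbone.caffemodel,head.caffemodel"
    # ...or as repeated entries; both forms load every listed model
    # before training starts.
    weights: "backbone.caffemodel"
    weights: "head.caffemodel"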
+ repeated string weights = 42; } // A message that stores the solver snapshots @@ -448,7 +464,7 @@ message TransformationParameter { optional uint32 crop_size = 3 [default = 0]; // mean_file and mean_value cannot be specified at the same time optional string mean_file = 4; - // if specified can be repeated once (would substract it from all the channels) + // if specified can be repeated once (would subtract it from all the channels) // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; @@ -464,7 +480,7 @@ message LossParameter { optional int32 ignore_label = 1; // How to normalize the loss for loss layers that aggregate across batches, // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss layer. + // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. enum NormalizationMode { // Divide by the number of examples in the batch times spatial dimensions. // Outputs that receive the ignore label will NOT be ignored in computing @@ -478,6 +494,8 @@ message LossParameter { // Do not normalize the loss. NONE = 3; } + // For historical reasons, the default normalization for + // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. optional NormalizationMode normalization = 3 [default = VALID]; // Deprecated. Ignored if normalization is specified. If normalization // is not specified, then setting this to false will be equivalent to @@ -528,11 +546,21 @@ message ConcatParameter { } message BatchNormParameter { - // If false, accumulate global mean/variance values via a moving average. If - // true, use those accumulated values instead of computing mean/variance - // across the batch. + // If false, normalization is performed over the current mini-batch + // and global statistics are accumulated (but not yet used) by a moving + // average. + // If true, those accumulated mean and variance values are used for the + // normalization. + // By default, it is set to false when the network is in the training + // phase and true when the network is in the testing phase. optional bool use_global_stats = 1; - // How much does the moving average decay each iteration? + // What fraction of the moving average remains each iteration? + // Smaller values make the moving average decay faster, giving more + // weight to the recent values. + // Each iteration updates the moving average @f$S_{t-1}@f$ with the + // current mean @f$ Y_t @f$ by + // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ + // is the moving_average_fraction parameter. optional float moving_average_fraction = 2 [default = .999]; // Small value to add to the variance estimate so that we don't divide by // zero. @@ -683,8 +711,8 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Number of batches to prefetch to host memory, increase if - // data access bandwidth varies). + // Prefetch queue (Increase if data feeding bandwidth varies, within the + // limit of device memory for GPU training) optional uint32 prefetch = 10 [default = 4]; } @@ -831,6 +859,7 @@ message ImageDataParameter { message InfogainLossParameter { // Specify the infogain matrix source. 
optional string source = 1; + optional int32 axis = 2 [default = 1]; // axis of prob } message InnerProductParameter { @@ -953,9 +982,7 @@ message PythonParameter { // string, dictionary in Python dict format, JSON, etc. You may parse this // string in `setup` method and use it in `forward` and `backward`. optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. + // DEPRECATED optional bool share_in_parallel = 4 [default = false]; } @@ -1424,6 +1451,6 @@ message PReLUParameter { // Initial value of a_i. Default is a_i=0.25 for all i. optional FillerParameter filler = 1; - // Whether or not slope paramters are shared across channels. + // Whether or not slope parameters are shared across channels. optional bool channel_shared = 2 [default = false]; } diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index ece3913e88a..d229acff485 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -3,6 +3,7 @@ #include #include +#include "boost/algorithm/string.hpp" #include "caffe/solver.hpp" #include "caffe/util/format.hpp" #include "caffe/util/hdf5.hpp" @@ -26,16 +27,14 @@ SolverAction::Enum Solver::GetRequestedAction() { } template -Solver::Solver(const SolverParameter& param, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { +Solver::Solver(const SolverParameter& param) + : net_(), callbacks_(), requested_early_exit_(false) { Init(param); } template -Solver::Solver(const string& param_file, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { +Solver::Solver(const string& param_file) + : net_(), callbacks_(), requested_early_exit_(false) { SolverParameter param; ReadSolverParamsFromTextFileOrDie(param_file, ¶m); Init(param); @@ -43,26 +42,38 @@ Solver::Solver(const string& param_file, const Solver* root_solver) template void Solver::Init(const SolverParameter& param) { - CHECK(Caffe::root_solver() || root_solver_) - << "root_solver_ needs to be set for all non-root solvers"; LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " << std::endl << param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; CheckSnapshotWritePermissions(); - if (Caffe::root_solver() && param_.random_seed() >= 0) { - Caffe::set_random_seed(param_.random_seed()); + if (param_.random_seed() >= 0) { + Caffe::set_random_seed(param_.random_seed() + Caffe::solver_rank()); } // Scaffolding code InitTrainNet(); + InitTestNets(); if (Caffe::root_solver()) { - InitTestNets(); LOG(INFO) << "Solver scaffolding done."; } iter_ = 0; current_step_ = 0; } +// Load weights from the caffemodel(s) specified in "weights" solver parameter +// into the train and test nets. 
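Given the parsing below (boost::split on ',' followed by boost::trim on each entry), one call handles both spellings of the solver field; e.g. (paths ours):

    // Equivalent to two CopyTrainedLayersFrom() calls, applied in order:
    LoadNetWeights(net_, "backbone.caffemodel, head.caffemodel");

The helper itself follows.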
+template +void LoadNetWeights(shared_ptr > net, + const std::string& model_list) { + std::vector model_names; + boost::split(model_names, model_list, boost::is_any_of(",")); + for (int i = 0; i < model_names.size(); ++i) { + boost::trim(model_names[i]); + LOG(INFO) << "Finetuning from " << model_names[i]; + net->CopyTrainedLayersFrom(model_names[i]); + } +} + template void Solver::InitTrainNet() { const int num_train_nets = param_.has_net() + param_.has_net_param() + @@ -101,16 +112,14 @@ void Solver::InitTrainNet() { net_state.MergeFrom(net_param.state()); net_state.MergeFrom(param_.train_state()); net_param.mutable_state()->CopyFrom(net_state); - if (Caffe::root_solver()) { - net_.reset(new Net(net_param)); - } else { - net_.reset(new Net(net_param, root_solver_->net_.get())); + net_.reset(new Net(net_param)); + for (int w_idx = 0; w_idx < param_.weights_size(); ++w_idx) { + LoadNetWeights(net_, param_.weights(w_idx)); } } template void Solver::InitTestNets() { - CHECK(Caffe::root_solver()); const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; @@ -180,13 +189,11 @@ void Solver::InitTestNets() { net_params[i].mutable_state()->CopyFrom(net_state); LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; - if (Caffe::root_solver()) { - test_nets_[i].reset(new Net(net_params[i])); - } else { - test_nets_[i].reset(new Net(net_params[i], - root_solver_->test_nets_[i].get())); - } + test_nets_[i].reset(new Net(net_params[i])); test_nets_[i]->set_debug_info(param_.debug_info()); + for (int w_idx = 0; w_idx < param_.weights_size(); ++w_idx) { + LoadNetWeights(test_nets_[i], param_.weights(w_idx)); + } } } @@ -197,14 +204,16 @@ void Solver::Step(int iters) { int average_loss = this->param_.average_loss(); losses_.clear(); smoothed_loss_ = 0; + iteration_timer_.Start(); while (iter_ < stop_iter) { // zero-init the params net_->ClearParamDiffs(); if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization()) - && Caffe::root_solver()) { - TestAll(); + && (iter_ > 0 || param_.test_initialization())) { + if (Caffe::root_solver()) { + TestAll(); + } if (requested_early_exit_) { // Break out of the while loop because stop was requested while testing. break; @@ -225,8 +234,13 @@ void Solver::Step(int iters) { // average the loss across iterations for smoothed reporting UpdateSmoothedLoss(loss, start_iter, average_loss); if (display) { + float lapse = iteration_timer_.Seconds(); + float per_s = (iter_ - iterations_last_) / (lapse ? 
lapse : 1); LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ - << ", loss = " << smoothed_loss_; + << " (" << per_s << " iter/s, " << lapse << "s/" + << param_.display() << " iters), loss = " << smoothed_loss_; + iteration_timer_.Start(); + iterations_last_ = iter_; const vector*>& result = net_->output_blobs(); int score_index = 0; for (int j = 0; j < result.size(); ++j) { @@ -468,7 +482,6 @@ string Solver::SnapshotToHDF5() { template void Solver::Restore(const char* state_file) { - CHECK(Caffe::root_solver()); string state_filename(state_file); if (state_filename.size() >= 3 && state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) { diff --git a/src/caffe/solvers/adagrad_solver.cpp b/src/caffe/solvers/adagrad_solver.cpp index e78eadca141..d8107e1e623 100644 --- a/src/caffe/solvers/adagrad_solver.cpp +++ b/src/caffe/solvers/adagrad_solver.cpp @@ -12,7 +12,6 @@ void adagrad_update_gpu(int N, Dtype* g, Dtype* h, Dtype delta, template void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = this->net_->params_lr(); Dtype delta = this->param_.delta(); diff --git a/src/caffe/solvers/nesterov_solver.cpp b/src/caffe/solvers/nesterov_solver.cpp index 23ab2d4369a..7c1fac1f884 100644 --- a/src/caffe/solvers/nesterov_solver.cpp +++ b/src/caffe/solvers/nesterov_solver.cpp @@ -12,7 +12,6 @@ void nesterov_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, template void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); diff --git a/src/caffe/solvers/sgd_solver.cpp b/src/caffe/solvers/sgd_solver.cpp index f30f316d1a0..ad6abe54a0a 100644 --- a/src/caffe/solvers/sgd_solver.cpp +++ b/src/caffe/solvers/sgd_solver.cpp @@ -100,10 +100,10 @@ void SGDSolver::ClipGradients() { template void SGDSolver::ApplyUpdate() { - CHECK(Caffe::root_solver()); Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << this->iter_ + << ", lr = " << rate; } ClipGradients(); for (int param_id = 0; param_id < this->net_->learnable_params().size(); diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 4d3564172ab..88d9b78510a 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -3,26 +3,41 @@ #include "caffe/util/math_functions.hpp" namespace caffe { +SyncedMemory::SyncedMemory() + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), + own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false) { +#ifndef CPU_ONLY +#ifdef DEBUG + CUDA_CHECK(cudaGetDevice(&device_)); +#endif +#endif +} + +SyncedMemory::SyncedMemory(size_t size) + : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), + own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false) { +#ifndef CPU_ONLY +#ifdef DEBUG + CUDA_CHECK(cudaGetDevice(&device_)); +#endif +#endif +} SyncedMemory::~SyncedMemory() { + check_device(); if (cpu_ptr_ && own_cpu_data_) { CaffeFreeHost(cpu_ptr_, cpu_malloc_use_cuda_); } #ifndef CPU_ONLY if (gpu_ptr_ && own_gpu_data_) { - int initial_device; - cudaGetDevice(&initial_device); - if (gpu_device_ != -1) { - 
CUDA_CHECK(cudaSetDevice(gpu_device_)); - } CUDA_CHECK(cudaFree(gpu_ptr_)); - cudaSetDevice(initial_device); } #endif // CPU_ONLY } inline void SyncedMemory::to_cpu() { + check_device(); switch (head_) { case UNINITIALIZED: CaffeMallocHost(&cpu_ptr_, size_, &cpu_malloc_use_cuda_); @@ -49,10 +64,10 @@ inline void SyncedMemory::to_cpu() { } inline void SyncedMemory::to_gpu() { + check_device(); #ifndef CPU_ONLY switch (head_) { case UNINITIALIZED: - CUDA_CHECK(cudaGetDevice(&gpu_device_)); CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); caffe_gpu_memset(size_, 0, gpu_ptr_); head_ = HEAD_AT_GPU; @@ -60,7 +75,6 @@ inline void SyncedMemory::to_gpu() { break; case HEAD_AT_CPU: if (gpu_ptr_ == NULL) { - CUDA_CHECK(cudaGetDevice(&gpu_device_)); CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); own_gpu_data_ = true; } @@ -77,11 +91,13 @@ inline void SyncedMemory::to_gpu() { } const void* SyncedMemory::cpu_data() { + check_device(); to_cpu(); return (const void*)cpu_ptr_; } void SyncedMemory::set_cpu_data(void* data) { + check_device(); CHECK(data); if (own_cpu_data_) { CaffeFreeHost(cpu_ptr_, cpu_malloc_use_cuda_); @@ -92,6 +108,7 @@ void SyncedMemory::set_cpu_data(void* data) { } const void* SyncedMemory::gpu_data() { + check_device(); #ifndef CPU_ONLY to_gpu(); return (const void*)gpu_ptr_; @@ -102,16 +119,11 @@ const void* SyncedMemory::gpu_data() { } void SyncedMemory::set_gpu_data(void* data) { + check_device(); #ifndef CPU_ONLY CHECK(data); if (own_gpu_data_) { - int initial_device; - cudaGetDevice(&initial_device); - if (gpu_device_ != -1) { - CUDA_CHECK(cudaSetDevice(gpu_device_)); - } CUDA_CHECK(cudaFree(gpu_ptr_)); - cudaSetDevice(initial_device); } gpu_ptr_ = data; head_ = HEAD_AT_GPU; @@ -122,12 +134,14 @@ void SyncedMemory::set_gpu_data(void* data) { } void* SyncedMemory::mutable_cpu_data() { + check_device(); to_cpu(); head_ = HEAD_AT_CPU; return cpu_ptr_; } void* SyncedMemory::mutable_gpu_data() { + check_device(); #ifndef CPU_ONLY to_gpu(); head_ = HEAD_AT_GPU; @@ -140,9 +154,9 @@ void* SyncedMemory::mutable_gpu_data() { #ifndef CPU_ONLY void SyncedMemory::async_gpu_push(const cudaStream_t& stream) { + check_device(); CHECK(head_ == HEAD_AT_CPU); if (gpu_ptr_ == NULL) { - CUDA_CHECK(cudaGetDevice(&gpu_device_)); CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_)); own_gpu_data_ = true; } @@ -153,5 +167,20 @@ void SyncedMemory::async_gpu_push(const cudaStream_t& stream) { } #endif +void SyncedMemory::check_device() { +#ifndef CPU_ONLY +#ifdef DEBUG + int device; + cudaGetDevice(&device); + CHECK(device == device_); + if (gpu_ptr_ && own_gpu_data_) { + cudaPointerAttributes attributes; + CUDA_CHECK(cudaPointerGetAttributes(&attributes, gpu_ptr_)); + CHECK(attributes.device == device_); + } +#endif +#endif +} + } // namespace caffe diff --git a/src/caffe/test/CMakeLists.txt b/src/caffe/test/CMakeLists.txt index 35a803f2f41..d8afc30b76b 100644 --- a/src/caffe/test/CMakeLists.txt +++ b/src/caffe/test/CMakeLists.txt @@ -1,7 +1,7 @@ # The option allows to include in build only selected test files and exclude all others # Usage example: # cmake -DBUILD_only_tests="common,net,blob,im2col_kernel" -set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build without 'test_' prefix and extention") +set(BUILD_only_tests "" CACHE STRING "Blank or comma-separated list of test files to build without 'test_' prefix and extension") caffe_leave_only_selected_tests(test_srcs ${BUILD_only_tests}) caffe_leave_only_selected_tests(test_cuda ${BUILD_only_tests}) diff --git 
a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 6fe808bd5c5..e5cc9d5e85f 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -13,8 +13,10 @@ namespace caffe { -template -class AccuracyLayerTest : public CPUDeviceTest { +template +class AccuracyLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -69,11 +71,12 @@ class AccuracyLayerTest : public CPUDeviceTest { int top_k_; }; -TYPED_TEST_CASE(AccuracyLayerTest, TestDtypes); +TYPED_TEST_CASE(AccuracyLayerTest, TestDtypesAndDevices); TYPED_TEST(AccuracyLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); @@ -82,11 +85,12 @@ TYPED_TEST(AccuracyLayerTest, TestSetup) { } TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param(); accuracy_param->set_top_k(5); - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); @@ -95,8 +99,9 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { } TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); @@ -108,33 +113,39 @@ TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) { EXPECT_EQ(this->blob_top_per_class_->width(), 1); } -TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { +TYPED_TEST(AccuracyLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - - TypeParam max_value; - int max_id; - int num_correct_labels = 0; - for (int i = 0; i < 100; ++i) { - max_value = -FLT_MAX; - max_id = 0; - for (int j = 0; j < 10; ++j) { - if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { - max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); - max_id = j; + + // repeat the forward + for (int iter = 0; iter < 3; iter++) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + Dtype max_value; + int max_id; + int num_correct_labels = 0; + for (int i = 0; i < 100; ++i) { + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; } } - if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - ++num_correct_labels; - } + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / Dtype(100.0), 1e-4); } - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / 100.0, 1e-4); } 
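The conversion pattern above (and in the remaining accuracy tests) is uniform: the fixture moves from CPUDeviceTest to MultiDeviceTest, TypeParam becomes a (device, Dtype) bundle rather than a bare scalar type, and each forward is repeated three times so that state leaking between calls, e.g. through scratch diffs like those cleared for #6202, would trip the expectations. In outline (the fixture name is ours):

    template <typename TypeParam>
    class MyLayerTest : public MultiDeviceTest<TypeParam> {
      typedef typename TypeParam::Dtype Dtype;
    };
    TYPED_TEST_CASE(MyLayerTest, TestDtypesAndDevices);

    TYPED_TEST(MyLayerTest, TestForwardRepeated) {
      typedef typename TypeParam::Dtype Dtype;  // recover the scalar type
      // ... run Forward several times and re-check the outputs each pass.
    }

Each such test then runs once per combination of {float, double} and {CPU, GPU}.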
TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { + typedef typename TypeParam::Dtype Dtype; this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -142,195 +153,218 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { this->FillBottoms(); LayerParameter layer_param; layer_param.mutable_accuracy_param()->set_axis(1); - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - - TypeParam max_value; - const int num_labels = this->blob_bottom_label_->count(); - int max_id; - int num_correct_labels = 0; - vector label_offset(3); - for (int n = 0; n < this->blob_bottom_data_->num(); ++n) { - for (int h = 0; h < this->blob_bottom_data_->height(); ++h) { - for (int w = 0; w < this->blob_bottom_data_->width(); ++w) { - max_value = -FLT_MAX; - max_id = 0; - for (int c = 0; c < this->blob_bottom_data_->channels(); ++c) { - const TypeParam pred_value = - this->blob_bottom_data_->data_at(n, c, h, w); - if (pred_value > max_value) { - max_value = pred_value; - max_id = c; + + // repeat the forward + for (int iter = 0; iter < 3; iter++) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + Dtype max_value; + const int num_labels = this->blob_bottom_label_->count(); + int max_id; + int num_correct_labels = 0; + vector label_offset(3); + for (int n = 0; n < this->blob_bottom_data_->num(); ++n) { + for (int h = 0; h < this->blob_bottom_data_->height(); ++h) { + for (int w = 0; w < this->blob_bottom_data_->width(); ++w) { + max_value = -FLT_MAX; + max_id = 0; + for (int c = 0; c < this->blob_bottom_data_->channels(); ++c) { + const Dtype pred_value = + this->blob_bottom_data_->data_at(n, c, h, w); + if (pred_value > max_value) { + max_value = pred_value; + max_id = c; + } + } + label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; + const int correct_label = + static_cast(this->blob_bottom_label_->data_at(label_offset)); + if (max_id == correct_label) { + ++num_correct_labels; } - } - label_offset[0] = n; label_offset[1] = h; label_offset[2] = w; - const int correct_label = - static_cast(this->blob_bottom_label_->data_at(label_offset)); - if (max_id == correct_label) { - ++num_correct_labels; } } } + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / Dtype(num_labels), 1e-4); } - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / TypeParam(num_labels), 1e-4); } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const TypeParam kIgnoreLabelValue = -1; + const Dtype kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); // Manually set some labels to the ignore label value (-1). 
this->blob_bottom_label_->mutable_cpu_data()[2] = kIgnoreLabelValue; this->blob_bottom_label_->mutable_cpu_data()[5] = kIgnoreLabelValue; this->blob_bottom_label_->mutable_cpu_data()[32] = kIgnoreLabelValue; layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - - TypeParam max_value; - int max_id; - int num_correct_labels = 0; - int count = 0; - for (int i = 0; i < 100; ++i) { - if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - continue; - } - ++count; - max_value = -FLT_MAX; - max_id = 0; - for (int j = 0; j < 10; ++j) { - if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { - max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); - max_id = j; + + // repeat the forward + for (int iter = 0; iter < 3; iter++) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + Dtype max_value; + int max_id; + int num_correct_labels = 0; + int count = 0; + for (int i = 0; i < 100; ++i) { + if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + continue; + } + ++count; + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; } } - if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - ++num_correct_labels; - } + EXPECT_EQ(count, 97); // We set 3 out of 100 labels to kIgnoreLabelValue. + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / Dtype(count), 1e-4); } - EXPECT_EQ(count, 97); // We set 3 out of 100 labels to kIgnoreLabelValue. - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / TypeParam(count), 1e-4); } -TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { +TYPED_TEST(AccuracyLayerTest, TestForwardTopK) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param(); accuracy_param->set_top_k(this->top_k_); - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - - TypeParam current_value; - int current_rank; - int num_correct_labels = 0; - for (int i = 0; i < 100; ++i) { - for (int j = 0; j < 10; ++j) { - current_value = this->blob_bottom_data_->data_at(i, j, 0, 0); - current_rank = 0; - for (int k = 0; k < 10; ++k) { - if (this->blob_bottom_data_->data_at(i, k, 0, 0) > current_value) { - ++current_rank; + + // repeat the forward + for (int iter = 0; iter < 3; iter++) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + Dtype current_value; + int current_rank; + int num_correct_labels = 0; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 10; ++j) { + current_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + current_rank = 0; + for (int k = 0; k < 10; ++k) { + if (this->blob_bottom_data_->data_at(i, k, 0, 0) > current_value) { + ++current_rank; + } + } + if (current_rank < this->top_k_ && + j == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; } - } - if (current_rank < this->top_k_ && - j == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - ++num_correct_labels; } } - } - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / 100.0, 1e-4); + 
EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / Dtype(100.0), 1e-4); + } } -TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { +TYPED_TEST(AccuracyLayerTest, TestForwardPerClass) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); - - TypeParam max_value; - int max_id; - int num_correct_labels = 0; - const int num_class = this->blob_top_per_class_->num(); - vector correct_per_class(num_class, 0); - vector num_per_class(num_class, 0); - for (int i = 0; i < 100; ++i) { - max_value = -FLT_MAX; - max_id = 0; - for (int j = 0; j < 10; ++j) { - if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { - max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); - max_id = j; + // repeat the forward + for (int iter = 0; iter < 3; iter++) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); + + Dtype max_value; + int max_id; + int num_correct_labels = 0; + const int num_class = this->blob_top_per_class_->num(); + vector correct_per_class(num_class, 0); + vector num_per_class(num_class, 0); + for (int i = 0; i < 100; ++i) { + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + ++num_per_class[this->blob_bottom_label_->data_at(i, 0, 0, 0)]; + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; + ++correct_per_class[max_id]; } } - ++num_per_class[this->blob_bottom_label_->data_at(i, 0, 0, 0)]; - if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - ++num_correct_labels; - ++correct_per_class[max_id]; + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / 100.0, 1e-4); + for (int i = 0; i < num_class; ++i) { + Dtype accuracy_per_class = (num_per_class[i] > 0 ? + static_cast(correct_per_class[i]) / num_per_class[i] : 0); + EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), + accuracy_per_class, 1e-4); } } - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / 100.0, 1e-4); - for (int i = 0; i < num_class; ++i) { - TypeParam accuracy_per_class = (num_per_class[i] > 0 ? - static_cast(correct_per_class[i]) / num_per_class[i] : 0); - EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - accuracy_per_class, 1e-4); - } } -TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { +TYPED_TEST(AccuracyLayerTest, TestForwardPerClassWithIgnoreLabel) { + typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - const TypeParam kIgnoreLabelValue = -1; + const Dtype kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); - AccuracyLayer layer(layer_param); + AccuracyLayer layer(layer_param); // Manually set some labels to the ignore label value (-1). 
this->blob_bottom_label_->mutable_cpu_data()[2] = kIgnoreLabelValue; this->blob_bottom_label_->mutable_cpu_data()[5] = kIgnoreLabelValue; this->blob_bottom_label_->mutable_cpu_data()[32] = kIgnoreLabelValue; layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); - - TypeParam max_value; - int max_id; - int num_correct_labels = 0; - const int num_class = this->blob_top_per_class_->num(); - vector correct_per_class(num_class, 0); - vector num_per_class(num_class, 0); - int count = 0; - for (int i = 0; i < 100; ++i) { - if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - continue; - } - ++count; - max_value = -FLT_MAX; - max_id = 0; - for (int j = 0; j < 10; ++j) { - if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { - max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); - max_id = j; + + // repeat the forward + for (int iter = 0; iter < 3; iter++) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); + + Dtype max_value; + int max_id; + int num_correct_labels = 0; + const int num_class = this->blob_top_per_class_->num(); + vector correct_per_class(num_class, 0); + vector num_per_class(num_class, 0); + int count = 0; + for (int i = 0; i < 100; ++i) { + if (kIgnoreLabelValue == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + continue; + } + ++count; + max_value = -FLT_MAX; + max_id = 0; + for (int j = 0; j < 10; ++j) { + if (this->blob_bottom_data_->data_at(i, j, 0, 0) > max_value) { + max_value = this->blob_bottom_data_->data_at(i, j, 0, 0); + max_id = j; + } + } + ++num_per_class[this->blob_bottom_label_->data_at(i, 0, 0, 0)]; + if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { + ++num_correct_labels; + ++correct_per_class[max_id]; } } - ++num_per_class[this->blob_bottom_label_->data_at(i, 0, 0, 0)]; - if (max_id == this->blob_bottom_label_->data_at(i, 0, 0, 0)) { - ++num_correct_labels; - ++correct_per_class[max_id]; + EXPECT_EQ(count, 97); + EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), + num_correct_labels / Dtype(count), 1e-4); + for (int i = 0; i < 10; ++i) { + Dtype accuracy_per_class = (num_per_class[i] > 0 ? + static_cast(correct_per_class[i]) / num_per_class[i] : 0); + EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), + accuracy_per_class, 1e-4); } } - EXPECT_EQ(count, 97); - EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), - num_correct_labels / TypeParam(count), 1e-4); - for (int i = 0; i < 10; ++i) { - TypeParam accuracy_per_class = (num_per_class[i] > 0 ? - static_cast(correct_per_class[i]) / num_per_class[i] : 0); - EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - accuracy_per_class, 1e-4); - } } } // namespace caffe diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index 6473b74d0a6..8f333bd7105 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -15,7 +15,7 @@ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); caffe::GlobalInit(&argc, &argv); #ifndef CPU_ONLY - // Before starting testing, let's first print out a few cuda defice info. + // Before starting testing, let's first print out a few cuda device info. 
   int device;
   cudaGetDeviceCount(&device);
   cout << "Cuda number of devices: " << device << endl;
diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp
index 9bb19d13592..85c10a29483 100644
--- a/src/caffe/test/test_convolution_layer.cpp
+++ b/src/caffe/test/test_convolution_layer.cpp
@@ -695,7 +695,7 @@ TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) {
   }
   ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count());
   for (int i = 0; i < backward_result_2d.count(); ++i) {
-    EXPECT_EQ(backward_result_2d.cpu_diff()[i],
+    EXPECT_FLOAT_EQ(backward_result_2d.cpu_diff()[i],
               backward_result_nd.cpu_diff()[i]);
   }
   ASSERT_EQ(backward_weight_result_nd.count(),
diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp
index 3e8d113d918..3835af1f173 100644
--- a/src/caffe/test/test_data_layer.cpp
+++ b/src/caffe/test/test_data_layer.cpp
@@ -105,6 +105,32 @@ class DataLayerTest : public MultiDeviceTest<TypeParam> {
     }
   }
 
+  void TestSkip() {
+    LayerParameter param;
+    param.set_phase(TRAIN);
+    DataParameter* data_param = param.mutable_data_param();
+    int batch_size = 5;
+    data_param->set_batch_size(batch_size);
+    data_param->set_source(filename_->c_str());
+    data_param->set_backend(backend_);
+    Caffe::set_solver_count(8);
+    for (int dev = 0; dev < Caffe::solver_count(); ++dev) {
+      Caffe::set_solver_rank(dev);
+      DataLayer<Dtype> layer(param);
+      layer.SetUp(blob_bottom_vec_, blob_top_vec_);
+      int label = dev;
+      for (int iter = 0; iter < 10; ++iter) {
+        layer.Forward(blob_bottom_vec_, blob_top_vec_);
+        for (int i = 0; i < batch_size; ++i) {
+          EXPECT_EQ(label % batch_size, blob_top_label_->cpu_data()[i]);
+          label += Caffe::solver_count();
+        }
+      }
+    }
+    Caffe::set_solver_count(1);
+    Caffe::set_solver_rank(0);
+  }
+
   void TestReshape(DataParameter_DB backend) {
     const int num_inputs = 5;
     // Save data of varying shapes.
@@ -356,6 +382,11 @@ TYPED_TEST(DataLayerTest, TestReadLevelDB) {
   this->TestRead();
 }
 
+TYPED_TEST(DataLayerTest, TestSkipLevelDB) {
+  this->Fill(false, DataParameter_DB_LEVELDB);
+  this->TestSkip();
+}
+
 TYPED_TEST(DataLayerTest, TestReshapeLevelDB) {
   this->TestReshape(DataParameter_DB_LEVELDB);
 }
@@ -396,6 +427,11 @@ TYPED_TEST(DataLayerTest, TestReadLMDB) {
   this->TestRead();
 }
 
+TYPED_TEST(DataLayerTest, TestSkipLMDB) {
+  this->Fill(false, DataParameter_DB_LMDB);
+  this->TestSkip();
+}
+
 TYPED_TEST(DataLayerTest, TestReshapeLMDB) {
   this->TestReshape(DataParameter_DB_LMDB);
 }
diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp
index c4b09ad555a..1a022d6f7bc 100644
--- a/src/caffe/test/test_deconvolution_layer.cpp
+++ b/src/caffe/test/test_deconvolution_layer.cpp
@@ -5,6 +5,7 @@
 #include "caffe/blob.hpp"
 #include "caffe/common.hpp"
 #include "caffe/filler.hpp"
+#include "caffe/layers/cudnn_deconv_layer.hpp"
 #include "caffe/layers/deconv_layer.hpp"
 
 #include "caffe/test/test_caffe_main.hpp"
@@ -301,4 +302,268 @@ TYPED_TEST(DeconvolutionLayerTest, TestGradient3D) {
                                   this->blob_top_vec_);
 }
 
+#ifdef USE_CUDNN
+
+// Since ConvolutionLayerTest checks the shared conv/deconv code in detail,
+// we'll just do a simple forward test and a gradient check.
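A quick sanity check on the shape expectations in the TestSetup test that follows: for deconvolution, the output extent along each spatial axis is stride * (input - 1) + kernel - 2 * pad. A minimal standalone sketch (the deconv_out helper is ours, for illustration only; the tests set only kernel and stride, so pad is assumed zero):

#include <iostream>

// Deconvolution ("transposed convolution") output extent along one axis.
int deconv_out(int in, int kernel, int stride, int pad) {
  return stride * (in - 1) + kernel - 2 * pad;
}

int main() {
  // TestSetup below: bottom is 2 x 3 x 6 x 4, kernel 3, stride 2, pad 0.
  std::cout << deconv_out(6, 3, 2, 0) << "\n";  // height: 2*(6-1)+3 = 13
  std::cout << deconv_out(4, 3, 2, 0) << "\n";  // width:  2*(4-1)+3 = 9
  return 0;
}

The expected values in TestSimpleCuDNNDeconvolution follow the same way: with all-ones bottoms, three input channels, constant unit weights, and bias 0.1, a non-overlapped output cell sums to 3 + 0.1 = 3.1, with 3 more wherever kernel applications overlap along one spatial axis and 9 more where they overlap along both.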
+template +class CuDNNDeconvolutionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + CuDNNDeconvolutionLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), + blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~CuDNNDeconvolutionLayerTest() { + delete blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete blob_top_2_; + } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const blob_top_2_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(CuDNNDeconvolutionLayerTest, TestDtypesAndDevices); + +TYPED_TEST(CuDNNDeconvolutionLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new CuDNNDeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 13); + EXPECT_EQ(this->blob_top_->width(), 9); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 13); + EXPECT_EQ(this->blob_top_2_->width(), 9); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new CuDNNDeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 13); + EXPECT_EQ(this->blob_top_->width(), 9); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 13); + EXPECT_EQ(this->blob_top_2_->width(), 9); +} + +TYPED_TEST(CuDNNDeconvolutionLayerTest, TestSimpleCuDNNDeconvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new CuDNNDeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + // constant-fill the bottom blobs + FillerParameter filler_param; + filler_param.set_value(1.); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + 
layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // simply check that accumulation works with overlapping filters + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + Dtype expected = 3.1; + bool h_overlap = h % 2 == 0 && h > 0 + && h < this->blob_top_->height() - 1; + bool w_overlap = w % 2 == 0 && w > 0 + && w < this->blob_top_->width() - 1; + if (h_overlap && w_overlap) { + expected += 9; + } else if (h_overlap || w_overlap) { + expected += 3; + } + EXPECT_NEAR(top_data[this->blob_top_->offset(n, c, h, w)], + expected, 1e-4); + } + } + } + } +} + +TYPED_TEST(CuDNNDeconvolutionLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->add_kernel_size(2); + convolution_param->add_stride(1); + convolution_param->set_num_output(1); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + CuDNNDeconvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(CuDNNDeconvolutionLayerTest, TestNDAgainst2D) { + typedef typename TypeParam::Dtype Dtype; + const int kernel_h = 11; + const int kernel_w = 13; + vector bottom_shape(4); + bottom_shape[0] = 15; + bottom_shape[1] = 12; + bottom_shape[2] = kernel_h * 2; + bottom_shape[3] = kernel_w * 2; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_num_output(18); + convolution_param->set_bias_term(false); + convolution_param->set_group(6); + convolution_param->set_kernel_h(kernel_h); + convolution_param->set_kernel_w(kernel_w); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + Blob weights; + Blob top_diff; + // Shape and fill weights and top_diff. + bool copy_diff; + bool reshape; + { + CuDNNDeconvolutionLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + top_diff.ReshapeLike(*this->blob_top_); + filler.Fill(&top_diff); + ASSERT_EQ(1, layer.blobs().size()); + copy_diff = false; reshape = true; + weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape); + } + vector propagate_down(1, true); + Blob result_2d; + Blob backward_result_2d; + Blob backward_weight_result_2d; + // Test with 2D im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_2d. 
+ convolution_param->set_force_nd_im2col(false); + CuDNNDeconvolutionLayer layer_2d(layer_param); + layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_2d.blobs().size()); + copy_diff = false; reshape = false; + layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_2d. + ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_2d.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape); + } + Blob result_nd; + Blob backward_result_nd; + Blob backward_weight_result_nd; + // Test with ND im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_nd. + convolution_param->set_force_nd_im2col(true); + CuDNNDeconvolutionLayer layer_nd(layer_param); + layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_nd.blobs().size()); + copy_diff = false; reshape = false; + layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_nd. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_nd.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); + } + ASSERT_EQ(result_nd.count(), result_2d.count()); + for (int i = 0; i < result_2d.count(); ++i) { + EXPECT_NEAR(result_2d.cpu_data()[i], result_nd.cpu_data()[i], 1e-4); + } + ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); + for (int i = 0; i < backward_result_2d.count(); ++i) { + EXPECT_EQ(backward_result_2d.cpu_diff()[i], + backward_result_nd.cpu_diff()[i]); + } + ASSERT_EQ(backward_weight_result_nd.count(), + backward_weight_result_2d.count()); + for (int i = 0; i < backward_weight_result_2d.count(); ++i) { + EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], + backward_weight_result_nd.cpu_diff()[i]); + } +} + +#endif + } // namespace caffe diff --git a/src/caffe/test/test_embed_layer.cpp b/src/caffe/test/test_embed_layer.cpp index dc7f5c4aa47..13f13a878d3 100644 --- a/src/caffe/test/test_embed_layer.cpp +++ b/src/caffe/test/test_embed_layer.cpp @@ -124,7 +124,7 @@ TYPED_TEST(EmbedLayerTest, TestForwardWithBias) { top_offset[4] = 0; bias_offset[0] = 0; for (int j = 0; j < kNumOutput; ++j) { - EXPECT_EQ(layer->blobs()[0]->data_at(weight_offset) + + EXPECT_FLOAT_EQ(layer->blobs()[0]->data_at(weight_offset) + layer->blobs()[1]->data_at(bias_offset), this->blob_top_->data_at(top_offset)); ++top_offset[4]; diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp index f253f9fd393..b026f5b2077 100644 --- a/src/caffe/test/test_euclidean_loss_layer.cpp +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -39,7 +39,7 @@ class EuclideanLossLayerTest : public MultiDeviceTest { void TestForward() { // Get the loss without a specified objective weight -- should be - // equivalent to explicitly specifiying a weight of 1. + // equivalent to explicitly specifying a weight of 1. LayerParameter layer_param; EuclideanLossLayer layer_weight_1(layer_param); layer_weight_1.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 26e9b217e35..f84d707baa0 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -29,7 +29,7 @@ TYPED_TEST(ConstantFillerTest, TestFill) { const int count = this->blob_->count(); const TypeParam* data = this->blob_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_GE(data[i], this->filler_param_.value()); + EXPECT_EQ(data[i], this->filler_param_.value()); } } @@ -238,4 +238,45 @@ TYPED_TEST(MSRAFillerTest, TestFillAverage) { this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); } +template +class BilinearFillerTest : public ::testing::Test { + protected: + BilinearFillerTest() : filler_param_() {} + virtual void test_params(const int n) { + this->blob_ = new Blob(1000, 2, n, n); + this->filler_.reset(new BilinearFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int outer_num = this->blob_->count(0, 2); + const int inner_num = this->blob_->count(2, 4); + const Dtype* data = this->blob_->cpu_data(); + int f = ceil(this->blob_->width() / 2.); + Dtype c = (this->blob_->width() - 1) / (2. 
* f); + for (int i = 0; i < outer_num; ++i) { + for (int j = 0; j < inner_num; ++j) { + Dtype x = j % this->blob_->width(); + Dtype y = (j / this->blob_->width()) % this->blob_->height(); + Dtype expected_value = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); + const Dtype actual_value = data[i * inner_num + j]; + EXPECT_NEAR(expected_value, actual_value, 0.01); + } + } + } + virtual ~BilinearFillerTest() { delete blob_; } + Blob* blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(BilinearFillerTest, TestDtypes); + +TYPED_TEST(BilinearFillerTest, TestFillOdd) { + const int n = 7; + this->test_params(n); +} +TYPED_TEST(BilinearFillerTest, TestFillEven) { + const int n = 6; + this->test_params(n); +} + } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 975a8f0f88a..f4395f5311c 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -28,7 +28,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { seed_(1701), num_(4), channels_(3), height_(10), width_(10), share_(false) { input_file_ = new string( - CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); + ABS_TEST_DATA_DIR "/solver_data_list.txt"); } ~GradientBasedSolverTest() { delete input_file_; @@ -36,7 +36,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { string snapshot_prefix_; shared_ptr > solver_; - shared_ptr > sync_; +#ifdef USE_NCCL + shared_ptr > nccl_; +#endif int seed_; // Dimensions are determined by generate_sample_data.py // TODO this is brittle and the hdf5 file should be checked instead. @@ -85,6 +87,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { "lr_policy: 'fixed' " "iter_size: " << iter_size << " " "device_id: " << device_id << " " + "layer_wise_reduce: " << (!share_) << " " "net_param { " " name: 'TestNetwork' " " layer { " @@ -183,7 +186,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { } Caffe::set_random_seed(this->seed_); this->InitSolverFromProtoString(proto.str()); - if (from_snapshot != NULL) { + if (from_snapshot) { this->solver_->Restore(from_snapshot); for (int i = 0; i < this->solver_->iter(); ++i) { this->solver_->net()->Forward(); @@ -202,9 +205,10 @@ class GradientBasedSolverTest : public MultiDeviceTest { gpus.push_back(i); } Caffe::set_solver_count(gpus.size()); - this->sync_.reset(new P2PSync( - this->solver_, NULL, this->solver_->param())); - this->sync_->Run(gpus); +#ifdef USE_NCCL + this->nccl_.reset(new NCCL(this->solver_)); + this->nccl_->Run(gpus, from_snapshot); +#endif Caffe::set_solver_count(1); } if (snapshot) { @@ -457,12 +461,28 @@ class GradientBasedSolverTest : public MultiDeviceTest { const int kIterSize = 1; // Test over all numbers of devices. int available_devices = 1; -#ifndef CPU_ONLY +#ifdef USE_NCCL if (Caffe::mode() == Caffe::GPU) { CUDA_CHECK(cudaGetDeviceCount(&available_devices)); } #endif - for (int devices = 1; devices <= available_devices; ++devices) { + // Takes a while to test all sizes for each test so sparse + vector sizes; + sizes.push_back(1); + if (available_devices >= 2) { + sizes.push_back(2); + } + if (available_devices >= 3) { + sizes.push_back(3); + } + if (available_devices >= 8) { + sizes.push_back(8); + } + if (available_devices >= 16) { + sizes.push_back(16); + } + for (int i = 0; i < sizes.size(); ++i) { + int devices = sizes[i]; // Configure batch size for single / multi device equivalence. 
// Constant data is needed for multi device as for accumulation. num_ = kNum * devices; @@ -538,9 +558,11 @@ class GradientBasedSolverTest : public MultiDeviceTest { const vector*>& params = solver_->net()->learnable_params(); for (int i = 0; i < params.size(); ++i) { for (int j = 0; j < params[i]->count(); ++j) { - EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) + EXPECT_FLOAT_EQ(param_copies[i]->cpu_data()[j], + params[i]->cpu_data()[j]) << "param " << i << " data differed at dim " << j; - EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) + EXPECT_FLOAT_EQ(param_copies[i]->cpu_diff()[j], + params[i]->cpu_diff()[j]) << "param " << i << " diff differed at dim " << j; } } @@ -549,9 +571,11 @@ class GradientBasedSolverTest : public MultiDeviceTest { const vector > >& history = solver_->history(); for (int i = 0; i < history.size(); ++i) { for (int j = 0; j < history[i]->count(); ++j) { - EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) + EXPECT_FLOAT_EQ(history_copies[i]->cpu_data()[j], + history[i]->cpu_data()[j]) << "history blob " << i << " data differed at dim " << j; - EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) + EXPECT_FLOAT_EQ(history_copies[i]->cpu_diff()[j], + history[i]->cpu_diff()[j]) << "history blob " << i << " diff differed at dim " << j; } } diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index 3833ebff78e..f94dd57e7de 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -20,8 +20,7 @@ class HDF5OutputLayerTest : public MultiDeviceTest { protected: HDF5OutputLayerTest() - : input_file_name_( - CMAKE_SOURCE_DIR "caffe/test/test_data/sample_data.h5"), + : input_file_name_(ABS_TEST_DATA_DIR "/sample_data.h5"), blob_data_(new Blob()), blob_label_(new Blob()), num_(5), @@ -77,10 +76,12 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { H5P_DEFAULT); ASSERT_GE(file_id, 0)<< "Failed to open HDF5 file" << this->input_file_name_; + // Allow reshape here as we are loading data not params + bool reshape = true; hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, - this->blob_data_); + this->blob_data_, reshape); hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, - this->blob_label_); + this->blob_label_, reshape); herr_t status = H5Fclose(file_id); EXPECT_GE(status, 0)<< "Failed to close HDF5 file " << this->input_file_name_; @@ -105,12 +106,12 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { Blob* blob_data = new Blob(); hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, - blob_data); + blob_data, reshape); this->CheckBlobEqual(*(this->blob_data_), *blob_data); Blob* blob_label = new Blob(); hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, - blob_label); + blob_label, reshape); this->CheckBlobEqual(*(this->blob_label_), *blob_label); status = H5Fclose(file_id); diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index 8884ce95a23..3977c4866c7 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -30,8 +30,7 @@ class HDF5DataLayerTest : public MultiDeviceTest { blob_top_vec_.push_back(blob_top_label2_); // Check out generate_sample_data.py in the same directory. 
- filename = new string( - CMAKE_SOURCE_DIR "caffe/test/test_data/sample_data_list.txt" CMAKE_EXT); + filename = new string(ABS_TEST_DATA_DIR "/sample_data_list.txt"); LOG(INFO)<< "Using sample HDF5 data file " << filename; } @@ -70,7 +69,7 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { int height = 6; int width = 5; - // Test that the layer setup got the correct parameters. + // Test that the layer setup gives correct parameters. HDF5DataLayer layer(param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_data_->num(), batch_size); @@ -133,4 +132,34 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { } } +TYPED_TEST(HDF5DataLayerTest, TestSkip) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter param; + param.add_top("data"); + param.add_top("label"); + + HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param(); + int batch_size = 5; + hdf5_data_param->set_batch_size(batch_size); + hdf5_data_param->set_source(*(this->filename)); + + Caffe::set_solver_count(8); + for (int dev = 0; dev < Caffe::solver_count(); ++dev) { + Caffe::set_solver_rank(dev); + + HDF5DataLayer layer(param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + int label = dev; + for (int iter = 0; iter < 1; ++iter) { + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < batch_size; ++i) { + EXPECT_EQ(1 + label, this->blob_top_label_->cpu_data()[i]); + label = (label + Caffe::solver_count()) % (batch_size * 2); + } + } + } + Caffe::set_solver_count(1); + Caffe::set_solver_rank(0); +} + } // namespace caffe diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index a24ac683dc5..34f21271a62 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -1,3 +1,4 @@ +#include #include #include "gtest/gtest.h" @@ -18,17 +19,22 @@ class InfogainLossLayerTest : public MultiDeviceTest { protected: InfogainLossLayerTest() - : blob_bottom_data_(new Blob(10, 5, 1, 1)), - blob_bottom_label_(new Blob(10, 1, 1, 1)), + : blob_bottom_data_(new Blob(4, 2, 5, 2)), + blob_bottom_label_(new Blob(4, 2, 1, 2)), blob_bottom_infogain_(new Blob(1, 1, 5, 5)), - blob_top_loss_(new Blob()) { + blob_top_loss_(new Blob()), + blob_top_prob_(new Blob()), + inner_(2), outer_(4*2), num_labels_(5) { Caffe::set_random_seed(1701); FillerParameter filler_param; - PositiveUnitballFiller filler(filler_param); + filler_param.set_min(-0.5); + filler_param.set_max(2.0); + UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); for (int i = 0; i < blob_bottom_label_->count(); ++i) { - blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5; + blob_bottom_label_->mutable_cpu_data()[i] = + caffe_rng_rand() % num_labels_; } blob_bottom_vec_.push_back(blob_bottom_label_); filler_param.set_min(0.1); @@ -37,29 +43,94 @@ class InfogainLossLayerTest : public MultiDeviceTest { infogain_filler.Fill(this->blob_bottom_infogain_); blob_bottom_vec_.push_back(blob_bottom_infogain_); blob_top_vec_.push_back(blob_top_loss_); + blob_top_vec_.push_back(blob_top_prob_); } virtual ~InfogainLossLayerTest() { delete blob_bottom_data_; delete blob_bottom_label_; delete blob_bottom_infogain_; delete blob_top_loss_; + delete blob_top_prob_; } Blob* const blob_bottom_data_; Blob* const blob_bottom_label_; Blob* const blob_bottom_infogain_; Blob* const blob_top_loss_; + Blob* const blob_top_prob_; vector*> blob_bottom_vec_; vector*> 
blob_top_vec_; + int inner_, outer_, num_labels_; }; TYPED_TEST_CASE(InfogainLossLayerTest, TestDtypesAndDevices); +TYPED_TEST(InfogainLossLayerTest, TestInfogainLoss) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_infogain_loss_param()->set_axis(2); + layer_param.clear_loss_weight(); + layer_param.add_loss_weight(1); + layer_param.add_loss_weight(0); + /*vector* lw = layer_param.mutable_loss_weight(); + lw->clear(); + lw->push_back(1); + lw->push_back(1);*/ + InfogainLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* data = this->blob_bottom_vec_[0]->cpu_data(); + const Dtype* prob = this->blob_top_vec_[1]->cpu_data(); + const Dtype* labels = this->blob_bottom_vec_[1]->cpu_data(); + const Dtype* H = this->blob_bottom_vec_[2]->cpu_data(); + // first. test the prob top + CHECK_EQ(this->blob_bottom_vec_[0]->num_axes(), + this->blob_top_vec_[1]->num_axes()) + << "prob top shape not match bottom data"; + for (int ai = 0 ; ai < this->blob_bottom_vec_[0]->num_axes(); ai++) { + CHECK_EQ(this->blob_bottom_vec_[0]->shape(ai), + this->blob_top_vec_[1]->shape(ai)) + << "prob top shape not match bottom data"; + } + vector est_prob(this->num_labels_, 0); + for ( int i = 0 ; i < this->outer_; i++ ) { + for ( int j = 0; j < this->inner_; j++ ) { + Dtype den = 0; + for ( int l = 0; l < this->num_labels_; l++ ) { + est_prob[l] = std::exp( + data[i*this->num_labels_*this->inner_ + l*this->inner_ + j]); + den += est_prob[l]; + } + for ( int l = 0; l < this->num_labels_; l++ ) { + EXPECT_NEAR(prob[i*this->num_labels_*this->inner_ + l*this->inner_ + j], + est_prob[l]/den, 1e-6); + } + } + } + Dtype loss = 0; // loss from prob top + for ( int i = 0 ; i < this->outer_; i++ ) { + for ( int j = 0; j < this->inner_; j++ ) { + int gt = static_cast(labels[i*this->inner_+j]); + for ( int l = 0; l < this->num_labels_; l++ ) { + loss -= H[gt*this->num_labels_ + l] * + log(std::max( + prob[i*this->num_labels_*this->inner_ + l*this->inner_ + j], + Dtype(kLOG_THRESHOLD))); + } + } + } + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], + loss/(this->outer_*this->inner_), 1e-6); +} TYPED_TEST(InfogainLossLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; + layer_param.mutable_infogain_loss_param()->set_axis(2); InfogainLossLayer layer(layer_param); - GradientChecker checker(1e-4, 2e-2, 1701, 1, 0.01); + this->blob_top_vec_.clear(); // ignore prob top. 
+ this->blob_top_vec_.push_back(this->blob_top_loss_); + GradientChecker checker(1e-4, 2e-2, 1701); // no "kink" checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_, 0); } diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index f1ec2333fae..6d84d292b38 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -60,9 +60,9 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) { EXPECT_EQ(this->blob_top_->channels(), 10); } -/** @brief TestSetUp while toggling tranpose flag +/** @brief TestSetUp while toggling transpose flag */ -TYPED_TEST(InnerProductLayerTest, TestSetUpTranposeFalse) { +TYPED_TEST(InnerProductLayerTest, TestSetUpTransposeFalse) { typedef typename TypeParam::Dtype Dtype; this->blob_bottom_vec_.push_back(this->blob_bottom_); LayerParameter layer_param; @@ -82,9 +82,9 @@ TYPED_TEST(InnerProductLayerTest, TestSetUpTranposeFalse) { EXPECT_EQ(60, layer->blobs()[0]->shape(1)); } -/** @brief TestSetUp while toggling tranpose flag +/** @brief TestSetUp while toggling transpose flag */ -TYPED_TEST(InnerProductLayerTest, TestSetUpTranposeTrue) { +TYPED_TEST(InnerProductLayerTest, TestSetUpTransposeTrue) { typedef typename TypeParam::Dtype Dtype; this->blob_bottom_vec_.push_back(this->blob_bottom_); LayerParameter layer_param; @@ -339,7 +339,7 @@ TYPED_TEST(InnerProductLayerTest, TestBackwardTranspose) { // copy bottom diffs Blob* const bottom_diff = new Blob(); bottom_diff->CopyFrom(*this->blob_bottom_vec_[0], true, true); - // repeat original top with tranposed ip + // repeat original top with transposed ip this->blob_top_vec_.clear(); this->blob_top_vec_.push_back(new Blob()); inner_product_param->set_transpose(true); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 92fd317fee8..24b957f2acc 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -9,6 +9,7 @@ #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/net.hpp" +#include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/test/test_caffe_main.hpp" @@ -29,6 +30,17 @@ class NetTest : public MultiDeviceTest { net_.reset(new Net(param)); } + virtual void InitNetFromProtoFileWithState(const string& proto, + Phase phase = caffe::TRAIN, const int level = 0, + const vector* stages = NULL) { + NetParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + string param_file; + MakeTempFilename(¶m_file); + WriteProtoToTextFile(param, param_file); + net_.reset(new Net(param_file, phase, level, stages)); + } + virtual void CopyNetBlobs(const bool copy_diff, vector > >* blobs_copy) { CHECK(net_); @@ -771,6 +783,62 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } + virtual void InitAllInOneNet(Phase phase = caffe::TRAIN, + const int level = 0, const vector* stages = NULL) { + string proto = + "name: 'All-in-one Network'" + "layer { " + " name: 'train-data' " + " type: 'DummyData' " + " top: 'data' " + " top: 'label' " + " dummy_data_param { " + " shape { dim: 1 dim: 10 } " + " shape { dim: 1 dim: 1 } " + " } " + " include { phase: TRAIN stage: 'train' } " + "} " + "layer { " + " name: 'val-data' " + " type: 'DummyData' " + " top: 'data' " + " top: 'label' " + " dummy_data_param { " + " shape { dim: 1 dim: 10 } " + " shape { dim: 1 dim: 1 } " + " } " + " include { phase: TEST stage: 'val' } " + "} " + "layer { " + " name: 'deploy-data' " + " type: 'Input' " + " top: 
'data' " + " input_param { " + " shape { dim: 1 dim: 10 } " + " } " + " include { phase: TEST stage: 'deploy' } " + "} " + "layer { " + " name: 'ip' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'ip' " + " inner_product_param { " + " num_output: 2 " + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip' " + " bottom: 'label' " + " top: 'loss' " + " include { phase: TRAIN stage: 'train' } " + " include { phase: TEST stage: 'val' } " + "} "; + InitNetFromProtoFileWithState(proto, phase, level, stages); + } + int seed_; shared_ptr > net_; }; @@ -2473,4 +2541,64 @@ TYPED_TEST(NetTest, TestForcePropagateDown) { } } +TYPED_TEST(NetTest, TestAllInOneNetTrain) { + vector stages; + stages.push_back("train"); + this->InitAllInOneNet(caffe::TRAIN, 0, &stages); + bool found_data = false; + bool found_loss = false; + for (int i = 0; i < this->net_->layers().size(); ++i) { + const string& layer_name = this->net_->layer_names()[i]; + if (layer_name == "train-data") { + found_data = true; + } else if (layer_name == "loss") { + found_loss = true; + } else { + ASSERT_NE(layer_name, "val-data"); + ASSERT_NE(layer_name, "deploy-data"); + } + } + ASSERT_TRUE(found_data); + ASSERT_TRUE(found_loss); +} + +TYPED_TEST(NetTest, TestAllInOneNetVal) { + vector stages; + stages.push_back("val"); + this->InitAllInOneNet(caffe::TEST, 0, &stages); + bool found_data = false; + bool found_loss = false; + for (int i = 0; i < this->net_->layers().size(); ++i) { + const string& layer_name = this->net_->layer_names()[i]; + if (layer_name == "val-data") { + found_data = true; + } else if (layer_name == "loss") { + found_loss = true; + } else { + ASSERT_NE(layer_name, "train-data"); + ASSERT_NE(layer_name, "deploy-data"); + } + } + ASSERT_TRUE(found_data); + ASSERT_TRUE(found_loss); +} + +TYPED_TEST(NetTest, TestAllInOneNetDeploy) { + vector stages; + stages.push_back("deploy"); + this->InitAllInOneNet(caffe::TEST, 0, &stages); + bool found_data = false; + for (int i = 0; i < this->net_->layers().size(); ++i) { + const string& layer_name = this->net_->layer_names()[i]; + if (layer_name == "deploy-data") { + found_data = true; + } else { + ASSERT_NE(layer_name, "train-data"); + ASSERT_NE(layer_name, "val-data"); + ASSERT_NE(layer_name, "loss"); + } + } + ASSERT_TRUE(found_data); +} + } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 342f825cec3..180871a29ee 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -791,16 +791,19 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2); // Check numbers for (int s = 0; s < blob_bottom_2->count(); ++s) { - EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]); + EXPECT_FLOAT_EQ(this->blob_bottom_->cpu_diff()[s], + blob_bottom_2->cpu_diff()[s]); } for (int s = 0; s < ip.blobs()[0]->count(); ++s) { - EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]); + EXPECT_FLOAT_EQ(ip.blobs()[0]->cpu_diff()[s], + ip2.blobs()[0]->cpu_diff()[s]); } for (int s = 0; s < ip.blobs()[1]->count(); ++s) { - EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]); + EXPECT_FLOAT_EQ(ip.blobs()[1]->cpu_diff()[s], + ip2.blobs()[1]->cpu_diff()[s]); } for (int s = 0; s < prelu.blobs()[0]->count(); ++s) { - EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s], + EXPECT_FLOAT_EQ(prelu.blobs()[0]->cpu_diff()[s], prelu2.blobs()[0]->cpu_diff()[s]); } } diff 
--git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp index 5dfd7656db2..1bd5f93796f 100644 --- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -116,5 +116,33 @@ TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) { this->blob_top_vec_, 0); } +TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestIgnoreGradient) { + typedef typename TypeParam::Dtype Dtype; + FillerParameter data_filler_param; + data_filler_param.set_std(1); + GaussianFiller data_filler(data_filler_param); + data_filler.Fill(this->blob_bottom_data_); + LayerParameter layer_param; + LossParameter* loss_param = layer_param.mutable_loss_param(); + loss_param->set_ignore_label(-1); + Dtype* target = this->blob_bottom_targets_->mutable_cpu_data(); + const int count = this->blob_bottom_targets_->count(); + // Ignore half of targets, then check that diff of this half is zero, + // while the other half is nonzero. + caffe_set(count / 2, Dtype(-1), target); + SigmoidCrossEntropyLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + vector propagate_down(2); + propagate_down[0] = true; + propagate_down[1] = false; + layer.Backward(this->blob_top_vec_, propagate_down, this->blob_bottom_vec_); + const Dtype* diff = this->blob_bottom_data_->cpu_diff(); + for (int i = 0; i < count / 2; ++i) { + EXPECT_FLOAT_EQ(diff[i], 0.); + EXPECT_NE(diff[i + count / 2], 0.); + } +} + } // namespace caffe diff --git a/src/caffe/test/test_upgrade_proto.cpp b/src/caffe/test/test_upgrade_proto.cpp index 9dcc2aa55ec..769112ebfe7 100644 --- a/src/caffe/test/test_upgrade_proto.cpp +++ b/src/caffe/test/test_upgrade_proto.cpp @@ -2952,6 +2952,8 @@ TEST_F(SolverTypeUpgradeTest, TestSimple) { for (int i = 0; i < 6; ++i) { const string& input_proto = "net: 'examples/mnist/lenet_train_test.prototxt' " + "weights: 'examples/mnist/lenet_train_test1.caffemodel' " + "weights: 'examples/mnist/lenet_train_test2.caffemodel' " "test_iter: 100 " "test_interval: 500 " "base_lr: 0.01 " @@ -2968,6 +2970,8 @@ TEST_F(SolverTypeUpgradeTest, TestSimple) { "solver_type: " + std::string(old_type_vec[i]) + " "; const string& expected_output_proto = "net: 'examples/mnist/lenet_train_test.prototxt' " + "weights: 'examples/mnist/lenet_train_test1.caffemodel' " + "weights: 'examples/mnist/lenet_train_test2.caffemodel' " "test_iter: 100 " "test_interval: 500 " "base_lr: 0.01 " diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index 1d269c351c1..d994225f97b 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -44,7 +44,6 @@ void Timer::Stop() { if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY CUDA_CHECK(cudaEventRecord(stop_gpu_, 0)); - CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); #else NO_GPU; #endif @@ -66,6 +65,7 @@ float Timer::MicroSeconds() { } if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY + CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, stop_gpu_)); // Cuda only measure milliseconds @@ -89,6 +89,7 @@ float Timer::MilliSeconds() { } if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY + CUDA_CHECK(cudaEventSynchronize(stop_gpu_)); CUDA_CHECK(cudaEventElapsedTime(&elapsed_milliseconds_, start_gpu_, stop_gpu_)); #else diff --git a/src/caffe/util/blocking_queue.cpp b/src/caffe/util/blocking_queue.cpp index 
058668fe28c..f69d210459c 100644 --- a/src/caffe/util/blocking_queue.cpp +++ b/src/caffe/util/blocking_queue.cpp @@ -1,7 +1,6 @@ #include #include -#include "caffe/data_reader.hpp" #include "caffe/layers/base_data_layer.hpp" #include "caffe/parallel.hpp" #include "caffe/util/blocking_queue.hpp" @@ -88,9 +87,5 @@ size_t BlockingQueue::size() const { template class BlockingQueue*>; template class BlockingQueue*>; -template class BlockingQueue; -template class BlockingQueue >; -template class BlockingQueue*>; -template class BlockingQueue*>; } // namespace caffe diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index fb1d4956aa1..491a9bd03a6 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -32,7 +32,7 @@ void LMDB::Open(const string& source, Mode mode) { MDB_CHECK(rc); } #endif - LOG(INFO) << "Opened lmdb " << source; + LOG_IF(INFO, Caffe::root_solver()) << "Opened lmdb " << source; } LMDBCursor* LMDB::NewCursor() { diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp index 7730e76ab87..ed73742937f 100644 --- a/src/caffe/util/hdf5.cpp +++ b/src/caffe/util/hdf5.cpp @@ -9,7 +9,7 @@ namespace caffe { template void hdf5_load_nd_dataset_helper( hid_t file_id, const char* dataset_name_, int min_dim, int max_dim, - Blob* blob) { + Blob* blob, bool reshape) { // Verify that the dataset exists. CHECK(H5LTfind_dataset(file_id, dataset_name_)) << "Failed to find HDF5 dataset " << dataset_name_; @@ -29,10 +29,10 @@ void hdf5_load_nd_dataset_helper( CHECK_GE(status, 0) << "Failed to get dataset info for " << dataset_name_; switch (class_) { case H5T_FLOAT: - LOG_FIRST_N(INFO, 1) << "Datatype class: H5T_FLOAT"; + { LOG_FIRST_N(INFO, 1) << "Datatype class: H5T_FLOAT"; } break; case H5T_INTEGER: - LOG_FIRST_N(INFO, 1) << "Datatype class: H5T_INTEGER"; + { LOG_FIRST_N(INFO, 1) << "Datatype class: H5T_INTEGER"; } break; case H5T_TIME: LOG(FATAL) << "Unsupported datatype class: H5T_TIME"; @@ -56,17 +56,38 @@ void hdf5_load_nd_dataset_helper( LOG(FATAL) << "Datatype class unknown"; } + vector blob_dims(dims.size()); for (int i = 0; i < dims.size(); ++i) { blob_dims[i] = dims[i]; } - blob->Reshape(blob_dims); + + if (reshape) { + blob->Reshape(blob_dims); + } else { + if (blob_dims != blob->shape()) { + // create shape string for error message + ostringstream stream; + int count = 1; + for (int i = 0; i < blob_dims.size(); ++i) { + stream << blob_dims[i] << " "; + count = count * blob_dims[i]; + } + stream << "(" << count << ")"; + string source_shape_string = stream.str(); + + CHECK(blob_dims == blob->shape()) << "Cannot load blob from hdf5; shape " + << "mismatch. 
Source shape is " << source_shape_string + << " target shape is " << blob->shape_string(); + } + } } template <> void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, - int min_dim, int max_dim, Blob* blob) { - hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob); + int min_dim, int max_dim, Blob* blob, bool reshape) { + hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob, + reshape); herr_t status = H5LTread_dataset_float( file_id, dataset_name_, blob->mutable_cpu_data()); CHECK_GE(status, 0) << "Failed to read float dataset " << dataset_name_; @@ -74,8 +95,9 @@ void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, template <> void hdf5_load_nd_dataset(hid_t file_id, const char* dataset_name_, - int min_dim, int max_dim, Blob* blob) { - hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob); + int min_dim, int max_dim, Blob* blob, bool reshape) { + hdf5_load_nd_dataset_helper(file_id, dataset_name_, min_dim, max_dim, blob, + reshape); herr_t status = H5LTread_dataset_double( file_id, dataset_name_, blob->mutable_cpu_data()); CHECK_GE(status, 0) << "Failed to read double dataset " << dataset_name_; diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 71c02274a75..59625bc05ce 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -196,6 +196,16 @@ void caffe_sqr(const int n, const double* a, double* y) { vdSqr(n, a, y); } +template <> +void caffe_sqrt(const int n, const float* a, float* y) { + vsSqrt(n, a, y); +} + +template <> +void caffe_sqrt(const int n, const double* a, double* y) { + vdSqrt(n, a, y); +} + template <> void caffe_exp(const int n, const float* a, float* y) { vsExp(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 4c587537435..314e6ba0f63 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -90,6 +90,26 @@ void caffe_gpu_scal(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } +template <> +void caffe_gpu_scal(const int N, const float alpha, float* X, + cudaStream_t str) { + cudaStream_t initial_stream; + CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); + CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); + CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); + CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); +} + +template <> +void caffe_gpu_scal(const int N, const double alpha, double* X, + cudaStream_t str) { + cudaStream_t initial_stream; + CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); + CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); + CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); + CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); +} + template <> void caffe_gpu_axpby(const int N, const float alpha, const float* X, const float beta, float* Y) { @@ -367,6 +387,27 @@ void caffe_gpu_powx(const int N, const double* a, N, a, alpha, y); } +template +__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = sqrt(a[index]); + } +} + +template <> +void caffe_gpu_sqrt(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + sqrt_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_sqrt(const int N, const double* a, double* y) { + // 
NOLINT_NEXT_LINE(whitespace/operators) + sqrt_kernel<<>>( + N, a, y); +} + DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 9e186915b43..ad40b73d295 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -2,6 +2,8 @@ #include #include +#include + #include #include @@ -14,7 +16,8 @@ namespace caffe { bool NetNeedsUpgrade(const NetParameter& net_param) { return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param) - || NetNeedsDataUpgrade(net_param) || NetNeedsInputUpgrade(net_param); + || NetNeedsDataUpgrade(net_param) || NetNeedsInputUpgrade(net_param) + || NetNeedsBatchNormUpgrade(net_param); } bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) { @@ -71,6 +74,14 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) { LOG(WARNING) << "Note that future Caffe releases will only support " << "input layers and not input fields."; } + // NetParameter uses old style batch norm layers; try to upgrade it. + if (NetNeedsBatchNormUpgrade(*param)) { + LOG(INFO) << "Attempting to upgrade batch norm layers using deprecated " + << "params: " << param_file; + UpgradeNetBatchNorm(param); + LOG(INFO) << "Successfully upgraded batch norm layers using deprecated " + << "params."; + } return success; } @@ -991,6 +1002,35 @@ void UpgradeNetInput(NetParameter* net_param) { net_param->clear_input_dim(); } +bool NetNeedsBatchNormUpgrade(const NetParameter& net_param) { + for (int i = 0; i < net_param.layer_size(); ++i) { + // Check if BatchNorm layers declare three parameters, as required by + // the previous BatchNorm layer definition. + if (net_param.layer(i).type() == "BatchNorm" + && net_param.layer(i).param_size() == 3) { + return true; + } + } + return false; +} + +void UpgradeNetBatchNorm(NetParameter* net_param) { + for (int i = 0; i < net_param->layer_size(); ++i) { + // Check if BatchNorm layers declare three parameters, as required by + // the previous BatchNorm layer definition. + if (net_param->layer(i).type() == "BatchNorm" + && net_param->layer(i).param_size() == 3) { + // set lr_mult and decay_mult to zero. leave all other param intact. 
+ for (int ip = 0; ip < net_param->layer(i).param_size(); ip++) { + ParamSpec* fixed_param_spec = + net_param->mutable_layer(i)->mutable_param(ip); + fixed_param_spec->set_lr_mult(0.f); + fixed_param_spec->set_decay_mult(0.f); + } + } + } +} + // Return true iff the solver contains any old solver_type specified as enums bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param) { if (solver_param.has_solver_type()) { @@ -1057,12 +1097,31 @@ bool UpgradeSolverAsNeeded(const string& param_file, SolverParameter* param) { return success; } +// Replaces snapshot_prefix of SolverParameter if it is not specified +// or is set to directory +void UpgradeSnapshotPrefixProperty(const string& param_file, + SolverParameter* param) { + using boost::filesystem::path; + using boost::filesystem::is_directory; + if (!param->has_snapshot_prefix()) { + param->set_snapshot_prefix(path(param_file).replace_extension().string()); + LOG(INFO) << "snapshot_prefix was not specified and is set to " + + param->snapshot_prefix(); + } else if (is_directory(param->snapshot_prefix())) { + param->set_snapshot_prefix((path(param->snapshot_prefix()) / + path(param_file).stem()).string()); + LOG(INFO) << "snapshot_prefix was a directory and is replaced to " + + param->snapshot_prefix(); + } +} + // Read parameters from a file into a SolverParameter proto message. void ReadSolverParamsFromTextFileOrDie(const string& param_file, SolverParameter* param) { CHECK(ReadProtoFromTextFile(param_file, param)) << "Failed to parse SolverParameter file: " << param_file; UpgradeSolverAsNeeded(param_file, param); + UpgradeSnapshotPrefixProperty(param_file, param); } } // namespace caffe diff --git a/src/gtest/CMakeLists.txt b/src/gtest/CMakeLists.txt index ef7ff7ed14b..e98254af130 100644 --- a/src/gtest/CMakeLists.txt +++ b/src/gtest/CMakeLists.txt @@ -1,5 +1,8 @@ add_library(gtest STATIC EXCLUDE_FROM_ALL gtest.h gtest-all.cpp) caffe_default_properties(gtest) +target_include_directories(gtest PUBLIC ${Caffe_SRC_DIR}) +target_compile_definitions(gtest PUBLIC -DGTEST_USE_OWN_TR1_TUPLE) + #add_library(gtest_main gtest_main.cc) #target_link_libraries(gtest_main gtest) diff --git a/src/gtest/gtest-all.cpp b/src/gtest/gtest-all.cpp index 926197419fc..81cdb578cd5 100644 --- a/src/gtest/gtest-all.cpp +++ b/src/gtest/gtest-all.cpp @@ -2697,7 +2697,7 @@ AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT // Utility functions for encoding Unicode text (wide strings) in // UTF-8. -// A Unicode code-point can have upto 21 bits, and is encoded in UTF-8 +// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8 // like this: // // Code-point length Encoding @@ -7550,7 +7550,7 @@ FilePath FilePath::RemoveExtension(const char* extension) const { return *this; } -// Returns a pointer to the last occurence of a valid path separator in +// Returns a pointer to the last occurrence of a valid path separator in // the FilePath. On Windows, for example, both '/' and '\' are valid path // separators. Returns NULL if no path separator was found. const char* FilePath::FindLastPathSeparator() const { diff --git a/src/gtest/gtest.h b/src/gtest/gtest.h index 3143bd67996..124fb2321f9 100644 --- a/src/gtest/gtest.h +++ b/src/gtest/gtest.h @@ -3395,7 +3395,7 @@ class GTEST_API_ FilePath { void Normalize(); - // Returns a pointer to the last occurence of a valid path separator in + // Returns a pointer to the last occurrence of a valid path separator in // the FilePath. 
On Windows, for example, both '/' and '\' are valid path // separators. Returns NULL if no path separator was found. const char* FindLastPathSeparator() const; diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 02fbd5cadd8..3789450555e 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -25,5 +25,6 @@ foreach(source ${srcs}) endif() # Install - install(TARGETS ${name} DESTINATION bin) + install(TARGETS ${name} DESTINATION ${CMAKE_INSTALL_BINDIR}) + endforeach(source) diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 5bb60eb161d..389cfb8a99e 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -34,6 +34,13 @@ DEFINE_string(solver, "", "The solver definition protocol buffer text file."); DEFINE_string(model, "", "The model definition protocol buffer text file."); +DEFINE_string(phase, "", + "Optional; network phase (TRAIN or TEST). Only used for 'time'."); +DEFINE_int32(level, 0, + "Optional; network level."); +DEFINE_string(stage, "", + "Optional; network stages (not to be confused with phase), " + "separated by ','."); DEFINE_string(snapshot, "", "Optional; the snapshot solver state to resume training."); DEFINE_string(weights, "", @@ -101,6 +108,25 @@ static void get_gpus(vector* gpus) { } } +// Parse phase from flags +caffe::Phase get_phase_from_flags(caffe::Phase default_value) { + if (FLAGS_phase == "") + return default_value; + if (FLAGS_phase == "TRAIN") + return caffe::TRAIN; + if (FLAGS_phase == "TEST") + return caffe::TEST; + LOG(FATAL) << "phase must be \"TRAIN\" or \"TEST\""; + return caffe::TRAIN; // Avoid warning +} + +// Parse stages from flags +vector get_stages_from_flags() { + vector stages; + boost::split(stages, FLAGS_stage, boost::is_any_of(",")); + return stages; +} + // caffe commands to call by // caffe // @@ -120,20 +146,6 @@ int device_query() { } RegisterBrewFunction(device_query); -// Load the weights from the specified caffemodel(s) into the train and -// test nets. -void CopyLayers(caffe::Solver* solver, const std::string& model_list) { - std::vector model_names; - boost::split(model_names, model_list, boost::is_any_of(",") ); - for (int i = 0; i < model_names.size(); ++i) { - LOG(INFO) << "Finetuning from " << model_names[i]; - solver->net()->CopyTrainedLayersFrom(model_names[i]); - for (int j = 0; j < solver->test_nets().size(); ++j) { - solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]); - } - } -} - // Translate the signal effect the user specified on the command-line to the // corresponding enumeration. caffe::SolverAction::Enum GetRequestedAction( @@ -156,13 +168,20 @@ int train() { CHECK(!FLAGS_snapshot.size() || !FLAGS_weights.size()) << "Give a snapshot to resume training or weights to finetune " "but not both."; + vector stages = get_stages_from_flags(); caffe::SolverParameter solver_param; caffe::ReadSolverParamsFromTextFileOrDie(FLAGS_solver, &solver_param); + solver_param.mutable_train_state()->set_level(FLAGS_level); + for (int i = 0; i < stages.size(); i++) { + solver_param.mutable_train_state()->add_stage(stages[i]); + } + // If the gpus flag is not provided, allow the mode and device to be set // in the solver prototxt. 
if (FLAGS_gpu.size() == 0 + && solver_param.has_solver_mode() && solver_param.solver_mode() == caffe::SolverParameter_SolverMode_GPU) { if (solver_param.has_device_id()) { FLAGS_gpu = "" + @@ -200,6 +219,13 @@ int train() { GetRequestedAction(FLAGS_sigint_effect), GetRequestedAction(FLAGS_sighup_effect)); + if (FLAGS_snapshot.size()) { + solver_param.clear_weights(); + } else if (FLAGS_weights.size()) { + solver_param.clear_weights(); + solver_param.add_weights(FLAGS_weights); + } + shared_ptr > solver(caffe::SolverRegistry::CreateSolver(solver_param)); @@ -208,15 +234,17 @@ int train() { if (FLAGS_snapshot.size()) { LOG(INFO) << "Resuming from " << FLAGS_snapshot; solver->Restore(FLAGS_snapshot.c_str()); - } else if (FLAGS_weights.size()) { - CopyLayers(solver.get(), FLAGS_weights); } + LOG(INFO) << "Starting Optimization"; if (gpus.size() > 1) { - caffe::P2PSync sync(solver, NULL, solver->param()); - sync.Run(gpus); +#ifdef USE_NCCL + caffe::NCCL nccl(solver); + nccl.Run(gpus, FLAGS_snapshot.size() > 0 ? FLAGS_snapshot.c_str() : NULL); +#else + LOG(FATAL) << "Multi-GPU execution not available - rebuild with USE_NCCL"; +#endif } else { - LOG(INFO) << "Starting Optimization"; solver->Solve(); } LOG(INFO) << "Optimization Done."; @@ -229,6 +257,7 @@ RegisterBrewFunction(train); int test() { CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score."; CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score."; + vector stages = get_stages_from_flags(); // Set device id and mode vector gpus; @@ -247,7 +276,7 @@ int test() { Caffe::set_mode(Caffe::CPU); } // Instantiate the caffe net. - Net caffe_net(FLAGS_model, caffe::TEST); + Net caffe_net(FLAGS_model, caffe::TEST, FLAGS_level, &stages); caffe_net.CopyTrainedLayersFrom(FLAGS_weights); LOG(INFO) << "Running for " << FLAGS_iterations << " iterations."; @@ -300,6 +329,8 @@ RegisterBrewFunction(test); // Time: benchmark the execution time of a model. int time() { CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to time."; + caffe::Phase phase = get_phase_from_flags(caffe::TRAIN); + vector stages = get_stages_from_flags(); // Set device id and mode vector gpus; @@ -313,7 +344,7 @@ int time() { Caffe::set_mode(Caffe::CPU); } // Instantiate the caffe net. - Net caffe_net(FLAGS_model, caffe::TRAIN); + Net caffe_net(FLAGS_model, phase, FLAGS_level, &stages); // Do a clean forward and backward pass, so that memory allocation are done // and future iterations will be more stable. 
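The flag plumbing above means one prototxt can now serve training, evaluation, and deployment through the new -phase, -level, and -stage options. A minimal sketch of what `caffe time -model net.prototxt -phase TEST -stage deploy` amounts to internally; the file name and flag values here are hypothetical and hard-coded for illustration:

#include <string>
#include <vector>
#include <boost/algorithm/string.hpp>
#include "caffe/net.hpp"

int main() {
  // Split the comma-separated -stage flag, as get_stages_from_flags() does.
  std::string stage_flag = "deploy";
  std::vector<std::string> stages;
  boost::split(stages, stage_flag, boost::is_any_of(","));
  // Only layers whose include rules match phase TEST and stage "deploy" are
  // instantiated.
  caffe::Net<float> net("net.prototxt", caffe::TEST, /*level=*/0, &stages);
  net.Forward();
  return 0;
}

On the training side, the multi-GPU path now goes through caffe::NCCL rather than the removed P2PSync, so requesting more than one GPU in a build without USE_NCCL fails fast with the LOG(FATAL) shown in train().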
diff --git a/tools/compute_image_mean.cpp b/tools/compute_image_mean.cpp
index 2035d515195..417f5e4c622 100644
--- a/tools/compute_image_mean.cpp
+++ b/tools/compute_image_mean.cpp
@@ -22,9 +22,11 @@ DEFINE_string(backend, "lmdb",
         "The backend {leveldb, lmdb} containing the images");
 
 int main(int argc, char** argv) {
+#ifdef USE_OPENCV
   ::google::InitGoogleLogging(argv[0]);
+  // Print output to stderr (while still logging)
+  FLAGS_alsologtostderr = 1;
 
-#ifdef USE_OPENCV
 #ifndef GFLAGS_GFLAGS_H_
   namespace gflags = google;
 #endif
@@ -65,7 +67,7 @@ int main(int argc, char** argv) {
   for (int i = 0; i < size_in_datum; ++i) {
     sum_blob.add_data(0.);
   }
-  LOG(INFO) << "Starting Iteration";
+  LOG(INFO) << "Starting iteration";
   while (cursor->valid()) {
     Datum datum;
     datum.ParseFromString(cursor->value());
@@ -114,7 +116,7 @@ int main(int argc, char** argv) {
     for (int i = 0; i < dim; ++i) {
       mean_values[c] += sum_blob.data(dim * c + i);
     }
-    LOG(INFO) << "mean_value channel [" << c << "]:" << mean_values[c] / dim;
+    LOG(INFO) << "mean_value channel [" << c << "]: " << mean_values[c] / dim;
   }
 #else
   LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV.";
diff --git a/tools/extra/extract_seconds.py b/tools/extra/extract_seconds.py
index 591a51f96bd..68af69a2788 100755
--- a/tools/extra/extract_seconds.py
+++ b/tools/extra/extract_seconds.py
@@ -48,11 +48,19 @@ def extract_seconds(input_file, output_file):
     start_datetime = get_start_time(lines, log_created_year)
     assert start_datetime, 'Start time not found'
 
+    last_dt = start_datetime
     out = open(output_file, 'w')
     for line in lines:
         line = line.strip()
         if line.find('Iteration') != -1:
             dt = extract_datetime_from_line(line, log_created_year)
+
+            # if it's another year
+            if dt.month < last_dt.month:
+                log_created_year += 1
+                dt = extract_datetime_from_line(line, log_created_year)
+            last_dt = dt
+
             elapsed_seconds = (dt - start_datetime).total_seconds()
             out.write('%f\n' % elapsed_seconds)
     out.close()
diff --git a/tools/extra/parse_log.py b/tools/extra/parse_log.py
index bb9b65ad615..4248e2b87a3 100755
--- a/tools/extra/parse_log.py
+++ b/tools/extra/parse_log.py
@@ -16,13 +16,10 @@
 def parse_log(path_to_log):
     """Parse log file
 
-    Returns (train_dict_list, train_dict_names, test_dict_list, test_dict_names)
+    Returns (train_dict_list, test_dict_list)
 
     train_dict_list and test_dict_list are lists of dicts that define the table
    rows
-
-    train_dict_names and test_dict_names are ordered tuples of the column names
-    for the two dict_lists
     """
 
     regex_iteration = re.compile('Iteration (\d+)')
@@ -41,6 +38,7 @@ def parse_log(path_to_log):
     logfile_year = extract_seconds.get_log_created_year(path_to_log)
     with open(path_to_log) as f:
         start_time = extract_seconds.get_start_time(f, logfile_year)
+        last_time = start_time
 
         for line in f:
             iteration_match = regex_iteration.search(line)
@@ -51,8 +49,19 @@ def parse_log(path_to_log):
                 # iteration
                 continue
 
-            time = extract_seconds.extract_datetime_from_line(line,
-                                                              logfile_year)
+            try:
+                time = extract_seconds.extract_datetime_from_line(line,
+                                                                  logfile_year)
+            except ValueError:
+                # Skip lines with bad formatting, for example when resuming solver
+                continue
+
+            # if it's another year
+            if time.month < last_time.month:
+                logfile_year += 1
+                time = extract_seconds.extract_datetime_from_line(line, logfile_year)
+            last_time = time
+
             seconds = (time - start_time).total_seconds()
 
             learning_rate_match = regex_learning_rate.search(line)
@@ -194,7 +203,7 @@ def main():
     args = parse_args()
     train_dict_list, test_dict_list = parse_log(args.logfile_path)
     save_csv_files(args.logfile_path, args.output_dir, train_dict_list,
-                   test_dict_list, delimiter=args.delimiter)
+                   test_dict_list, delimiter=args.delimiter, verbose=args.verbose)
 
 
 if __name__ == '__main__':
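Both the extract_seconds.py and parse_log.py hunks apply the same year-rollover guard: glog timestamps carry no year, so when the parsed month suddenly decreases, the log must have crossed into a new year. A standalone sketch of the technique, where `parse` is a hypothetical stand-in for `extract_seconds.extract_datetime_from_line`:

```python
def datetime_with_rollover(parse, line, year, last_dt):
    """Parse a year-less glog timestamp, bumping `year` when the month decreases.

    `parse` is any callable (line, year) -> datetime; a hypothetical stand-in
    for extract_seconds.extract_datetime_from_line. Returns (datetime, year).
    """
    dt = parse(line, year)
    if dt.month < last_dt.month:  # e.g. Dec 31 -> Jan 1: assume a new year began
        year += 1
        dt = parse(line, year)   # re-parse with the corrected year
    return dt, year
```

This relies on log lines being in chronological order and on no single gap spanning a full year, a fair assumption for training logs.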
diff --git a/tools/extra/parse_log.sh b/tools/extra/parse_log.sh
index 9892c897682..122eb9e6eed 100755
--- a/tools/extra/parse_log.sh
+++ b/tools/extra/parse_log.sh
@@ -39,7 +39,7 @@
 rm aux.txt aux0.txt aux1.txt aux2.txt aux3.txt aux4.txt
 grep '] Solving ' $1 > aux.txt
 grep ', loss = ' $1 >> aux.txt
 grep 'Iteration ' aux.txt | sed 's/.*Iteration \([[:digit:]]*\).*/\1/g' > aux0.txt
-grep ', loss = ' $1 | awk '{print $9}' > aux1.txt
+grep ', loss = ' $1 | awk -F = '{print $2}' > aux1.txt
 grep ', lr = ' $1 | awk '{print $9}' > aux2.txt
 
 # Extracting elapsed seconds
diff --git a/tools/extra/plot_log.gnuplot.example b/tools/extra/plot_log.gnuplot.example
index 748b96e6925..02c68e1d24f 100644
--- a/tools/extra/plot_log.gnuplot.example
+++ b/tools/extra/plot_log.gnuplot.example
@@ -4,7 +4,7 @@
 # Be warned that the fields in the training log may change in the future.
 # You had better check the data files before designing your own plots.
 
-# Please generate the neccessary data files with
+# Please generate the necessary data files with
 # /path/to/caffe/tools/extra/parse_log.sh before plotting.
 # Example usage:
 #     ./parse_log.sh mnist.log
diff --git a/tools/extra/plot_training_log.py.example b/tools/extra/plot_training_log.py.example
index 79924ae5a5a..8caca6b8a67 100755
--- a/tools/extra/plot_training_log.py.example
+++ b/tools/extra/plot_training_log.py.example
@@ -90,9 +90,9 @@ def load_data(data_file, field_idx0, field_idx1):
 
 def random_marker():
     markers = mks.MarkerStyle.markers
-    num = len(markers.values())
+    num = len(markers.keys())
     idx = random.randint(0, num - 1)
-    return markers.values()[idx]
+    return markers.keys()[idx]
 
 def get_data_label(path_to_log):
     label = path_to_log[path_to_log.rfind('/')+1 : path_to_log.rfind(
@@ -126,16 +126,9 @@ def plot_chart(chart_type, path_to_png, path_to_log_list):
             plt.plot(data[0], data[1], label = label, color = color,
                      linewidth = linewidth)
         else:
-            ok = False
-            ## Some markers throw ValueError: Unrecognized marker style
-            while not ok:
-                try:
-                    marker = random_marker()
-                    plt.plot(data[0], data[1], label = label, color = color,
-                             marker = marker, linewidth = linewidth)
-                    ok = True
-                except:
-                    pass
+            marker = random_marker()
+            plt.plot(data[0], data[1], label = label, color = color,
+                     marker = marker, linewidth = linewidth)
     legend_loc = get_legend_loc(chart_type)
     plt.legend(loc = legend_loc, ncol = 1) # ajust ncol to fit the space
     plt.title(get_chart_type_description(chart_type))
diff --git a/tools/extra/resize_and_crop_images.py b/tools/extra/resize_and_crop_images.py
index c844f590c06..fd2c3134edb 100755
--- a/tools/extra/resize_and_crop_images.py
+++ b/tools/extra/resize_and_crop_images.py
@@ -101,7 +101,7 @@ def map(self, key, value):
             yield value, FLAGS.output_folder
 
 mapreducer.REGISTER_DEFAULT_MAPPER(ResizeCropImagesMapper)
-
+mapreducer.REGISTER_DEFAULT_REDUCER(mapreducer.NoPassReducer)
 mapreducer.REGISTER_DEFAULT_READER(mapreducer.FileReader)
 mapreducer.REGISTER_DEFAULT_WRITER(mapreducer.FileWriter)
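One note on the random_marker() hunk above: mks.MarkerStyle.markers is a dict keyed by marker symbols ('o', 'v', ...), with human-readable descriptions as values, so sampling the values handed matplotlib strings like 'circle' and triggered the ValueError the deleted try/except loop was papering over. A minimal sketch of the same fix, written so it also works on Python 3 (where dict views are not indexable); the extra list() call is my addition, not part of the patch:

```python
import random
import matplotlib.markers as mks

def random_marker():
    # MarkerStyle.markers maps marker symbols (keys) to descriptions (values);
    # plt.plot() expects the symbol, so sample from the keys.  list() keeps
    # this indexable on Python 3 as well.
    return random.choice(list(mks.MarkerStyle.markers.keys()))
```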