Commit 52377b8

Merge pull request BVLC#9 from leizhangcn/master

Solve linker issue and update managed wrapper

zer0n committed Jan 15, 2016
2 parents 0c6cda2 + 20351ef
Showing 10 changed files with 318 additions and 8 deletions.
5 changes: 4 additions & 1 deletion include/caffe/layer_factory.hpp
@@ -45,6 +45,7 @@

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/linker_hooks.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {
@@ -124,9 +125,11 @@ class LayerRegisterer {
};


// If the following macro produces a compiler error, add a hook for the layer in linker_hooks.hpp.
#define REGISTER_LAYER_CREATOR(type, creator) \
  static LayerRegisterer<float> g_creator_f_##type(#type, creator<float>); \
  static LayerRegisterer<double> g_creator_d_##type(#type, creator<double>); \
  ENSURE_HOOKED(type)

#define REGISTER_LAYER_CLASS(type) \
  template <typename Dtype> \
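To make the macro interplay concrete, here is a sketch (not part of the commit; the layer name and creator function are hypothetical) of roughly what REGISTER_LAYER_CREATOR expands to for a Foo layer when _MSC_VER is defined:

// Hypothetical expansion of REGISTER_LAYER_CREATOR(Foo, CreateFooLayer) -- illustration only.
// The two registrars add the creator to the layer registry at static-initialization time.
static LayerRegisterer<float> g_creator_f_Foo("Foo", CreateFooLayer<float>);
static LayerRegisterer<double> g_creator_d_Foo("Foo", CreateFooLayer<double>);
// ENSURE_HOOKED(Foo) then defines the variable that CREATE_HOOK(Foo) declared
// extern in linker_hooks.hpp. If Foo is missing from the hook list, the typedef
// FooClass_hook does not exist and this definition fails to compile -- the
// compiler error the comment above the macro refers to.
FooClass_hook FooClass_hook_instance;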
114 changes: 114 additions & 0 deletions include/caffe/linker_hooks.hpp
@@ -0,0 +1,114 @@
// Layers are typically instantiated through the LayerRegistry::CreateLayer()
// factory method (see layer_factory.hpp). This mechanism assumes that all
// object files (even those that don't contain any referenced symbols) get
// linked into the final executable. With GCC this behavior is forced by
// passing --whole-archive to the linker. Unfortunately, the Visual Studio
// linker has no similar option. To work around that, a dummy variable (a
// "hook") is defined in the layer's .cpp file through the
// REGISTER_LAYER_CREATOR/ENSURE_HOOKED macros and then referenced in net.cpp
// through the USE_HOOK macro, which forces the entire .obj file for the
// layer to be linked.
//
// The same logic applies to solvers.

#pragma once

#if defined(_MSC_VER)

namespace caffe {

// Macro that defines the hook variable declared by CREATE_HOOK; it is emitted
// into the layer's (or solver's) .cpp file by the REGISTER_*_CREATOR macros.
#define ENSURE_HOOKED(X) X##Class_hook X##Class_hook_instance;

// Macro that declares an external variable to be used as a hook, ensuring that
// the OBJ file gets linked into the final binary. The typedef exists only so
// that every class must be hooked (otherwise ENSURE_HOOKED fails to compile).
#define CREATE_HOOK(X) \
  typedef int X##Class_hook; \
  extern X##Class_hook X##Class_hook_instance

// DummyFunction ensures that an external symbol is used in the executable,
// so that the object file where the symbol is defined gets linked in.
template<class T>
void DummyFunction(T) {}

// Macro that uses an external hook symbol in the executable, ensuring that
// the object file defining it gets linked into the resulting binary.
#define USE_HOOK(X) caffe::DummyFunction(caffe::X##Class_hook_instance);

// Macro that applies another macro to all standard layer types.
#define FOR_ALL_STANDARD_LAYERS(FUNC) \
  FUNC(AbsVal); \
  FUNC(Accuracy); \
  FUNC(ArgMax); \
  FUNC(BatchNorm); \
  FUNC(BatchReindex); \
  FUNC(BNLL); \
  FUNC(Concat); \
  FUNC(ContrastiveLoss); \
  FUNC(Convolution); \
  FUNC(Deconvolution); \
  FUNC(Dropout); \
  FUNC(DummyData); \
  FUNC(Eltwise); \
  FUNC(EuclideanLoss); \
  FUNC(Exp); \
  FUNC(Embed); \
  FUNC(Filter); \
  FUNC(Flatten); \
  FUNC(Data); \
  FUNC(HDF5Data); \
  FUNC(HDF5Output); \
  FUNC(HingeLoss); \
  FUNC(Im2col); \
  FUNC(InfogainLoss); \
  FUNC(InnerProduct); \
  FUNC(Log); \
  FUNC(LRN); \
  FUNC(MemoryData); \
  FUNC(MIL); \
  FUNC(MILData); \
  FUNC(MultinomialLogisticLoss); \
  FUNC(MVN); \
  FUNC(Pooling); \
  FUNC(Power); \
  FUNC(PReLU); \
  FUNC(Reduction); \
  FUNC(ReLU); \
  FUNC(Reshape); \
  FUNC(Sigmoid); \
  FUNC(Silence); \
  FUNC(Slice); \
  FUNC(SigmoidCrossEntropyLoss); \
  FUNC(Softmax); \
  FUNC(SoftmaxWithLoss); \
  FUNC(Split); \
  FUNC(SPP); \
  FUNC(TanH); \
  FUNC(Threshold); \
  FUNC(Tile)

#ifdef WITH_PYTHON_LAYER
#define FOR_PYTHON_LAYER(FUNC) \
  FUNC(Python)
#else
#define FOR_PYTHON_LAYER(FUNC)
#endif

#define FOR_ALL_LAYERS(FUNC) \
  FOR_ALL_STANDARD_LAYERS(FUNC); \
  FOR_PYTHON_LAYER(FUNC)

#define FOR_ALL_SOLVERS(FUNC) \
  FUNC(AdaDelta); \
  FUNC(AdaGrad); \
  FUNC(Adam); \
  FUNC(Nesterov); \
  FUNC(RMSProp); \
  FUNC(SGD)

FOR_ALL_LAYERS(CREATE_HOOK);
FOR_ALL_SOLVERS(CREATE_HOOK);
}  // namespace caffe
#else
#define FOR_ALL_LAYERS(FUNC)
#define ENSURE_HOOKED(X)
#endif
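The mechanism is easiest to see outside of Caffe. A minimal two-file sketch (file and symbol names are assumptions, not from the commit) of why the linker drops registrar objects in a static library, and how a hook fixes it:

// registrar.cpp -- compiled into a static library. Nothing here is referenced
// by name from main.cpp, so the MSVC linker may discard the whole object file,
// and the constructor's registration side effect never happens.
struct Registrar {
  Registrar() { /* e.g. insert a factory function into a global map */ }
};
static Registrar g_registrar;
int registrar_hook_instance;  // the "hook": any symbol with external linkage

// main.cpp
extern int registrar_hook_instance;
template <class T> void DummyUse(T) {}
int main() {
  // Referencing the hook forces registrar.obj into the link, which also
  // pulls in g_registrar and runs its constructor.
  DummyUse(registrar_hook_instance);
}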
5 changes: 4 additions & 1 deletion include/caffe/solver_factory.hpp
@@ -43,6 +43,7 @@
#include <vector>

#include "caffe/common.hpp"
#include "caffe/linker_hooks.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {
@@ -119,9 +120,11 @@ class SolverRegisterer {
};


// If the following macro produces a compiler error, add a hook for the solver in linker_hooks.hpp.
#define REGISTER_SOLVER_CREATOR(type, creator) \
  static SolverRegisterer<float> g_creator_f_##type(#type, creator<float>); \
  static SolverRegisterer<double> g_creator_d_##type(#type, creator<double>); \
  ENSURE_HOOKED(type)

#define REGISTER_SOLVER_CLASS(type) \
  template <typename Dtype> \
34 changes: 28 additions & 6 deletions src/caffe/layers/memory_data_layer.cpp
@@ -19,10 +19,19 @@ void MemoryDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
  CHECK_GT(batch_size_ * size_, 0) <<
      "batch_size, channels, height, and width must be specified and"
      " positive in memory_data_param";
  int crop_size = transform_param_.crop_size();
  if (crop_size > 0) {
    top[0]->Reshape(batch_size_, channels_, crop_size, crop_size);
    added_data_.Reshape(batch_size_, channels_, crop_size, crop_size);
  } else {
    top[0]->Reshape(batch_size_, channels_, height_, width_);
    added_data_.Reshape(batch_size_, channels_, height_, width_);
  }
  vector<int> label_shape(1, batch_size_);
  top[1]->Reshape(label_shape);
  added_label_.Reshape(label_shape);
  data_ = NULL;
  labels_ = NULL;
@@ -38,7 +47,11 @@ void MemoryDataLayer<Dtype>::AddDatumVector(const vector<Datum>& datum_vector) {
  CHECK_GT(num, 0) << "There is no datum to add.";
  CHECK_EQ(num % batch_size_, 0) <<
      "The added data must be a multiple of the batch size.";
  int crop_size = transform_param_.crop_size();
  if (crop_size > 0)
    added_data_.Reshape(num, channels_, crop_size, crop_size);
  else
    added_data_.Reshape(num, channels_, height_, width_);
  added_label_.Reshape(num, 1, 1, 1);
  // Apply data transformations (mirror, scale, crop...)
  this->data_transformer_->Transform(datum_vector, &added_data_);
@@ -63,7 +76,11 @@ void MemoryDataLayer<Dtype>::AddMatVector(const vector<cv::Mat>& mat_vector,
  CHECK_GT(num, 0) << "There is no mat to add";
  CHECK_EQ(num % batch_size_, 0) <<
      "The added data must be a multiple of the batch size.";
  int crop_size = transform_param_.crop_size();
  if (crop_size > 0)
    added_data_.Reshape(num, channels_, crop_size, crop_size);
  else
    added_data_.Reshape(num, channels_, height_, width_);
  added_label_.Reshape(num, 1, 1, 1);
  // Apply data transformations (mirror, scale, crop...)
  this->data_transformer_->Transform(mat_vector, &added_data_);
@@ -87,7 +104,8 @@ void MemoryDataLayer<Dtype>::Reset(Dtype* data, Dtype* labels, int n) {
  // Warn with transformation parameters since a memory array is meant to
  // be generic and no transformations are done with Reset().
  if (this->layer_param_.has_transform_param()) {
    // Suppress this warning, since the transformation has already been
    // applied before Reset() is called.
    // LOG(WARNING) << this->type() << " does not transform array data on Reset()";
  }
  data_ = data;
  labels_ = labels;
@@ -108,7 +126,11 @@ template <typename Dtype>
void MemoryDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK(data_) << "MemoryDataLayer needs to be initialized by calling Reset";
  int crop_size = transform_param_.crop_size();
  if (crop_size > 0)
    top[0]->Reshape(batch_size_, channels_, crop_size, crop_size);
  else
    top[0]->Reshape(batch_size_, channels_, height_, width_);
  top[1]->Reshape(batch_size_, 1, 1, 1);
  top[0]->set_cpu_data(data_ + pos_ * size_);
  top[1]->set_cpu_data(labels_ + pos_);
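For context on the Reshape changes, a caller-side sketch (layer name, image sizes, and crop value are assumptions, not from the commit): with a memory_data_param of 256x256 and a transform_param crop_size of 227, the data transformer emits 227x227 data, so the staging and top blobs must be shaped from crop_size rather than height_/width_:

// Hypothetical usage of MemoryDataLayer with cropping enabled.
boost::shared_ptr<caffe::MemoryDataLayer<float> > md =
    boost::static_pointer_cast<caffe::MemoryDataLayer<float> >(
        net.layer_by_name("data"));
std::vector<cv::Mat> mats(md->batch_size(),
                          cv::Mat(256, 256, CV_8UC3));  // source images
std::vector<int> labels(md->batch_size(), 0);
// Transform() crops each image to 227x227. Before this fix, AddMatVector
// reshaped added_data_ to 256x256, so the blob shape disagreed with the
// transformed data; with the fix, both sides use crop_size.
md->AddMatVector(mats, labels);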
10 changes: 10 additions & 0 deletions src/caffe/net.cpp
@@ -21,6 +21,16 @@

namespace caffe {

#if defined(_MSC_VER)
// Reference all layers and solvers to make sure they get linked into
// the executable.
void HookRegisteredClasses() {
  FOR_ALL_LAYERS(USE_HOOK);
  FOR_ALL_SOLVERS(USE_HOOK);
}
#endif

template <typename Dtype>
Net<Dtype>::Net(const NetParameter& param, const Net* root_net)
: root_net_(root_net) {
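For reference, what the function above expands to (an illustrative, abridged expansion derived from the macros in linker_hooks.hpp, not code in the commit):

// FOR_ALL_SOLVERS(USE_HOOK) expands, abridged, to:
caffe::DummyFunction(caffe::AdaDeltaClass_hook_instance);
caffe::DummyFunction(caffe::AdaGradClass_hook_instance);
// ...one call per solver (and per layer for FOR_ALL_LAYERS), each
// referencing a hook variable and thereby pulling the .obj file that
// defines it -- including its layer/solver registrar -- into the link.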
63 changes: 63 additions & 0 deletions windows/caffe.managed/_CaffeModel.cpp
Expand Up @@ -6,6 +6,7 @@
#include <stdio.h>
#include "caffe/caffe.hpp"
#include "caffe/blob.hpp"
#include "caffe/layers/memory_data_layer.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#pragma warning(push, 0)
@@ -44,6 +45,24 @@ void _CaffeModel::SetDevice(int deviceId)
        Caffe::set_mode(Caffe::CPU);
}

int _CaffeModel::GetInputImageWidth()
{
    MemoryDataLayer<float>* layer = (MemoryDataLayer<float>*)_net->layer_by_name("data").get();
    return layer->width();
}

int _CaffeModel::GetInputImageHeight()
{
    MemoryDataLayer<float>* layer = (MemoryDataLayer<float>*)_net->layer_by_name("data").get();
    return layer->height();
}

int _CaffeModel::GetInputImageChannels()
{
    MemoryDataLayer<float>* layer = (MemoryDataLayer<float>*)_net->layer_by_name("data").get();
    return layer->channels();
}

cv::Mat CVReadImage(const string &imageFile, int height, int width, int interpolation)
{
    float means[3] = { 103.939, 116.779, 123.68 };  //REVIEW ktran: why hardcoded and why is it useful?
@@ -120,4 +139,48 @@ vector<FloatArray> _CaffeModel::ExtractOutputs(const string &imageFile, int interpolation,
        results.push_back(FloatArray(blob->cpu_data(), blob->count()));
    }
    return results;
}

void EvaluateBitmap(caffe::Net<float>* net, const string &imageData, int interpolation)
{
    // Net initialization
    float loss = 0.0;
    shared_ptr<MemoryDataLayer<float> > memory_data_layer;
    memory_data_layer = static_pointer_cast<MemoryDataLayer<float>>(net->layer_by_name("data"));

    Datum datum;
    datum.set_channels(3);
    datum.set_height(memory_data_layer->height());
    datum.set_width(memory_data_layer->width());
    datum.set_label(0);
    datum.clear_data();
    datum.clear_float_data();
    datum.set_data(imageData);

    std::vector<Datum> datums;
    for (int i = 0; i < 1; i++)
        datums.push_back(datum);

    memory_data_layer->AddDatumVector(datums);
    const std::vector<Blob<float>*>& results = net->ForwardPrefilled(&loss);
}

FloatArray _CaffeModel::ExtractBitmapOutputs(const std::string &imageData, int interpolation, const string &blobName)
{
    EvaluateBitmap(_net, imageData, interpolation);
    auto blob = _net->blob_by_name(blobName);
    return FloatArray(blob->cpu_data(), blob->count());
}

vector<FloatArray> _CaffeModel::ExtractBitmapOutputs(const std::string &imageData, int interpolation, const vector<string> &layerNames)
{
    EvaluateBitmap(_net, imageData, interpolation);
    vector<FloatArray> results;
    for (auto& name : layerNames)
    {
        auto blob = _net->blob_by_name(name);
        results.push_back(FloatArray(blob->cpu_data(), blob->count()));
    }
    return results;
}
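A usage sketch of the new bitmap path from native code (model paths, the "prob" blob name, and the interpolation value are assumptions, not from the commit):

// Hypothetical caller of the managed-wrapper backend.
_CaffeModel model("deploy.prototxt", "weights.caffemodel");
int c = model.GetInputImageChannels();
int h = model.GetInputImageHeight();
int w = model.GetInputImageWidth();
// EvaluateBitmap packs imageData into a Datum of the layer's height and
// width (channels are currently hardcoded to 3), so the string must hold
// exactly c*h*w bytes in Datum's channel-major layout.
std::string imageData(static_cast<size_t>(c) * h * w, '\0');
// ... fill imageData from a decoded bitmap ...
FloatArray probs = model.ExtractBitmapOutputs(imageData, /*interpolation=*/0, "prob");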
10 changes: 10 additions & 0 deletions windows/caffe.managed/_CaffeModel.h
@@ -36,7 +36,17 @@ class _CaffeModel
    _CaffeModel(const std::string &netFile, const std::string &modelFile);
    ~_CaffeModel();

    int GetInputImageWidth();
    int GetInputImageHeight();
    int GetInputImageChannels();

    //REVIEW ktran: these APIs only make sense for images
    FloatArray ExtractOutputs(const std::string &imageFile, int interpolation, const std::string &layerName);
    std::vector<FloatArray> ExtractOutputs(const std::string &imageFile, int interpolation, const std::vector<std::string> &layerNames);

    // imageData needs to be of size channels*height*width, as required by the "data" blob.
    // The C++/CLI caller can use GetInputImageWidth()/Height()/Channels() to get the required dimensions.
    FloatArray ExtractBitmapOutputs(const std::string &imageData, int interpolation, const std::string &layerName);
    std::vector<FloatArray> ExtractBitmapOutputs(const std::string &imageData, int interpolation, const std::vector<std::string> &layerNames);
};