Merge pull request #1686 from longjon/net-const
Improve const-ness of Net
longjon committed Jan 16, 2015
commit c24c83e (merge of 2 parents: 6f71db5 + 2377b68)
Showing 4 changed files with 70 additions and 45 deletions.
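The commit const-qualifies Net's read-only accessors and lookup methods so that code which only inspects a network can work through a const Net reference or pointer. As a rough sketch of what the updated header allows (not part of this commit; the "data" blob name is a hypothetical example):

#include <glog/logging.h>
#include "caffe/net.hpp"

template <typename Dtype>
void PrintNetSummary(const caffe::Net<Dtype>& net) {  // const& now suffices
  LOG(INFO) << "Net " << net.name() << ": "
            << net.num_inputs() << " inputs, "
            << net.num_outputs() << " outputs, "
            << net.layers().size() << " layers";
  if (net.has_blob("data")) {  // hypothetical blob name
    LOG(INFO) << "'data' holds " << net.blob_by_name("data")->count() << " values";
  }
}
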
72 changes: 47 additions & 25 deletions include/caffe/net.hpp
@@ -89,7 +89,7 @@ class Net {
* @brief For an already initialized net, implicitly copies (i.e., using no
* additional memory) the pre-trained layers from another Net.
*/
void ShareTrainedLayersWith(Net* other);
void ShareTrainedLayersWith(const Net* other);
// For an already initialized net, CopyTrainedLayersFrom() copies the already
// trained layers from another net parameter instance.
/**
@@ -99,51 +99,73 @@ class Net {
void CopyTrainedLayersFrom(const NetParameter& param);
void CopyTrainedLayersFrom(const string trained_filename);
/// @brief Writes the net to a proto.
void ToProto(NetParameter* param, bool write_diff = false);
void ToProto(NetParameter* param, bool write_diff = false) const;

/// @brief returns the network name.
inline const string& name() { return name_; }
inline const string& name() const { return name_; }
/// @brief returns the layer names
inline const vector<string>& layer_names() { return layer_names_; }
inline const vector<string>& layer_names() const { return layer_names_; }
/// @brief returns the blob names
inline const vector<string>& blob_names() { return blob_names_; }
inline const vector<string>& blob_names() const { return blob_names_; }
/// @brief returns the blobs
inline const vector<shared_ptr<Blob<Dtype> > >& blobs() { return blobs_; }
inline const vector<shared_ptr<Blob<Dtype> > >& blobs() const {
return blobs_;
}
/// @brief returns the layers
inline const vector<shared_ptr<Layer<Dtype> > >& layers() { return layers_; }
inline const vector<shared_ptr<Layer<Dtype> > >& layers() const {
return layers_;
}
/**
* @brief returns the bottom vecs for each layer -- usually you won't
* need this unless you do per-layer checks such as gradients.
*/
inline vector<vector<Blob<Dtype>*> >& bottom_vecs() { return bottom_vecs_; }
inline const vector<vector<Blob<Dtype>*> >& bottom_vecs() const {
return bottom_vecs_;
}
/**
* @brief returns the top vecs for each layer -- usually you won't
* need this unless you do per-layer checks such as gradients.
*/
inline vector<vector<Blob<Dtype>*> >& top_vecs() { return top_vecs_; }
inline vector<vector<bool> >& bottom_need_backward() {
inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
return top_vecs_;
}
inline const vector<vector<bool> >& bottom_need_backward() const {
return bottom_need_backward_;
}
inline vector<Dtype>& blob_loss_weights() {
inline const vector<Dtype>& blob_loss_weights() const {
return blob_loss_weights_;
}
/// @brief returns the parameters
inline vector<shared_ptr<Blob<Dtype> > >& params() { return params_; }
inline const vector<shared_ptr<Blob<Dtype> > >& params() const {
return params_;
}
/// @brief returns the parameter learning rate multipliers
inline vector<float>& params_lr() { return params_lr_; }
inline vector<float>& params_weight_decay() { return params_weight_decay_; }
const map<string, int>& param_names_index() { return param_names_index_; }
inline const vector<float>& params_lr() const { return params_lr_; }
inline const vector<float>& params_weight_decay() const {
return params_weight_decay_;
}
const map<string, int>& param_names_index() const {
return param_names_index_;
}
/// @brief Input and output blob numbers
inline int num_inputs() { return net_input_blobs_.size(); }
inline int num_outputs() { return net_output_blobs_.size(); }
inline vector<Blob<Dtype>*>& input_blobs() { return net_input_blobs_; }
inline vector<Blob<Dtype>*>& output_blobs() { return net_output_blobs_; }
inline vector<int>& input_blob_indices() { return net_input_blob_indices_; }
inline vector<int>& output_blob_indices() { return net_output_blob_indices_; }
bool has_blob(const string& blob_name);
const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name);
bool has_layer(const string& layer_name);
const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name);
inline int num_inputs() const { return net_input_blobs_.size(); }
inline int num_outputs() const { return net_output_blobs_.size(); }
inline const vector<Blob<Dtype>*>& input_blobs() const {
return net_input_blobs_;
}
inline const vector<Blob<Dtype>*>& output_blobs() const {
return net_output_blobs_;
}
inline const vector<int>& input_blob_indices() const {
return net_input_blob_indices_;
}
inline const vector<int>& output_blob_indices() const {
return net_output_blob_indices_;
}
bool has_blob(const string& blob_name) const;
const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name) const;
bool has_layer(const string& layer_name) const;
const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name) const;

void set_debug_info(const bool value) { debug_info_ = value; }

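Note that the const-ness added to these accessors is shallow wherever shared_ptr is involved: params() and blobs() become const member functions returning const references to the vectors, but the Blob objects reached through the stored shared_ptrs remain mutable. A small sketch of that distinction (assuming an already-initialized Net<float>; not code from this commit):

#include <vector>
#include <boost/shared_ptr.hpp>
#include "caffe/blob.hpp"
#include "caffe/net.hpp"

void TouchFirstParam(const caffe::Net<float>& net) {
  const std::vector<boost::shared_ptr<caffe::Blob<float> > >& params = net.params();
  // params.clear();                       // would not compile: the vector is const
  if (!params.empty()) {
    params[0]->mutable_cpu_data()[0] = 0;  // still compiles: the Blob itself is not const
  }
}
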
16 changes: 8 additions & 8 deletions src/caffe/net.cpp
@@ -636,7 +636,7 @@ void Net<Dtype>::UpdateDebugInfo(const int param_id) {
}

template <typename Dtype>
void Net<Dtype>::ShareTrainedLayersWith(Net* other) {
void Net<Dtype>::ShareTrainedLayersWith(const Net* other) {
int num_source_layers = other->layers().size();
for (int i = 0; i < num_source_layers; ++i) {
Layer<Dtype>* source_layer = other->layers()[i].get();
@@ -726,7 +726,7 @@ void Net<Dtype>::CopyTrainedLayersFrom(const string trained_filename) {
}

template <typename Dtype>
void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) {
void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {
param->Clear();
param->set_name(name_);
// Add bottom and top
@@ -785,16 +785,16 @@ void Net<Dtype>::Update() {
}

template <typename Dtype>
bool Net<Dtype>::has_blob(const string& blob_name) {
bool Net<Dtype>::has_blob(const string& blob_name) const {
return blob_names_index_.find(blob_name) != blob_names_index_.end();
}

template <typename Dtype>
const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
const string& blob_name) {
const string& blob_name) const {
shared_ptr<Blob<Dtype> > blob_ptr;
if (has_blob(blob_name)) {
blob_ptr = blobs_[blob_names_index_[blob_name]];
blob_ptr = blobs_[blob_names_index_.find(blob_name)->second];
} else {
blob_ptr.reset((Blob<Dtype>*)(NULL));
LOG(WARNING) << "Unknown blob name " << blob_name;
@@ -803,16 +803,16 @@ const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
}

template <typename Dtype>
bool Net<Dtype>::has_layer(const string& layer_name) {
bool Net<Dtype>::has_layer(const string& layer_name) const {
return layer_names_index_.find(layer_name) != layer_names_index_.end();
}

template <typename Dtype>
const shared_ptr<Layer<Dtype> > Net<Dtype>::layer_by_name(
const string& layer_name) {
const string& layer_name) const {
shared_ptr<Layer<Dtype> > layer_ptr;
if (has_layer(layer_name)) {
layer_ptr = layers_[layer_names_index_[layer_name]];
layer_ptr = layers_[layer_names_index_.find(layer_name)->second];
} else {
layer_ptr.reset((Layer<Dtype>*)(NULL));
LOG(WARNING) << "Unknown layer name " << layer_name;
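The switch from blob_names_index_[blob_name] to blob_names_index_.find(blob_name)->second (and likewise for layers) is forced by const-ness: std::map::operator[] default-inserts missing keys and therefore cannot be called on a const map, while find() can. A standalone illustration of the rule (plain std::map, not Caffe code):

#include <map>
#include <string>

int LookUp(const std::map<std::string, int>& index, const std::string& name) {
  // return index[name];  // does not compile: operator[] is a non-const member
  std::map<std::string, int>::const_iterator it = index.find(name);
  return it != index.end() ? it->second : -1;  // -1 as an arbitrary "not found" value
}
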
23 changes: 13 additions & 10 deletions src/caffe/solver.cpp
@@ -418,7 +418,7 @@ Dtype SGDSolver<Dtype>::GetLearningRate() {
template <typename Dtype>
void SGDSolver<Dtype>::PreSolve() {
// Initialize the history
vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
history_.clear();
update_.clear();
temp_.clear();
@@ -439,9 +439,10 @@ void SGDSolver<Dtype>::PreSolve() {

template <typename Dtype>
void SGDSolver<Dtype>::ComputeUpdateValue() {
vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
vector<float>& net_params_lr = this->net_->params_lr();
vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
const vector<float>& net_params_lr = this->net_->params_lr();
const vector<float>& net_params_weight_decay =
this->net_->params_weight_decay();
// get the learning rate
Dtype rate = GetLearningRate();
if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
@@ -552,9 +553,10 @@ void SGDSolver<Dtype>::RestoreSolverState(const SolverState& state) {

template <typename Dtype>
void NesterovSolver<Dtype>::ComputeUpdateValue() {
vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
vector<float>& net_params_lr = this->net_->params_lr();
vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
const vector<float>& net_params_lr = this->net_->params_lr();
const vector<float>& net_params_weight_decay =
this->net_->params_weight_decay();
// get the learning rate
Dtype rate = this->GetLearningRate();
if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
@@ -667,9 +669,10 @@ void NesterovSolver<Dtype>::ComputeUpdateValue() {

template <typename Dtype>
void AdaGradSolver<Dtype>::ComputeUpdateValue() {
vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
vector<float>& net_params_lr = this->net_->params_lr();
vector<float>& net_params_weight_decay = this->net_->params_weight_decay();
const vector<shared_ptr<Blob<Dtype> > >& net_params = this->net_->params();
const vector<float>& net_params_lr = this->net_->params_lr();
const vector<float>& net_params_weight_decay =
this->net_->params_weight_decay();
// get the learning rate
Dtype rate = this->GetLearningRate();
Dtype delta = this->param_.delta();
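The solver edits follow mechanically from the header change: params(), params_lr(), and params_weight_decay() now return const references, and a non-const lvalue reference cannot bind to them, so the local references in the solvers must be const as well. A minimal standalone sketch of that binding rule (illustrative only, not Caffe's actual classes):

#include <vector>

class Holder {
 public:
  const std::vector<float>& values() const { return values_; }
 private:
  std::vector<float> values_;
};

void Use(const Holder& h) {
  // std::vector<float>& v = h.values();     // error: binding would discard const
  const std::vector<float>& v = h.values();  // OK; mirrors the solver-side changes
  (void)v;  // silence unused-variable warnings
}
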
4 changes: 2 additions & 2 deletions tools/caffe.cpp
@@ -220,8 +220,8 @@ int time() {
caffe_net.Backward();

const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers();
vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
const vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
const vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
const vector<vector<bool> >& bottom_need_backward =
caffe_net.bottom_need_backward();
LOG(INFO) << "*** Benchmark begins ***";
