Blobs are ND tensors instead of 4D arrays.
vector<int> shape_ instead of (num, channels, height, width).
jeffdonahue committed Nov 26, 2014
1 parent a35929f commit 3715eab
Showing 4 changed files with 146 additions and 50 deletions.
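For orientation before the diff: Blob now stores a vector<int> shape of arbitrary length, and the old num/channels/height/width accessors are kept as views onto the leading axes. Below is a minimal usage sketch (illustrative only, not part of the commit; the `example` function is hypothetical and C++11 brace initialization is used purely for brevity):

```cpp
#include <vector>
#include "caffe/blob.hpp"

void example() {
  std::vector<int> shape{2, 3, 4, 5, 6};  // five axes; no longer capped at 4
  caffe::Blob<float> blob(shape);         // equivalently: blob.Reshape(shape);

  // Legacy accessors read the leading axes; axes a blob does not have
  // report 1 (or 0 when the blob is empty), per shape(index) in blob.hpp.
  int n = blob.num();                     // shape[0] == 2
  int total = blob.count();               // 2 * 3 * 4 * 5 * 6 == 720
  // blob.shape_string() == "2 3 4 5 6 (720)"
}
```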
73 changes: 52 additions & 21 deletions include/caffe/blob.hpp
@@ -1,6 +1,9 @@
#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <string>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"
@@ -19,10 +22,16 @@ template <typename Dtype>
class Blob {
public:
Blob()
: data_(), diff_(), num_(0), channels_(0), height_(0), width_(0),
count_(0), capacity_(0) {}
: data_(), diff_(), count_(0), capacity_(0) {}

/// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
explicit Blob(const int num, const int channels, const int height,
const int width);
explicit Blob(const vector<int>& shape);

/// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
void Reshape(const int num, const int channels, const int height,
const int width);
/**
* @brief Change the dimensions of the blob, allocating new memory if
* necessary.
@@ -37,25 +46,46 @@ class Blob {
* an error; either Net::Forward or Net::Reshape need to be called to
* propagate the new input shape to higher layers.
*/
void Reshape(const int num, const int channels, const int height,
const int width);
void Reshape(const vector<int>& shape);
void ReshapeLike(const Blob& other);
inline int num() const { return num_; }
inline int channels() const { return channels_; }
inline int height() const { return height_; }
inline int width() const { return width_; }
inline string shape_string() const {
ostringstream stream;
for (int i = 0; i < shape_.size(); ++i) {
stream << shape_[i] << " ";
}
stream << "(" << count_ << ")";
return stream.str();
}
inline const vector<int>& shape() const { return shape_; }
inline int shape(int index) const {
CHECK_GE(index, 0) << "index must be non-negative";
if (index < shape_.size()) {
// Explicitly specified dimension; return it.
return shape_[index];
} else if (count_ == 0) {
// Empty blob; unspecified dimensions are 0.
return 0;
} else {
// Non-empty blob; unspecified dimensions are singletons (1).
return 1;
}
}
inline int num() const { return shape(0); }
inline int channels() const { return shape(1); }
inline int height() const { return shape(2); }
inline int width() const { return shape(3); }
inline int count() const { return count_; }
inline int offset(const int n, const int c = 0, const int h = 0,

@longjon (Collaborator) commented on Nov 26, 2014:

Should we generalize this (and/or provide generalized indexing), perhaps in another PR?

@jeffdonahue (Author) commented on Nov 26, 2014:

Yup, there should be a generalized version. I skipped over it because it was taking me too long to decide what the arguments should be (va_arg seems to be discouraged in C++?), but I'll put it on the TODO list for this PR along with the IPLayer parameter backwards compatibility issue.

const int w = 0) const {
CHECK_GE(n, 0);
CHECK_LE(n, num_);
CHECK_GE(channels_, 0);
CHECK_LE(c, channels_);
CHECK_GE(height_, 0);
CHECK_LE(h, height_);
CHECK_GE(width_, 0);
CHECK_LE(w, width_);
return ((n * channels_ + c) * height_ + h) * width_ + w;
CHECK_LE(n, shape(0));
CHECK_GE(shape(1), 0);
CHECK_LE(c, shape(1));
CHECK_GE(shape(2), 0);
CHECK_LE(h, shape(2));
CHECK_GE(shape(3), 0);
CHECK_LE(w, shape(3));
return ((n * shape(1) + c) * shape(2) + h) * shape(3) + w;
}
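As a rough sketch of the generalization raised in the comment thread above (not part of this commit, and the eventual signature was still undecided at the time), a helper like the hypothetical `nd_offset` below computes the same row-major linearization for an arbitrary-rank shape; it could equally be a Blob member working on shape_:

```cpp
#include <vector>
#include <glog/logging.h>  // CHECK_* macros, as used throughout Caffe

// Hypothetical helper (not in this commit): row-major linear offset for an
// arbitrary-rank shape; unspecified trailing indices are treated as 0.
inline int nd_offset(const std::vector<int>& shape,
                     const std::vector<int>& indices) {
  CHECK_LE(indices.size(), shape.size());
  int offset = 0;
  for (int i = 0; i < static_cast<int>(shape.size()); ++i) {
    offset *= shape[i];
    if (i < static_cast<int>(indices.size())) {
      CHECK_GE(indices[i], 0);
      CHECK_LT(indices[i], shape[i]);
      offset += indices[i];
    }
  }
  return offset;
}
// Example: nd_offset({2, 3, 4, 5}, {1, 2, 0, 3})
//          == ((1 * 3 + 2) * 4 + 0) * 5 + 3 == 103,
// matching the 4D formula ((n * C + c) * H + h) * W + w above.
```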
/**
* @brief Copy from a source Blob.
@@ -126,13 +156,14 @@
*/
void ShareDiff(const Blob& other);

bool ShapeEquals(const vector<int>& other_shape);
bool ShapeEquals(const Blob& other);
bool ShapeEquals(const BlobProto& other);

@longjon (Collaborator) commented on Nov 26, 2014:

Why not just use blah.shape() == other_blah.shape(), i.e., vector's operator ==?

@jeffdonahue (Author) commented on Nov 26, 2014:

good point...didn't know that was a thing
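For reference, std::vector<int> does provide element-wise operator==, so the ShapeEquals(const vector<int>&) overload defined in blob.cpp below could shrink to a one-liner. A possible simplification (not what this commit does):

```cpp
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const vector<int>& other_shape) {
  // std::vector::operator== compares sizes first, then elements in order.
  return shape_ == other_shape;
}
```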


protected:
shared_ptr<SyncedMemory> data_;
shared_ptr<SyncedMemory> diff_;
int num_;
int channels_;
int height_;
int width_;
vector<int> shape_;
int count_;
int capacity_;

105 changes: 87 additions & 18 deletions src/caffe/blob.cpp
@@ -1,3 +1,5 @@
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
@@ -8,15 +10,23 @@ namespace caffe {
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
const int width) {
CHECK_GE(num, 0);
CHECK_GE(channels, 0);
CHECK_GE(height, 0);
CHECK_GE(width, 0);
num_ = num;
channels_ = channels;
height_ = height;
width_ = width;
count_ = num_ * channels_ * height_ * width_;
vector<int> shape(4);
shape[0] = num;
shape[1] = channels;
shape[2] = height;
shape[3] = width;
Reshape(shape);
}

template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
count_ = 1;
shape_.resize(shape.size());
for (int i = 0; i < shape.size(); ++i) {
CHECK_GE(shape[i], 0);
count_ *= shape[i];
shape_[i] = shape[i];
}
if (count_ > capacity_) {
capacity_ = count_;
data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
@@ -26,7 +36,7 @@ void Blob<Dtype>::Reshape(const int num, const int channels, const int height,

template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
Reshape(other.num(), other.channels(), other.height(), other.width());
Reshape(other.shape());
}

template <typename Dtype>
@@ -37,6 +47,13 @@ Blob<Dtype>::Blob(const int num, const int channels, const int height,
Reshape(num, channels, height, width);
}

template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
// capacity_ must be initialized before calling Reshape
: capacity_(0) {
Reshape(shape);
}

template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
CHECK(data_);
@@ -205,12 +222,49 @@ Dtype Blob<Dtype>::asum_diff() const {
return 0;
}

template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const vector<int>& other_shape) {
const int num_dims = shape_.size();
if (other_shape.size() != num_dims) {
return false;
}
for (int i = 0; i < num_dims; ++i) {
if (other_shape[i] != shape_[i]) {
return false;
}
}
return true;
}

template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const Blob& other) {
return ShapeEquals(other.shape());
}

template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
if (other.has_num() || other.has_channels() ||
other.has_height() || other.has_width()) {
// Using deprecated 4D Blob dimensions --
// shape is (num, channels, height, width).
return shape_.size() == 4 &&
num() == other.num() &&
channels() == other.channels() &&
height() == other.height() &&
width() == other.width();
}
vector<int> other_shape(other.dim_size());
for (int i = 0; i < other.dim_size(); ++i) {
other_shape[i] = other.dim(i);
}
return ShapeEquals(other_shape);
}
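To illustrate the backward-compatibility rule implemented just above: a proto that sets any of the deprecated 4D fields only matches blobs with exactly four axes, even when the extra axes are singletons. A small hypothetical check (the function name is made up; brace initialization for brevity):

```cpp
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

void legacy_shape_check() {
  // A proto using the deprecated 4D fields, so has_num() etc. are true.
  caffe::BlobProto legacy;
  legacy.set_num(2);
  legacy.set_channels(3);
  legacy.set_height(1);
  legacy.set_width(1);

  caffe::Blob<float> four_axes(std::vector<int>{2, 3, 1, 1});
  caffe::Blob<float> two_axes(std::vector<int>{2, 3});
  // four_axes.ShapeEquals(legacy) -> true
  // two_axes.ShapeEquals(legacy)  -> false: the legacy path requires
  // shape_.size() == 4, even though num()/channels() also match here.
}
```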

template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
if (num_ != source.num() || channels_ != source.channels() ||
height_ != source.height() || width_ != source.width()) {
if (!ShapeEquals(source)) {
if (reshape) {
Reshape(source.num(), source.channels(), source.height(), source.width());
ReshapeLike(source);
} else {
LOG(FATAL) << "Trying to copy blobs of different sizes.";
}
@@ -241,7 +295,23 @@ void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {

template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto) {
Reshape(proto.num(), proto.channels(), proto.height(), proto.width());
vector<int> shape;
if (proto.has_num() || proto.has_channels() ||
proto.has_height() || proto.has_width()) {
// Using deprecated 4D Blob dimensions --
// shape is (num, channels, height, width).
shape.resize(4);
shape[0] = proto.num();
shape[1] = proto.channels();
shape[2] = proto.height();
shape[3] = proto.width();
} else {
shape.resize(proto.dim_size());
for (int i = 0; i < proto.dim_size(); ++i) {
shape[i] = proto.dim(i);
}
}
Reshape(shape);
// copy data
Dtype* data_vec = mutable_cpu_data();
for (int i = 0; i < count_; ++i) {
@@ -257,10 +327,9 @@ void Blob<Dtype>::FromProto(const BlobProto& proto) {

template <typename Dtype>
void Blob<Dtype>::ToProto(BlobProto* proto, bool write_diff) const {
proto->set_num(num_);
proto->set_channels(channels_);
proto->set_height(height_);
proto->set_width(width_);
for (int i = 0; i < shape_.size(); ++i) {
proto->add_dim(shape_[i]);
}
proto->clear_data();
proto->clear_diff();
const Dtype* data_vec = cpu_data();
11 changes: 2 additions & 9 deletions src/caffe/net.cpp
@@ -100,11 +100,7 @@ void Net<Dtype>::Init(const NetParameter& in_param) {
blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
}
blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id);
LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->num() << " "
<< top_vecs_[layer_id][top_id]->channels() << " "
<< top_vecs_[layer_id][top_id]->height() << " "
<< top_vecs_[layer_id][top_id]->width() << " ("
<< top_vecs_[layer_id][top_id]->count() << ")";
LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string();
if (layer->loss(top_id)) {
LOG(INFO) << " with loss weight " << layer->loss(top_id);
}
@@ -708,10 +704,7 @@ void Net<Dtype>::CopyTrainedLayersFrom(const NetParameter& param) {
CHECK_EQ(target_blobs.size(), source_layer.blobs_size())
<< "Incompatible number of blobs for layer " << source_layer_name;
for (int j = 0; j < target_blobs.size(); ++j) {
CHECK_EQ(target_blobs[j]->num(), source_layer.blobs(j).num());
CHECK_EQ(target_blobs[j]->channels(), source_layer.blobs(j).channels());
CHECK_EQ(target_blobs[j]->height(), source_layer.blobs(j).height());
CHECK_EQ(target_blobs[j]->width(), source_layer.blobs(j).width());
CHECK(target_blobs[j]->ShapeEquals(source_layer.blobs(j)));

@jeffdonahue (Author) commented on Nov 26, 2014:

[note to self: also fix ShareTrainedLayersWith and the weight sharing checks, and grep for any other instances of the old dimensions throughout the code (oh yeah, solver...)]

target_blobs[j]->FromProto(source_layer.blobs(j));
}
}
7 changes: 5 additions & 2 deletions src/caffe/proto/caffe.proto
@@ -3,12 +3,15 @@ syntax = "proto2";
package caffe;

message BlobProto {
repeated int32 dim = 7 [packed = true];
repeated float data = 5 [packed = true];
repeated float diff = 6 [packed = true];

// 4D dimensions -- deprecated. Use "dim" instead.
optional int32 num = 1 [default = 0];
optional int32 channels = 2 [default = 0];
optional int32 height = 3 [default = 0];
optional int32 width = 4 [default = 0];
repeated float data = 5 [packed = true];
repeated float diff = 6 [packed = true];
}
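A small round-trip sketch of the new field (hypothetical usage, not part of the diff): ToProto now records the shape as repeated dim entries, which FromProto reads back, so shapes with other than four axes survive serialization.

```cpp
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

void roundtrip() {
  caffe::Blob<float> blob(std::vector<int>{2, 3, 5});  // three axes
  caffe::BlobProto proto;
  blob.ToProto(&proto, false);  // writes dim: 2, dim: 3, dim: 5 plus the data

  caffe::Blob<float> restored;
  restored.FromProto(proto);    // reads "dim" back and reshapes to (2, 3, 5)
  // restored.ShapeEquals(blob) == true
}
```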

// The BlobProtoVector is simply a way to pass multiple blobproto instances
