Commit

Necessary fixes for recent merge branch 'master' of https://github.co…
settle committed Feb 19, 2018
1 parent 5a3bb14 commit 04a5dcd
Showing 2 changed files with 6 additions and 7 deletions.
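Background (my reading of upstream Caffe history, not stated in the commit itself): BVLC Caffe's NCCL multi-GPU refactor removed both the caffe/data_reader.hpp header and the optional root-net argument of the Net constructor, which would explain why a merge from master breaks this fork until both usages are dropped. A minimal sketch of the constructor change, assuming a post-refactor caffe/net.hpp; the helper name is illustrative:

#include "caffe/net.hpp"
#include "caffe/proto/caffe.pb.h"

void ConstructExample(const caffe::NetParameter& param) {
  // Old API: the second argument was an optional "root net" pointer
  // used by the former multi-GPU data-layer sharing.
  //   caffe::Net<float>* net = new caffe::Net<float>(param, NULL);
  // New API: a single NetParameter argument.
  caffe::Net<float>* net = new caffe::Net<float>(param);
  delete net;
}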
1 change: 0 additions & 1 deletion include/ristretto/base_ristretto_layer.hpp
@@ -8,7 +8,6 @@
 #include "caffe/layers/inner_product_layer.hpp"
 #include "caffe/layers/base_data_layer.hpp"
 #include "caffe/layers/lrn_layer.hpp"
-#include "caffe/data_reader.hpp"
 #include "caffe/proto/caffe.pb.h"
 
 namespace caffe {
12 changes: 6 additions & 6 deletions src/caffe/ristretto/quantization.cpp
@@ -185,7 +185,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   for (int bitwidth = 16; bitwidth > 0; bitwidth /= 2) {
     EditNetDescriptionDynamicFixedPoint(&param, "Convolution", "Parameters",
         bitwidth, -1, -1, -1);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bw_conv_params.push_back(bitwidth);
@@ -203,7 +203,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   for (int bitwidth = 16; bitwidth > 0; bitwidth /= 2) {
     EditNetDescriptionDynamicFixedPoint(&param, "InnerProduct", "Parameters",
         -1, bitwidth, -1, -1);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bw_fc_params.push_back(bitwidth);
@@ -221,7 +221,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   for (int bitwidth = 16; bitwidth > 0; bitwidth /= 2) {
     EditNetDescriptionDynamicFixedPoint(&param, "Convolution_and_InnerProduct",
         "Activations", -1, -1, bitwidth, bitwidth);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bw_layer_activations.push_back(bitwidth);
@@ -265,7 +265,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   EditNetDescriptionDynamicFixedPoint(&param, "Convolution_and_InnerProduct",
       "Parameters_and_Activations", bw_conv_params_, bw_fc_params_, bw_in_,
       bw_out_);
-  net_test = new Net<float>(param, NULL);
+  net_test = new Net<float>(param);
   net_test->CopyTrainedLayersFrom(weights_);
   RunForwardBatches(iterations_, net_test, &accuracy);
   delete net_test;
@@ -326,7 +326,7 @@ void Quantization::Quantize2MiniFloat() {
   // Test the net with different bit-widths
   for (int bitwidth = 16; bitwidth - 1 - exp_bits_ > 0; bitwidth /= 2) {
     EditNetDescriptionMiniFloat(&param, bitwidth);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bitwidth.push_back(bitwidth);
@@ -382,7 +382,7 @@ void Quantization::Quantize2IntegerPowerOf2Weights() {
   // Bit-width of layer activations is hard-coded to 8-bit.
   EditNetDescriptionDynamicFixedPoint(&param, "Convolution_and_InnerProduct",
       "Activations", -1, -1, 8, 8);
-  net_test = new Net<float>(param, NULL);
+  net_test = new Net<float>(param);
   net_test->CopyTrainedLayersFrom(weights_);
   RunForwardBatches(iterations_, net_test, &accuracy);
   delete net_test;
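All six quantization.cpp hunks update the same score-a-candidate-net pattern to the one-argument constructor. A self-contained sketch of that pattern, assuming post-refactor Caffe; ScoreNet and its forward-pass loop are illustrative stand-ins for Ristretto's RunForwardBatches, not code from this commit:

#include <string>
#include "caffe/net.hpp"
#include "caffe/proto/caffe.pb.h"

// Build a net from a (possibly quantized) description, load the
// pretrained float weights, and measure accuracy. The dynamic fixed
// point sweeps call this while halving the bit-width each trial
// (16, 8, 4, 2, 1).
float ScoreNet(const caffe::NetParameter& param,
               const std::string& weights_file, int iterations) {
  caffe::Net<float>* net_test = new caffe::Net<float>(param);  // new ctor
  net_test->CopyTrainedLayersFrom(weights_file);
  float accuracy = 0.0f;
  for (int i = 0; i < iterations; ++i) {
    net_test->Forward();  // one test batch
    // ... read the accuracy blob and average it over iterations ...
  }
  delete net_test;
  return accuracy;
}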
