From 04a5dcd17738c74d4d6dc1355d0299fb386c8a68 Mon Sep 17 00:00:00 2001
From: Sean Settle
Date: Mon, 19 Feb 2018 15:02:02 -0800
Subject: [PATCH] Necessary fixes for recent merge branch 'master' of
 https://github.com/BVLC/caffe into https://github.com/pmgysel/caffe

---
 include/ristretto/base_ristretto_layer.hpp |  1 -
 src/caffe/ristretto/quantization.cpp       | 12 ++++++------
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/include/ristretto/base_ristretto_layer.hpp b/include/ristretto/base_ristretto_layer.hpp
index 995daa70c44..af5c9d041b9 100644
--- a/include/ristretto/base_ristretto_layer.hpp
+++ b/include/ristretto/base_ristretto_layer.hpp
@@ -8,7 +8,6 @@
 #include "caffe/layers/inner_product_layer.hpp"
 #include "caffe/layers/base_data_layer.hpp"
 #include "caffe/layers/lrn_layer.hpp"
-#include "caffe/data_reader.hpp"
 #include "caffe/proto/caffe.pb.h"
 
 namespace caffe {
diff --git a/src/caffe/ristretto/quantization.cpp b/src/caffe/ristretto/quantization.cpp
index 53d4a4bd625..447183d61d5 100644
--- a/src/caffe/ristretto/quantization.cpp
+++ b/src/caffe/ristretto/quantization.cpp
@@ -185,7 +185,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   for (int bitwidth = 16; bitwidth > 0; bitwidth /= 2) {
     EditNetDescriptionDynamicFixedPoint(&param, "Convolution", "Parameters",
         bitwidth, -1, -1, -1);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bw_conv_params.push_back(bitwidth);
@@ -203,7 +203,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   for (int bitwidth = 16; bitwidth > 0; bitwidth /= 2) {
     EditNetDescriptionDynamicFixedPoint(&param, "InnerProduct", "Parameters",
         -1, bitwidth, -1, -1);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bw_fc_params.push_back(bitwidth);
@@ -221,7 +221,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   for (int bitwidth = 16; bitwidth > 0; bitwidth /= 2) {
     EditNetDescriptionDynamicFixedPoint(&param, "Convolution_and_InnerProduct",
         "Activations", -1, -1, bitwidth, bitwidth);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bw_layer_activations.push_back(bitwidth);
@@ -265,7 +265,7 @@ void Quantization::Quantize2DynamicFixedPoint() {
   EditNetDescriptionDynamicFixedPoint(&param, "Convolution_and_InnerProduct",
       "Parameters_and_Activations", bw_conv_params_, bw_fc_params_, bw_in_,
       bw_out_);
-  net_test = new Net<float>(param, NULL);
+  net_test = new Net<float>(param);
   net_test->CopyTrainedLayersFrom(weights_);
   RunForwardBatches(iterations_, net_test, &accuracy);
   delete net_test;
@@ -326,7 +326,7 @@ void Quantization::Quantize2MiniFloat() {
   // Test the net with different bit-widths
   for (int bitwidth = 16; bitwidth - 1 - exp_bits_ > 0; bitwidth /= 2) {
     EditNetDescriptionMiniFloat(&param, bitwidth);
-    net_test = new Net<float>(param, NULL);
+    net_test = new Net<float>(param);
     net_test->CopyTrainedLayersFrom(weights_);
     RunForwardBatches(iterations_, net_test, &accuracy);
     test_bitwidth.push_back(bitwidth);
@@ -382,7 +382,7 @@ void Quantization::Quantize2IntegerPowerOf2Weights() {
   // Bit-width of layer activations is hard-coded to 8-bit.
   EditNetDescriptionDynamicFixedPoint(&param, "Convolution_and_InnerProduct",
       "Activations", -1, -1, 8, 8);
-  net_test = new Net<float>(param, NULL);
+  net_test = new Net<float>(param);
   net_test->CopyTrainedLayersFrom(weights_);
   RunForwardBatches(iterations_, net_test, &accuracy);
   delete net_test;
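
Note (context, not part of the patch): upstream BVLC Caffe removed data_reader.hpp and dropped the
second root_net argument from the Net<Dtype> constructor when data prefetching and multi-GPU support
were reworked, so Ristretto's quantization tool must now build its test nets from the NetParameter
alone. Below is a minimal sketch of the post-merge construction pattern; the helper name BuildTestNet
and the path arguments are illustrative and do not appear in this patch.

// Sketch of the post-merge Net construction pattern (assumed helper,
// not part of the patch). Loads a model description, forces TEST phase,
// and copies trained weights, mirroring what quantization.cpp does.
#include <string>

#include "caffe/net.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/upgrade_proto.hpp"

caffe::Net<float>* BuildTestNet(const std::string& model_file,
                                const std::string& weights_file) {
  caffe::NetParameter param;
  caffe::ReadNetParamsFromTextFileOrDie(model_file, &param);
  param.mutable_state()->set_phase(caffe::TEST);
  // Before the merge:  new caffe::Net<float>(param, NULL);
  // After the merge the root_net pointer is gone from the constructor:
  caffe::Net<float>* net = new caffe::Net<float>(param);
  net->CopyTrainedLayersFrom(weights_file);
  return net;
}

The same one-argument constructor call replaces all six new Net<float>(param, NULL) sites that the
patch touches in quantization.cpp; together with the dropped data_reader.hpp include, that is the
whole adaptation to the merged upstream master.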