Commit 0a03b9d: format

dzhwinter committed May 18, 2017
1 parent: 376669d
Showing 4 changed files with 8 additions and 8 deletions.
paddle/lib/parameter_optimizer.h (2 additions, 2 deletions)
@@ -1,5 +1,5 @@
-#ifndef __PADDLE_LIB_OPTIMIZER_BASE_H__
-#define __PADDLE_LIB_OPTIMIZER_BASE_H__
+#ifndef PADDLE_LIB_OPTIMIZER_BASE_H_
+#define PADDLE_LIB_OPTIMIZER_BASE_H_
 
 #include <string>
 #include <functional>
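
Note: C++ reserves every identifier that contains a double underscore, or that begins with an underscore followed by an uppercase letter, for the implementation, so the old guard name __PADDLE_LIB_OPTIMIZER_BASE_H__ sat in reserved territory; the rename moves to the conventional PATH_FILE_H_ form with a single trailing underscore. A minimal sketch of the resulting header layout, with everything between the includes and the closing #endif elided (only the guard and includes appear in this diff):

// paddle/lib/parameter_optimizer.h (abridged sketch)
#ifndef PADDLE_LIB_OPTIMIZER_BASE_H_
#define PADDLE_LIB_OPTIMIZER_BASE_H_

#include <functional>
#include <string>

// ... optimizer declarations (not part of this diff) ...

#endif  // PADDLE_LIB_OPTIMIZER_BASE_H_
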
paddle/lib/regularizer_ops.h (4 additions, 2 deletions)
@@ -2,11 +2,13 @@
 #define PADDLE_LIB_REGULARIZER_OPS_H
 
 /*! \brief L1 implement */
-emplate<T>
-void applyL1(Tensor<T> parameter,
+template<class T>
+void applyL1(Tensor<T> &parameter,
              int32_t pass_num,
              double learning_rate) {
+  // TODO need to find out how to add pass_num
+
 
 }
 
 #endif
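
Note: this hunk fixes two things at once: the typo emplate<T> becomes the valid template<class T>, and parameter is now taken by reference so the update can mutate the caller's tensor rather than a copy. The body itself remains a TODO. Below is a self-contained sketch of what an L1 step could look like, under two explicit assumptions: Tensor<T> is modeled as std::vector<T> (the real interface is not shown in this diff), and the soft-thresholding update rule is illustrative, not PaddlePaddle's:

#include <cstddef>
#include <cstdint>
#include <vector>

template <class T>
using Tensor = std::vector<T>;  // stand-in; assumed, not paddle's real Tensor

// Illustrative L1 step: shrink each weight toward zero by learning_rate
// (soft-thresholding). How pass_num should enter is the commit's open TODO.
template <class T>
void applyL1(Tensor<T> &parameter, std::int32_t pass_num,
             double learning_rate) {
  (void)pass_num;  // unused, per the TODO in the diff
  const T shrink = static_cast<T>(learning_rate);
  for (std::size_t i = 0; i < parameter.size(); ++i) {
    if (parameter[i] > shrink) {
      parameter[i] -= shrink;
    } else if (parameter[i] < -shrink) {
      parameter[i] += shrink;
    } else {
      parameter[i] = T(0);
    }
  }
}
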
paddle/lib/sgd_optimizer.h (0 additions, 2 deletions)
@@ -12,8 +12,6 @@ class SGDOptimizer : public ParameterOptimizer {
              double learning_rate) {
     applyGradientDescent(parameter, gradient, learning_rate);
   }
-
- private:
 };
 
 class MomentumOptimizer : public ParameterOptimizer {
paddle/lib/training_ops.cc (2 additions, 2 deletions)
@@ -10,8 +10,8 @@ void applyGradientDescent(Tensor<T> &parameter,
                           double learning_rate) {
   /*! \brief TODO(will replace with matrix dot) */
   for(size_t i=0; i < parameter.size(); ++i) {
-    parameter[i] -= gradient[i] * learning
-  }
+    parameter[i] -= gradient[i] * learning_rate;
+  }
 }
 
 template<typename T>
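
Note: the deleted line was missing both the end of the identifier and the semicolon (learning instead of learning_rate;), so the file could not compile; the replacement completes the element-wise SGD update parameter[i] -= gradient[i] * learning_rate. A self-contained sketch of the same loop, with Tensor<T> again modeled as std::vector<T> (an assumption; the real class is not in this diff):

#include <cstddef>
#include <iostream>
#include <vector>

template <typename T>
using Tensor = std::vector<T>;  // stand-in; assumed, not paddle's real Tensor

// Element-wise gradient descent step, mirroring the fixed loop in
// training_ops.cc: parameter <- parameter - learning_rate * gradient.
// The in-file TODO says this will later become a vectorized matrix op.
template <typename T>
void applyGradientDescent(Tensor<T> &parameter, const Tensor<T> &gradient,
                          double learning_rate) {
  for (std::size_t i = 0; i < parameter.size(); ++i) {
    parameter[i] -= static_cast<T>(gradient[i] * learning_rate);
  }
}

int main() {
  Tensor<float> p = {1.0f, 2.0f};
  Tensor<float> g = {0.5f, -0.5f};
  applyGradientDescent(p, g, 0.1);
  std::cout << p[0] << " " << p[1] << "\n";  // prints 0.95 2.05
}
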
