Skip to content

Commit

Permalink
[Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.04.1 (tags/RELEASE_600/final)
Browse files Browse the repository at this point in the history
  • Loading branch information
stan-buildbot committed Mar 17, 2021
1 parent f2b1207 commit 9b89e76
Show file tree
Hide file tree
Showing 8 changed files with 156 additions and 179 deletions.
9 changes: 4 additions & 5 deletions src/stan/services/experimental/advi/lowrank.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,7 @@ int lowrank(Model& model, const stan::io::var_context& init,
double tol_rel_obj, int rank, double eta, bool adapt_engaged,
int adapt_iterations, int eval_elbo, int output_samples,
callbacks::interrupt& interrupt, callbacks::logger& logger,
callbacks::writer& init_writer,
callbacks::writer& parameter_writer,
callbacks::writer& init_writer, callbacks::writer& parameter_writer,
callbacks::writer& diagnostic_writer) {
util::experimental_message(logger);

Expand All @@ -75,9 +74,9 @@ int lowrank(Model& model, const stan::io::var_context& init,
Eigen::VectorXd cont_params
= Eigen::Map<Eigen::VectorXd>(&cont_vector[0], cont_vector.size(), 1);

stan::variational::advi_lowrank<Model, boost::ecuyer1988>
cmd_advi(model, cont_params, rng, rank, grad_samples, elbo_samples,
eval_elbo, output_samples);
stan::variational::advi_lowrank<Model, boost::ecuyer1988> cmd_advi(
model, cont_params, rng, rank, grad_samples, elbo_samples, eval_elbo,
output_samples);
cmd_advi.run(eta, adapt_engaged, adapt_iterations, tol_rel_obj,
max_iterations, logger, parameter_writer, diagnostic_writer);

Expand Down
61 changes: 29 additions & 32 deletions src/stan/variational/advi.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -569,53 +569,50 @@ class advi_base {
virtual Q init_variational(size_t dimension) const = 0;
};

template<class Model, class Q, class BaseRNG>
template <class Model, class Q, class BaseRNG>
class advi : public advi_base<Model, Q, BaseRNG> {
public:
advi(Model& m, Eigen::VectorXd& cont_params, BaseRNG& rng,
int n_monte_carlo_grad, int n_monte_carlo_elbo, int eval_elbo,
int n_posterior_samples)
: advi_base<Model, Q, BaseRNG>(m, cont_params, rng, n_monte_carlo_grad,
n_monte_carlo_elbo, eval_elbo,
n_posterior_samples) {}
int n_monte_carlo_grad, int n_monte_carlo_elbo, int eval_elbo,
int n_posterior_samples)
: advi_base<Model, Q, BaseRNG>(m, cont_params, rng, n_monte_carlo_grad,
n_monte_carlo_elbo, eval_elbo,
n_posterior_samples) {}

private:
Q init_variational(Eigen::VectorXd& cont_params) const {
return Q(cont_params);
}

Q init_variational(size_t dimension) const {
return Q(dimension);
}
Q init_variational(size_t dimension) const { return Q(dimension); }
};

template<class Model, class BaseRNG>
class advi_lowrank : public advi_base
<Model, stan::variational::normal_lowrank, BaseRNG> {
template <class Model, class BaseRNG>
class advi_lowrank
: public advi_base<Model, stan::variational::normal_lowrank, BaseRNG> {
public:
/**
* Constructor
*
* @param[in] m stan model
* @param[in] cont_params initialization of continuous parameters
* @param[in,out] rng random number generator
* @param[in] rank rank of approximation
* @param[in] n_monte_carlo_grad number of samples for gradient computation
* @param[in] n_monte_carlo_elbo number of samples for ELBO computation
* @param[in] eval_elbo evaluate ELBO at every "eval_elbo" iters
* @param[in] n_posterior_samples number of samples to draw from posterior
* @throw std::runtime_error if n_monte_carlo_grad is not positive
* @throw std::runtime_error if n_monte_carlo_elbo is not positive
* @throw std::runtime_error if eval_elbo is not positive
* @throw std::runtime_error if n_posterior_samples is not positive
*/
* Constructor
*
* @param[in] m stan model
* @param[in] cont_params initialization of continuous parameters
* @param[in,out] rng random number generator
* @param[in] rank rank of approximation
* @param[in] n_monte_carlo_grad number of samples for gradient computation
* @param[in] n_monte_carlo_elbo number of samples for ELBO computation
* @param[in] eval_elbo evaluate ELBO at every "eval_elbo" iters
* @param[in] n_posterior_samples number of samples to draw from posterior
* @throw std::runtime_error if n_monte_carlo_grad is not positive
* @throw std::runtime_error if n_monte_carlo_elbo is not positive
* @throw std::runtime_error if eval_elbo is not positive
* @throw std::runtime_error if n_posterior_samples is not positive
*/
advi_lowrank(Model& m, Eigen::VectorXd& cont_params, BaseRNG& rng,
size_t rank, int n_monte_carlo_grad, int n_monte_carlo_elbo,
int eval_elbo, int n_posterior_samples)
: advi_base<Model, stan::variational::normal_lowrank, BaseRNG>(m,
cont_params,
rng, n_monte_carlo_grad, n_monte_carlo_elbo,
eval_elbo, n_posterior_samples),
: advi_base<Model, stan::variational::normal_lowrank, BaseRNG>(
m, cont_params, rng, n_monte_carlo_grad, n_monte_carlo_elbo,
eval_elbo, n_posterior_samples),
rank_(rank) {
static const char* function = "stan::variational::advi_lowrank";
math::check_positive(function, "Approximation rank", rank_);
Expand All @@ -626,7 +623,7 @@ class advi_lowrank : public advi_base

private:
stan::variational::normal_lowrank init_variational(
Eigen::VectorXd& cont_params) const {
Eigen::VectorXd& cont_params) const {
return stan::variational::normal_lowrank(cont_params, rank_);
}

Expand Down
3 changes: 1 addition & 2 deletions src/stan/variational/base_family.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,7 @@ class base_family {
template <class BaseRNG>
Eigen::VectorXd sample(BaseRNG& rng, Eigen::VectorXd& eta) const;
template <class BaseRNG>
Eigen::VectorXd sample_log_g(BaseRNG& rng,
Eigen::VectorXd& eta,
Eigen::VectorXd sample_log_g(BaseRNG& rng, Eigen::VectorXd& eta,
double& log_g) const;
double calc_log_g(Eigen::VectorXd& eta) const;
template <class M, class BaseRNG>
Expand Down
124 changes: 59 additions & 65 deletions src/stan/variational/families/normal_lowrank.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,54 +20,47 @@ class normal_lowrank : public base_family {
const int dimension_;
const int rank_;

void validate_mean(const char* function,
const Eigen::VectorXd& mu) {
void validate_mean(const char* function, const Eigen::VectorXd& mu) {
stan::math::check_not_nan(function, "Mean vector", mu);
stan::math::check_size_match(function,
"Dimension of input vector", mu.size(),
"Dimension of current vector", dimension());
stan::math::check_size_match(function, "Dimension of input vector",
mu.size(), "Dimension of current vector",
dimension());
}

void validate_factor(const char* function,
const Eigen::MatrixXd& B) {
void validate_factor(const char* function, const Eigen::MatrixXd& B) {
stan::math::check_not_nan(function, "Low rank factor", B);
stan::math::check_size_match(function,
"Dimension of mean vector", dimension(),
"Dimension of low-rank factor", B.rows());
stan::math::check_size_match(function,
"Rank of factor", B.cols(),
stan::math::check_size_match(function, "Dimension of mean vector",
dimension(), "Dimension of low-rank factor",
B.rows());
stan::math::check_size_match(function, "Rank of factor", B.cols(),
"Rank of approximation", rank());
}

void validate_noise(const char *function,
const Eigen::VectorXd& log_d) {
void validate_noise(const char* function, const Eigen::VectorXd& log_d) {
stan::math::check_not_nan(function, "log std vector", log_d);
stan::math::check_size_match(function,
"Dimension of mean vector", dimension(),
"Dimension of log std vector", log_d.size());
stan::math::check_size_match(function, "Dimension of mean vector",
dimension(), "Dimension of log std vector",
log_d.size());
}

public:
explicit normal_lowrank(const Eigen::VectorXd& mu, size_t rank)
: mu_(mu),
B_(Eigen::MatrixXd::Zero(mu.size(), rank)),
log_d_(Eigen::VectorXd::Zero(mu.size())),
dimension_(mu.size()),
rank_(rank) {
}
: mu_(mu),
B_(Eigen::MatrixXd::Zero(mu.size(), rank)),
log_d_(Eigen::VectorXd::Zero(mu.size())),
dimension_(mu.size()),
rank_(rank) {}

explicit normal_lowrank(size_t dimension, size_t rank)
: mu_(Eigen::VectorXd::Zero(dimension)),
B_(Eigen::MatrixXd::Zero(dimension, rank)),
log_d_(Eigen::VectorXd::Zero(dimension)),
dimension_(dimension),
rank_(rank) {
}
: mu_(Eigen::VectorXd::Zero(dimension)),
B_(Eigen::MatrixXd::Zero(dimension, rank)),
log_d_(Eigen::VectorXd::Zero(dimension)),
dimension_(dimension),
rank_(rank) {}

explicit normal_lowrank(const Eigen::VectorXd& mu,
const Eigen::MatrixXd& B,
explicit normal_lowrank(const Eigen::VectorXd& mu, const Eigen::MatrixXd& B,
const Eigen::VectorXd& log_d)
: mu_(mu), B_(B), log_d_(log_d), dimension_(mu.size()), rank_(B.cols()) {
: mu_(mu), B_(B), log_d_(log_d), dimension_(mu.size()), rank_(B.cols()) {
static const char* function = "stan::variational::normal_lowrank";
validate_mean(function, mu);
validate_factor(function, B);
Expand Down Expand Up @@ -123,8 +116,8 @@ class normal_lowrank : public base_family {
= "stan::variational::normal_lowrank::operator=";
stan::math::check_size_match(function, "Dimension of lhs", dimension(),
"Dimension of rhs", rhs.dimension());
stan::math::check_size_match(function, "Rank of lhs", rank(),
"Rank of rhs", rhs.rank());
stan::math::check_size_match(function, "Rank of lhs", rank(), "Rank of rhs",
rhs.rank());
mu_ = rhs.mu();
B_ = rhs.B();
log_d_ = rhs.log_d();
Expand All @@ -136,8 +129,8 @@ class normal_lowrank : public base_family {
= "stan::variational::normal_lowrank::operator+=";
stan::math::check_size_match(function, "Dimension of lhs", dimension(),
"Dimension of rhs", rhs.dimension());
stan::math::check_size_match(function, "Rank of lhs", rank(),
"Rank of rhs", rhs.rank());
stan::math::check_size_match(function, "Rank of lhs", rank(), "Rank of rhs",
rhs.rank());
mu_ += rhs.mu();
B_ += rhs.B();
log_d_ += rhs.log_d();
Expand All @@ -150,8 +143,8 @@ class normal_lowrank : public base_family {

stan::math::check_size_match(function, "Dimension of lhs", dimension(),
"Dimension of rhs", rhs.dimension());
stan::math::check_size_match(function, "Rank of lhs", rank(),
"Rank of rhs", rhs.rank());
stan::math::check_size_match(function, "Rank of lhs", rank(), "Rank of rhs",
rhs.rank());
mu_.array() /= rhs.mu().array();
B_.array() /= rhs.B().array();
log_d_.array() /= rhs.log_d().array();
Expand Down Expand Up @@ -179,24 +172,29 @@ class normal_lowrank : public base_family {
// Determinant by the matrix determinant lemma
// det(D^2 + B.B^T) = det(I + B^T.D^-2.B) * det(D^2)
// where D^2 is diagonal and so can be computed accordingly
result
+= 0.5 * log(
(Eigen::MatrixXd::Identity(r, r) +
B_.transpose() *
log_d_.array().exp().square().matrix().asDiagonal().inverse() *
B_).determinant());
result += 0.5
* log((Eigen::MatrixXd::Identity(r, r)
+ B_.transpose()
* log_d_.array()
.exp()
.square()
.matrix()
.asDiagonal()
.inverse()
* B_)
.determinant());
for (int d = 0; d < dimension(); ++d) {
result += log_d_(d);
}
return result;
}

Eigen::VectorXd transform(const Eigen::VectorXd& eta) const {
static const char* function =
"stan::variational::normal_lowrank::transform";
stan::math::check_size_match(function,
"Dimension of input vector", eta.size(),
"Sum of dimension and rank", dimension() + rank());
static const char* function
= "stan::variational::normal_lowrank::transform";
stan::math::check_size_match(function, "Dimension of input vector",
eta.size(), "Sum of dimension and rank",
dimension() + rank());
stan::math::check_not_nan(function, "Input vector", eta);
Eigen::VectorXd z = eta.head(rank());
Eigen::VectorXd eps = eta.tail(dimension());
Expand Down Expand Up @@ -238,14 +236,11 @@ class normal_lowrank : public base_family {
}

template <class M, class BaseRNG>
void calc_grad(normal_lowrank& elbo_grad,
M& m,
Eigen::VectorXd& cont_params,
int n_monte_carlo_grad,
BaseRNG& rng,
void calc_grad(normal_lowrank& elbo_grad, M& m, Eigen::VectorXd& cont_params,
int n_monte_carlo_grad, BaseRNG& rng,
callbacks::logger& logger) const {
static const char* function =
"stan::variational::normal_lowrank::calc_grad";
static const char* function
= "stan::variational::normal_lowrank::calc_grad";

stan::math::check_size_match(function, "Dimension of elbo_grad",
elbo_grad.dimension(),
Expand All @@ -255,8 +250,8 @@ class normal_lowrank : public base_family {
cont_params.size());

stan::math::check_size_match(function, "Rank of elbo_grad",
elbo_grad.rank(),
"Rank of variational q", rank());
elbo_grad.rank(), "Rank of variational q",
rank());

Eigen::VectorXd mu_grad = Eigen::VectorXd::Zero(dimension());
Eigen::MatrixXd B_grad = Eigen::MatrixXd::Zero(dimension(), rank());
Expand All @@ -279,7 +274,7 @@ class normal_lowrank : public base_family {

// Naive Monte Carlo integration
static const int n_retries = 10;
for (int i = 0, n_monte_carlo_drop = 0; i < n_monte_carlo_grad; ) {
for (int i = 0, n_monte_carlo_drop = 0; i < n_monte_carlo_grad;) {
// Draw from standard normal and transform to real-coordinate space
for (int d = 0; d < dimension() + rank(); ++d) {
eta(d) = stan::math::normal_rng(0, 1, rng);
Expand Down Expand Up @@ -313,8 +308,9 @@ class normal_lowrank : public base_family {
const char* name = "The number of dropped evaluations";
const char* msg1 = "has reached its maximum amount (";
int y = n_retries * n_monte_carlo_grad;
const char* msg2 = "). Your model may be either severely "
"ill-conditioned or misspecified.";
const char* msg2
= "). Your model may be either severely "
"ill-conditioned or misspecified.";
stan::math::domain_error(function, name, y, msg1, msg2);
}
}
Expand All @@ -337,8 +333,7 @@ class normal_lowrank : public base_family {
}
};

inline normal_lowrank operator+(normal_lowrank lhs,
const normal_lowrank& rhs) {
inline normal_lowrank operator+(normal_lowrank lhs, const normal_lowrank& rhs) {
return lhs += rhs;
}

Expand All @@ -351,8 +346,7 @@ inline normal_lowrank operator+(normal_lowrank lhs,
* @return Elementwise division of the specified approximations.
* @throw std::domain_error If the dimensionalities do not match.
*/
inline normal_lowrank operator/(normal_lowrank lhs,
const normal_lowrank& rhs) {
inline normal_lowrank operator/(normal_lowrank lhs, const normal_lowrank& rhs) {
return lhs /= rhs;
}

Expand Down
4 changes: 2 additions & 2 deletions src/test/unit/variational/advi_messages_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ TEST_F(advi_test, prev_elbo_larger_fullrank) {
}

TEST_F(advi_test, prev_elbo_larger_lowrank) {
EXPECT_EQ(0, advi_lowrank_->run(10, 0, 50, 0.2, 100, logger,
parameter_writer, diagnostic_writer));
EXPECT_EQ(0, advi_lowrank_->run(10, 0, 50, 0.2, 100, logger, parameter_writer,
diagnostic_writer));
EXPECT_TRUE(log_stream_.str().find(err_msg2) != std::string::npos)
<< "The message should have err_msg2 inside it.";
}
Loading

0 comments on commit 9b89e76

Please sign in to comment.