log_prob_grad.hpp
#ifndef STAN_MODEL_LOG_PROB_GRAD_HPP
#define STAN_MODEL_LOG_PROB_GRAD_HPP

#include <stan/math/rev/mat.hpp>
#include <iostream>
#include <vector>

namespace stan {
  namespace model {

    /**
     * Compute the log probability and its gradient using
     * reverse-mode automatic differentiation, writing the gradient
     * into the specified vector.
     *
     * @tparam propto True if calculation is up to proportion
     * (double-only terms dropped).
     * @tparam jacobian_adjust_transform True if the log absolute
     * Jacobian determinant of inverse parameter transforms is added
     * to the log probability.
     * @tparam M Class of model.
     * @param[in] model Model.
     * @param[in] params_r Real-valued parameters.
     * @param[in] params_i Integer-valued parameters.
     * @param[out] gradient Vector into which gradient is written.
     * @param[in,out] msgs Stream to which messages are written.
     * @return Log probability of the parameters.
     */
    template <bool propto, bool jacobian_adjust_transform, class M>
    double log_prob_grad(const M& model,
                         std::vector<double>& params_r,
                         std::vector<int>& params_i,
                         std::vector<double>& gradient,
                         std::ostream* msgs = 0) {
      using std::vector;
      using stan::math::var;
      double lp;
      try {
        // copy the double-valued parameters into autodiff variables
        vector<var> ad_params_r(params_r.size());
        for (size_t i = 0; i < model.num_params_r(); ++i) {
          stan::math::var var_i(params_r[i]);
          ad_params_r[i] = var_i;
        }
        var adLogProb
            = model.template log_prob<propto, jacobian_adjust_transform>(
                ad_params_r, params_i, msgs);
        lp = adLogProb.val();
        adLogProb.grad(ad_params_r, gradient);
      } catch (const std::exception& ex) {
        // free the autodiff stack before rethrowing
        stan::math::recover_memory();
        throw;
      }
      // free the autodiff stack once the gradient has been copied out
      stan::math::recover_memory();
      return lp;
    }
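
    // Usage sketch (illustrative, not part of the original header):
    // any model class generated by the Stan compiler exposes
    // num_params_r() and the templated log_prob() used above, so the
    // std::vector overload can be called as below. The model type and
    // its construction are hypothetical placeholders.
    //
    //   my_model model(data_context);
    //   std::vector<double> params_r(model.num_params_r(), 0.0);
    //   std::vector<int> params_i;  // model with no integer parameters
    //   std::vector<double> grad;
    //   double lp = stan::model::log_prob_grad<true, true>(
    //       model, params_r, params_i, grad, &std::cout);
    //   // lp is the log density; grad[i] is its partial w.r.t. params_r[i].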

    /**
     * Compute the log probability and its gradient using
     * reverse-mode automatic differentiation, writing the gradient
     * into the specified Eigen vector.
     *
     * @tparam propto True if calculation is up to proportion
     * (double-only terms dropped).
     * @tparam jacobian_adjust_transform True if the log absolute
     * Jacobian determinant of inverse parameter transforms is added
     * to the log probability.
     * @tparam M Class of model.
     * @param[in] model Model.
     * @param[in] params_r Real-valued parameters.
     * @param[out] gradient Vector into which gradient is written.
     * @param[in,out] msgs Stream to which messages are written.
     * @return Log probability of the parameters.
     */
    template <bool propto, bool jacobian_adjust_transform, class M>
    double log_prob_grad(const M& model,
                         Eigen::VectorXd& params_r,
                         Eigen::VectorXd& gradient,
                         std::ostream* msgs = 0) {
      using std::vector;
      using stan::math::var;

      // copy the double-valued parameters into autodiff variables
      Eigen::Matrix<var, Eigen::Dynamic, 1> ad_params_r(params_r.size());
      for (size_t i = 0; i < model.num_params_r(); ++i) {
        stan::math::var var_i(params_r[i]);
        ad_params_r[i] = var_i;
      }
      try {
        var adLogProb
            = model.template log_prob<propto, jacobian_adjust_transform>(
                ad_params_r, msgs);
        double val = adLogProb.val();
        stan::math::grad(adLogProb, ad_params_r, gradient);
        return val;
      } catch (const std::exception& ex) {
        // free the autodiff stack before rethrowing
        stan::math::recover_memory();
        throw;
      }
    }
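
    // Usage sketch (illustrative): the Eigen overload is called the
    // same way, with Eigen::VectorXd in place of std::vector<double>
    // and no integer-parameter argument; the model object is the same
    // hypothetical placeholder as above.
    //
    //   Eigen::VectorXd params_r
    //       = Eigen::VectorXd::Zero(model.num_params_r());
    //   Eigen::VectorXd grad;
    //   double lp = stan::model::log_prob_grad<true, true>(
    //       model, params_r, grad, &std::cout);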

  }
}
#endif