grad_hess_log_prob.hpp
#ifndef STAN_MODEL_GRAD_HESS_LOG_PROB_HPP
#define STAN_MODEL_GRAD_HESS_LOG_PROB_HPP

#include <stan/model/log_prob_grad.hpp>
#include <iostream>
#include <vector>

namespace stan {
  namespace model {

    /**
     * Evaluate the log probability, its gradient, and its Hessian
     * at params_r. This default version computes the Hessian
     * numerically by finite-differencing the gradient, at a cost of
     * O(params_r.size()) gradient evaluations (O(params_r.size()^2)
     * work overall).
     *
     * @tparam propto True if calculation is up to proportion
     * (double-only terms dropped).
     * @tparam jacobian_adjust_transform True if the log absolute
     * Jacobian determinant of inverse parameter transforms is added
     * to the log probability.
     * @tparam M Class of model.
     * @param[in] model Model.
     * @param[in] params_r Real-valued parameter vector.
     * @param[in] params_i Integer-valued parameter vector.
     * @param[out] gradient Vector to write the gradient to.
     * @param[out] hessian Vector to write the Hessian to, in row-major
     * order: hessian[i*D + j] is the element in the ith row and jth
     * column of the Hessian (where D = params_r.size()).
     * @param[in, out] msgs Stream to which print statements in Stan
     * programs are written; default is 0 (no output).
     * @return Log probability evaluated at params_r.
     */
    template <bool propto, bool jacobian_adjust_transform, class M>
    double grad_hess_log_prob(const M& model, std::vector<double>& params_r,
                              std::vector<int>& params_i,
                              std::vector<double>& gradient,
                              std::vector<double>& hessian,
                              std::ostream* msgs = 0) {
      static const double epsilon = 1e-3;
      static const int order = 4;
      // Fourth-order central-difference stencil for a first derivative:
      // f'(x) ~= (f(x-2e)/12 - 2f(x-e)/3 + 2f(x+e)/3 - f(x+2e)/12) / e,
      // with step size e = epsilon.
      static const double perturbations[order]
        = {-2 * epsilon, -1 * epsilon, epsilon, 2 * epsilon};
      static const double coefficients[order]
        = {1.0 / 12.0, -2.0 / 3.0, 2.0 / 3.0, -1.0 / 12.0};
      // Each estimate is added to both the (d, dd) and (dd, d) entries,
      // so scale by 0.5 to symmetrize; the division by epsilon completes
      // the finite-difference quotient.
      static const double half_over_epsilon = 0.5 / epsilon;

      double result
        = log_prob_grad<propto, jacobian_adjust_transform>(model, params_r,
                                                           params_i, gradient,
                                                           msgs);
      hessian.assign(params_r.size() * params_r.size(), 0);
      std::vector<double> temp_grad(params_r.size());
      std::vector<double> perturbed_params(params_r.begin(), params_r.end());
      for (size_t d = 0; d < params_r.size(); ++d) {
        double* row = &hessian[d * params_r.size()];
        for (int i = 0; i < order; ++i) {
          // Perturb only dimension d and evaluate the gradient there.
          perturbed_params[d] = params_r[d] + perturbations[i];
          log_prob_grad<propto, jacobian_adjust_transform>(model,
                                                           perturbed_params,
                                                           params_i,
                                                           temp_grad);
          for (size_t dd = 0; dd < params_r.size(); ++dd) {
            double increment
              = half_over_epsilon * coefficients[i] * temp_grad[dd];
            row[dd] += increment;                            // H(d, dd)
            hessian[d + dd * params_r.size()] += increment;  // H(dd, d)
          }
        }
        perturbed_params[d] = params_r[d];
      }
      return result;
    }

  }
}
#endif
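
The listing above relies on Stan's model concept and on log_prob_grad, so it cannot be compiled on its own. Below is a minimal, self-contained sketch of the same technique: the fourth-order central-difference stencil is applied to an analytic gradient, and each estimate is symmetrized and stored row-major, just as grad_hess_log_prob does with the model's gradient. The quadratic test function, the matrix A, and quad_grad are hypothetical stand-ins for illustration; they are not part of the Stan API.

#include <cstddef>
#include <iostream>
#include <vector>

// Toy problem: f(x) = -0.5 * x' A x with fixed symmetric A,
// so the gradient is g(x) = -A x and the exact Hessian is -A.
static const double A[2][2] = {{2.0, 0.5}, {0.5, 1.0}};

std::vector<double> quad_grad(const std::vector<double>& x) {
  std::vector<double> g(x.size(), 0.0);
  for (std::size_t i = 0; i < x.size(); ++i)
    for (std::size_t j = 0; j < x.size(); ++j)
      g[i] -= A[i][j] * x[j];
  return g;
}

int main() {
  const double epsilon = 1e-3;
  const int order = 4;
  const double perturbations[order]
    = {-2 * epsilon, -epsilon, epsilon, 2 * epsilon};
  const double coefficients[order]
    = {1.0 / 12.0, -2.0 / 3.0, 2.0 / 3.0, -1.0 / 12.0};

  std::vector<double> x = {0.3, -1.2};      // expansion point
  const std::size_t D = x.size();
  std::vector<double> hessian(D * D, 0.0);  // row-major: hessian[i*D + j]

  for (std::size_t d = 0; d < D; ++d) {
    for (int i = 0; i < order; ++i) {
      std::vector<double> xp = x;
      xp[d] += perturbations[i];            // perturb one coordinate
      std::vector<double> g = quad_grad(xp);
      for (std::size_t dd = 0; dd < D; ++dd) {
        // 0.5 symmetrizes (each pair of entries is filled from both
        // directions); dividing by epsilon completes the quotient.
        double inc = 0.5 * coefficients[i] * g[dd] / epsilon;
        hessian[d * D + dd] += inc;
        hessian[dd * D + d] += inc;
      }
    }
  }

  // Prints approximately -A: -2 -0.5 on the first row, -0.5 -1 on the second.
  for (std::size_t i = 0; i < D; ++i) {
    for (std::size_t j = 0; j < D; ++j)
      std::cout << hessian[i * D + j] << ' ';
    std::cout << '\n';
  }
  return 0;
}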