1 #ifndef STAN_MODEL_GRAD_HESS_LOG_PROB_HPP 2 #define STAN_MODEL_GRAD_HESS_LOG_PROB_HPP 33 template <
bool propto,
bool jacobian_adjust_transform,
class M>
35 std::vector<int>& params_i,
38 std::ostream*
msgs = 0) {
40 static const double half_epsilon = 0.5 *
epsilon;
41 static const int order = 4;
42 static const double perturbations[order]
44 static const double coefficients[order]
45 = { 1.0 / 12.0, -2.0 / 3.0, 2.0 / 3.0, -1.0 / 12.0 };
48 = log_prob_grad<propto, jacobian_adjust_transform>(
model, params_r,
51 hessian.assign(params_r.size() * params_r.size(), 0);
52 std::vector<double> temp_grad(params_r.size());
53 std::vector<double> perturbed_params(params_r.begin(), params_r.end());
54 for (
size_t d = 0;
d < params_r.size(); ++
d) {
55 double*
row = &hessian[
d*params_r.size()];
56 for (
int i = 0;
i < order; ++
i) {
57 perturbed_params[
d] = params_r[
d] + perturbations[
i];
58 log_prob_grad<propto, jacobian_adjust_transform>(
model,
61 for (
size_t dd = 0; dd < params_r.size(); ++dd) {
62 double increment = half_epsilon * coefficients[
i] * temp_grad[dd];
64 hessian[
d + dd*params_r.size()] += increment;
67 perturbed_params[
d] = params_r[
d];
void hessian(const M &model, const Eigen::Matrix<double, Eigen::Dynamic, 1> &x, double &f, Eigen::Matrix<double, Eigen::Dynamic, 1> &grad_f, Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> &hess_f, std::ostream *msgs = 0)
void gradient(const M &model, const Eigen::Matrix<double, Eigen::Dynamic, 1> &x, double &f, Eigen::Matrix<double, Eigen::Dynamic, 1> &grad_f, std::ostream *msgs = 0)
double grad_hess_log_prob(const M &model, std::vector<double> &params_r, std::vector<int> &params_i, std::vector<double> &gradient, std::vector<double> &hessian, std::ostream *msgs = 0)