#ifndef STAN_MODEL_LOG_PROB_GRAD_HPP
#define STAN_MODEL_LOG_PROB_GRAD_HPP

#include <stan/math/rev/mat.hpp>
#include <iostream>
#include <vector>

namespace stan {
namespace model {
// Compute the log density and its gradient using reverse-mode autodiff;
// writes the gradient into `gradient` and returns the log density value.
template <bool propto, bool jacobian_adjust_transform, class M>
double log_prob_grad(const M& model,
                     std::vector<double>& params_r,
                     std::vector<int>& params_i,
                     std::vector<double>& gradient,
                     std::ostream* msgs = 0) {
  using stan::math::var;
  using std::vector;
  try {
    // Promote the unconstrained real parameters to autodiff variables.
    vector<var> ad_params_r(params_r.size());
    for (size_t i = 0; i < model.num_params_r(); ++i) {
      stan::math::var var_i(params_r[i]);
      ad_params_r[i] = var_i;
    }
    var adLogProb
        = model.template log_prob<propto, jacobian_adjust_transform>(
            ad_params_r, params_i, msgs);
    double val = adLogProb.val();
    adLogProb.grad(ad_params_r, gradient);  // reverse pass fills gradient
    stan::math::recover_memory();
    return val;
  } catch (const std::exception& ex) {
    stan::math::recover_memory();
    throw;
  }
}

// Overload taking Eigen vectors for the unconstrained parameters and the
// gradient; the model's log_prob is called without integer parameters.
template <bool propto, bool jacobian_adjust_transform, class M>
double log_prob_grad(const M& model,
                     Eigen::VectorXd& params_r,
                     Eigen::VectorXd& gradient,
                     std::ostream* msgs = 0) {
  using stan::math::var;

  // Promote the unconstrained real parameters to autodiff variables.
  Eigen::Matrix<var, Eigen::Dynamic, 1> ad_params_r(params_r.size());
  for (size_t i = 0; i < model.num_params_r(); ++i) {
    stan::math::var var_i(params_r[i]);
    ad_params_r[i] = var_i;
  }
  try {
    var adLogProb
        = model
              .template log_prob<propto,
                                 jacobian_adjust_transform>(ad_params_r, msgs);
    double val = adLogProb.val();
    stan::math::grad(adLogProb, ad_params_r, gradient);  // reverse pass
    stan::math::recover_memory();
    return val;
  } catch (const std::exception& ex) {
    stan::math::recover_memory();
    throw;
  }
}

}  // namespace model
}  // namespace stan
#endif
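A minimal usage sketch, not part of the header: it assumes a stanc-generated model class exposing the num_params_r() and templated log_prob() interface used above; the wrapper name eval_at and the <true, true> template arguments (dropping constants and applying the Jacobian adjustment) are illustrative choices, not part of this file.

// Hypothetical call site, for illustration only.
#include <stan/model/log_prob_grad.hpp>
#include <iostream>
#include <vector>

template <class Model>
double eval_at(const Model& model, std::vector<double>& params_r) {
  std::vector<int> params_i;   // no integer parameters in this sketch
  std::vector<double> grad;    // filled by log_prob_grad
  double lp = stan::model::log_prob_grad<true, true>(
      model, params_r, params_i, grad, &std::cout);
  return lp;  // gradient w.r.t. params_r is now in grad
}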